Mirror of https://github.com/gomods/athens (synced 2026-02-03 11:00:32 +00:00)
Config cleanup (#860)
* add default env vars
* delete setRuntimeDefaults
* replace the CDN config with an Azure config and remove BaseURL
@@ -47,15 +47,12 @@ func GetStorage(storageType string, storageConfig *config.StorageConfig) (storag
if storageConfig.GCP == nil {
return nil, errors.E(op, "Invalid GCP Storage Configuration")
}
if storageConfig.CDN == nil {
return nil, errors.E(op, "Invalid CDN Storage Configuration")
}
return gcp.New(context.Background(), storageConfig.GCP, storageConfig.CDN)
return gcp.New(context.Background(), storageConfig.GCP)
case "s3":
if storageConfig.S3 == nil {
return nil, errors.E(op, "Invalid S3 Storage Configuration")
}
return s3.New(storageConfig.S3, storageConfig.CDN)
return s3.New(storageConfig.S3)
default:
return nil, fmt.Errorf("storage type %s is unknown", storageType)
}
@@ -0,0 +1,9 @@
package config

// AzureConfig specifies the properties required to use Azure as the storage backend
type AzureConfig struct {
TimeoutConf
AccountName string `validate:"required" envconfig:"ATHENS_AZURE_ACCOUNT_NAME"`
AccountKey string `validate:"required" envconfig:"ATHENS_AZURE_ACCOUNT_KEY"`
ContainerName string `validate:"required" envconfig:"ATHENS_AZURE_CONTAINER_NAME"`
}
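For context, a minimal sketch (not part of this diff) of how the new tags are expected to be consumed: envconfig.Process — the same call envOverride uses — fills the struct from the ATHENS_AZURE_* variables. The local AzureConfig below is a stand-in that mirrors the type added above (TimeoutConf omitted), and the values are placeholders.

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/kelseyhightower/envconfig"
)

// Local stand-in mirroring the AzureConfig added above (TimeoutConf omitted).
type AzureConfig struct {
	AccountName   string `envconfig:"ATHENS_AZURE_ACCOUNT_NAME"`
	AccountKey    string `envconfig:"ATHENS_AZURE_ACCOUNT_KEY"`
	ContainerName string `envconfig:"ATHENS_AZURE_CONTAINER_NAME"`
}

func main() {
	// Placeholder values, for illustration only.
	os.Setenv("ATHENS_AZURE_ACCOUNT_NAME", "myaccount")
	os.Setenv("ATHENS_AZURE_ACCOUNT_KEY", "bXlrZXk=")
	os.Setenv("ATHENS_AZURE_CONTAINER_NAME", "gomods")

	var conf AzureConfig
	// envconfig tries the prefixed name first and falls back to the explicit
	// tag name, so ATHENS_AZURE_ACCOUNT_NAME is picked up here.
	if err := envconfig.Process("athens", &conf); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", conf)
}
```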
@@ -1,24 +0,0 @@
package config

import "net/url"

// CDNConfig specifies the properties required to use a CDN as the storage backend
type CDNConfig struct {
TimeoutConf
Endpoint string `envconfig:"CDN_ENDPOINT"`
}

// CDNEndpointWithDefault returns CDN endpoint if set
// if not it should default to clouds default blob storage endpoint e.g
func (c *CDNConfig) CDNEndpointWithDefault(value *url.URL) *url.URL {
if c.Endpoint == "" {
return value
}
rawURI := c.Endpoint

uri, err := url.Parse(rawURI)
if err != nil {
return value
}
return uri
}
+20 -15

@@ -71,17 +71,9 @@ func ParseConfigFile(configFile string) (*Config, error) {
return nil, err
}

// set default values
setRuntimeDefaults(&config)

// If not defined, set storage timeouts to global timeout
setStorageTimeouts(config.Storage, config.Timeout)

// delete invalid storage backend configs
// envconfig initializes *all* struct pointers, even if there are no corresponding defaults or env variables
// this method prunes all such invalid configurations
deleteInvalidStorageConfigs(config.Storage)

// validate all required fields have been populated
if err := validateConfig(config); err != nil {
return nil, err

@@ -89,22 +81,35 @@ func ParseConfigFile(configFile string) (*Config, error) {
return &config, nil
}

func setRuntimeDefaults(config *Config) {
// TODO: Set defaults here
}

// envOverride uses Environment variables to override unspecified properties
func envOverride(config *Config) error {
return envconfig.Process("athens", config)
}

func validateConfig(c Config) error {
func validateConfig(config Config) error {
validate := validator.New()
err := validate.Struct(c)
err := validate.StructExcept(config, "Storage")
if err != nil {
return err
}
return nil
switch config.StorageType {
case "memory":
return nil
case "mongo":
return validate.Struct(config.Storage.Mongo)
case "disk":
return validate.Struct(config.Storage.Disk)
case "minio":
return validate.Struct(config.Storage.Minio)
case "gcp":
return validate.Struct(config.Storage.GCP)
case "s3":
return validate.Struct(config.Storage.S3)
case "azure":
return validate.Struct(config.Storage.Azure)
default:
return fmt.Errorf("storage type %s is unknown", config.StorageType)
}
}

// GetConf accepts the path to a file, constructs an absolute path to the file,
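A small sketch of what the new validateConfig buys, using simplified stand-in types rather than the real Config (assumed shapes): validate.StructExcept skips the whole Storage tree, and only the backend selected by StorageType is validated afterwards, so the empty configs that envconfig initializes for unused backends no longer fail validation.

```go
package main

import (
	"fmt"

	validator "gopkg.in/go-playground/validator.v9"
)

// Simplified stand-ins for the real config types (assumed shapes).
type DiskConfig struct {
	RootPath string `validate:"required"`
}

type GCPConfig struct {
	Bucket string `validate:"required"`
}

type StorageConfig struct {
	Disk *DiskConfig
	GCP  *GCPConfig
}

type Config struct {
	StorageType string `validate:"required"`
	Storage     *StorageConfig
}

func main() {
	validate := validator.New()
	c := Config{
		StorageType: "disk",
		Storage: &StorageConfig{
			Disk: &DiskConfig{RootPath: "/tmp/athens"},
			// GCP is initialized but empty, the way envconfig leaves unused
			// backends; a plain validate.Struct(c) would reject it.
			GCP: &GCPConfig{},
		},
	}

	// Top-level fields are validated; the Storage subtree is skipped entirely.
	if err := validate.StructExcept(c, "Storage"); err != nil {
		fmt.Println("config invalid:", err)
		return
	}
	// Then only the backend named by StorageType is validated.
	if err := validate.Struct(c.Storage.Disk); err != nil {
		fmt.Println("disk config invalid:", err)
		return
	}
	fmt.Println("config valid for storage type:", c.StorageType)
}
```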
@@ -19,15 +19,10 @@ func compareConfigs(parsedConf *Config, expConf *Config, t *testing.T) {
if !eq {
t.Errorf("Parsed Example configuration did not match expected values. Expected: %+v. Actual: %+v", expConf, parsedConf)
}
compareStorageConfigs(parsedConf.Storage, expConf.Storage, t)
}

func compareStorageConfigs(parsedStorage *StorageConfig, expStorage *StorageConfig, t *testing.T) {
eq := cmp.Equal(parsedStorage.CDN, expStorage.CDN)
if !eq {
t.Errorf("Parsed Example Storage configuration did not match expected values. Expected: %+v. Actual: %+v", expStorage.CDN, parsedStorage.CDN)
}
eq = cmp.Equal(parsedStorage.Mongo, expStorage.Mongo)
eq := cmp.Equal(parsedStorage.Mongo, expStorage.Mongo)
if !eq {
t.Errorf("Parsed Example Storage configuration did not match expected values. Expected: %+v. Actual: %+v", expStorage.Mongo, parsedStorage.Mongo)
}

@@ -98,22 +93,13 @@ func TestEnvOverrides(t *testing.T) {
if err != nil {
t.Fatalf("Env override failed: %v", err)
}
deleteInvalidStorageConfigs(conf.Storage)

compareConfigs(conf, expConf, t)
restoreEnv(envVarBackup)
}

func TestStorageEnvOverrides(t *testing.T) {

globalTimeout := 300
expStorage := &StorageConfig{
CDN: &CDNConfig{
Endpoint: "cdnEndpoint",
TimeoutConf: TimeoutConf{
Timeout: globalTimeout,
},
},
Disk: &DiskConfig{
RootPath: "/my/root/path",
},

@@ -165,8 +151,6 @@ func TestStorageEnvOverrides(t *testing.T) {
t.Fatalf("Env override failed: %v", err)
}
setStorageTimeouts(conf.Storage, globalTimeout)
deleteInvalidStorageConfigs(conf.Storage)

compareStorageConfigs(conf.Storage, expStorage, t)
restoreEnv(envVarBackup)
}

@@ -178,7 +162,6 @@ func TestParseExampleConfig(t *testing.T) {
// initialize all struct pointers so we get all applicable env variables
emptyConf := &Config{
Storage: &StorageConfig{
CDN: &CDNConfig{},
Disk: &DiskConfig{},
GCP: &GCPConfig{},
Minio: &MinioConfig{

@@ -200,12 +183,6 @@ func TestParseExampleConfig(t *testing.T) {
globalTimeout := 300

expStorage := &StorageConfig{
CDN: &CDNConfig{
Endpoint: "cdn.example.com",
TimeoutConf: TimeoutConf{
Timeout: globalTimeout,
},
},
Disk: &DiskConfig{
RootPath: "/path/on/disk",
},

@@ -305,9 +282,6 @@ func getEnvMap(config *Config) map[string]string {

storage := config.Storage
if storage != nil {
if storage.CDN != nil {
envVars["CDN_ENDPOINT"] = storage.CDN.Endpoint
}
if storage.Disk != nil {
envVars["ATHENS_DISK_STORAGE_ROOT"] = storage.Disk.RootPath
}
+3 -47

@@ -1,24 +1,19 @@
package config

import validator "gopkg.in/go-playground/validator.v9"

// StorageConfig provides configs for various storage backends
type StorageConfig struct {
CDN *CDNConfig
Disk *DiskConfig
GCP *GCPConfig
Minio *MinioConfig
Mongo *MongoConfig
S3 *S3Config
Azure *AzureConfig
}

func setStorageTimeouts(s *StorageConfig, defaultTimeout int) {
if s == nil {
return
}
if s.CDN != nil && s.CDN.Timeout == 0 {
s.CDN.Timeout = defaultTimeout
}
if s.GCP != nil && s.GCP.Timeout == 0 {
s.GCP.Timeout = defaultTimeout
}

@@ -31,46 +26,7 @@ func setStorageTimeouts(s *StorageConfig, defaultTimeout int) {
if s.S3 != nil && s.S3.Timeout == 0 {
s.S3.Timeout = defaultTimeout
}
}

// envconfig initializes *all* struct pointers, even if there are no corresponding defaults or env variables
// deleteInvalidStorageConfigs prunes all such invalid configurations
func deleteInvalidStorageConfigs(s *StorageConfig) {
validate := validator.New()

if s.CDN != nil {
if err := validate.Struct(s.CDN); err != nil {
s.CDN = nil
}
}

if s.Disk != nil {
if err := validate.Struct(s.Disk); err != nil {
s.Disk = nil
}
}

if s.GCP != nil {
if err := validate.Struct(s.GCP); err != nil {
s.GCP = nil
}
}

if s.Minio != nil {
if err := validate.Struct(s.Minio); err != nil {
s.Minio = nil
}
}

if s.Mongo != nil {
if err := validate.Struct(s.Mongo); err != nil {
s.Mongo = nil
}
}

if s.S3 != nil {
if err := validate.Struct(s.S3); err != nil {
s.S3 = nil
}
if s.Azure != nil && s.Azure.Timeout == 0 {
s.Azure.Timeout = defaultTimeout
}
}
@@ -1,57 +0,0 @@
package azurecdn

import (
"context"
"io"
"net/url"

"github.com/Azure/azure-storage-blob-go/2017-07-29/azblob"
"github.com/gomods/athens/pkg/errors"
"github.com/gomods/athens/pkg/observ"
)

type azureBlobStoreClient struct {
containerURL *azblob.ContainerURL
}

func newBlobStoreClient(accountURL *url.URL, accountName, accountKey, containerName string) *azureBlobStoreClient {
cred := azblob.NewSharedKeyCredential(accountName, accountKey)
pipe := azblob.NewPipeline(cred, azblob.PipelineOptions{})
serviceURL := azblob.NewServiceURL(*accountURL, pipe)
// rules on container names:
// https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata#container-names
//
// This container must exist
containerURL := serviceURL.NewContainerURL(containerName)
cl := &azureBlobStoreClient{containerURL: &containerURL}
return cl
}

func (c *azureBlobStoreClient) UploadWithContext(ctx context.Context, path, contentType string, content io.Reader) error {
const op errors.Op = "azurecdn.UploadWithContext"
ctx, span := observ.StartSpan(ctx, op.String())
defer span.End()
blobURL := c.containerURL.NewBlockBlobURL(path)
emptyMeta := map[string]string{}
emptyBlobAccessCond := azblob.BlobAccessConditions{}
httpHeaders := func(contentType string) azblob.BlobHTTPHeaders {
return azblob.BlobHTTPHeaders{
ContentType: contentType,
}
}
bufferSize := 1 * 1024 * 1024 // Size of the rotating buffers that are used when uploading
maxBuffers := 3 // Number of rotating buffers that are used when uploading

uploadStreamOpts := azblob.UploadStreamToBlockBlobOptions{
BufferSize: bufferSize,
MaxBuffers: maxBuffers,
BlobHTTPHeaders: httpHeaders(contentType),
Metadata: emptyMeta,
AccessConditions: emptyBlobAccessCond,
}
_, err := azblob.UploadStreamToBlockBlob(ctx, content, blobURL, uploadStreamOpts)
if err != nil {
return errors.E(op, err)
}
return nil
}
@@ -1,4 +1,4 @@
package azurecdn
package azure

import (
"bytes"

@@ -7,6 +7,7 @@ import (
"io"
"net/url"

"github.com/Azure/azure-storage-blob-go/2017-07-29/azblob"
"github.com/gomods/athens/pkg/config"
"github.com/gomods/athens/pkg/errors"
"github.com/gomods/athens/pkg/observ"

@@ -17,56 +18,78 @@ type client interface {
UploadWithContext(ctx context.Context, path, contentType string, content io.Reader) error
}

type azureBlobStoreClient struct {
containerURL *azblob.ContainerURL
}

func newBlobStoreClient(accountURL *url.URL, accountName, accountKey, containerName string) *azureBlobStoreClient {
cred := azblob.NewSharedKeyCredential(accountName, accountKey)
pipe := azblob.NewPipeline(cred, azblob.PipelineOptions{})
serviceURL := azblob.NewServiceURL(*accountURL, pipe)
// rules on container names:
// https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata#container-names
//
// This container must exist
containerURL := serviceURL.NewContainerURL(containerName)
cl := &azureBlobStoreClient{containerURL: &containerURL}
return cl
}

// Storage implements (github.com/gomods/athens/pkg/storage).Saver and
// also provides a function to fetch the location of a module
type Storage struct {
cl client
baseURI *url.URL
cdnConf *config.CDNConfig
cl client
conf *config.AzureConfig
}

// New creates a new azure CDN saver
func New(accountName, accountKey, containerName string, cdnConf *config.CDNConfig) (*Storage, error) {
const op errors.Op = "azurecdn.New"
u, err := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net", accountName))
// New creates a new azure blobs storage saver
func New(conf *config.AzureConfig) (*Storage, error) {
const op errors.Op = "azure.New"
u, err := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net", conf.AccountName))
if err != nil {
return nil, errors.E(op, err)
}
cl := newBlobStoreClient(u, accountName, accountKey, containerName)
return &Storage{cl: cl, baseURI: u, cdnConf: cdnConf}, nil
}

// newWithClient creates a new azure CDN saver
func newWithClient(accountName, cl client, cdnConf *config.CDNConfig) (*Storage, error) {
const op errors.Op = "azurecdn.newWithClient"
u, err := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net", accountName))
if err != nil {
return nil, errors.E(op, err)
}
return &Storage{cl: cl, baseURI: u, cdnConf: cdnConf}, nil
}

// BaseURL returns the base URL that stores all modules. It can be used
// in the "meta" tag redirect response to vgo.
//
// For example:
//
// <meta name="go-import" content="gomods.com/athens mod BaseURL()">
func (s Storage) BaseURL() *url.URL {
return s.cdnConf.CDNEndpointWithDefault(s.baseURI)
cl := newBlobStoreClient(u, conf.AccountName, conf.AccountKey, conf.ContainerName)
return &Storage{cl: cl, conf: conf}, nil
}

// Save implements the (github.com/gomods/athens/pkg/storage).Saver interface.
func (s *Storage) Save(ctx context.Context, module, version string, mod []byte, zip io.Reader, info []byte) error {
const op errors.Op = "azurecdn.Save"
const op errors.Op = "azure.Save"
ctx, span := observ.StartSpan(ctx, op.String())
defer span.End()
err := moduploader.Upload(ctx, module, version, bytes.NewReader(info), bytes.NewReader(mod), zip, s.cl.UploadWithContext, s.cdnConf.TimeoutDuration())
// TODO: take out lease on the /list file and add the version to it
//
// Do that only after module source+metadata is uploaded
err := moduploader.Upload(ctx, module, version, bytes.NewReader(info), bytes.NewReader(mod), zip, s.cl.UploadWithContext, s.conf.TimeoutDuration())
if err != nil {
return errors.E(op, err, errors.M(module), errors.V(version))
}
return nil
}

func (c *azureBlobStoreClient) UploadWithContext(ctx context.Context, path, contentType string, content io.Reader) error {
const op errors.Op = "azure.UploadWithContext"
ctx, span := observ.StartSpan(ctx, op.String())
defer span.End()
blobURL := c.containerURL.NewBlockBlobURL(path)
emptyMeta := map[string]string{}
emptyBlobAccessCond := azblob.BlobAccessConditions{}
httpHeaders := func(contentType string) azblob.BlobHTTPHeaders {
return azblob.BlobHTTPHeaders{
ContentType: contentType,
}
}
bufferSize := 1 * 1024 * 1024 // Size of the rotating buffers that are used when uploading
maxBuffers := 3 // Number of rotating buffers that are used when uploading

uploadStreamOpts := azblob.UploadStreamToBlockBlobOptions{
BufferSize: bufferSize,
MaxBuffers: maxBuffers,
BlobHTTPHeaders: httpHeaders(contentType),
Metadata: emptyMeta,
AccessConditions: emptyBlobAccessCond,
}
_, err := azblob.UploadStreamToBlockBlob(ctx, content, blobURL, uploadStreamOpts)
if err != nil {
return errors.E(op, err)
}
return nil
}
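A hypothetical caller of the new constructor, for illustration only; the import path and all values below are assumptions rather than part of this diff.

```go
package main

import (
	"bytes"
	"context"
	"log"

	"github.com/gomods/athens/pkg/config"
	"github.com/gomods/athens/pkg/storage/azure" // assumed package path after the rename
)

func main() {
	// Placeholder credentials; in Athens these come from the ATHENS_AZURE_* env vars.
	conf := &config.AzureConfig{
		TimeoutConf:   config.TimeoutConf{Timeout: 300},
		AccountName:   "myaccount",
		AccountKey:    "bXlrZXk=",
		ContainerName: "gomods",
	}

	store, err := azure.New(conf)
	if err != nil {
		log.Fatal(err)
	}

	// Save(ctx, module, version, mod, zip, info) per the storage.Saver interface.
	err = store.Save(context.Background(),
		"github.com/example/mod", "v1.0.0",
		[]byte("module github.com/example/mod\n"),
		bytes.NewReader(nil), // zip payload elided in this sketch
		[]byte(`{"Version":"v1.0.0"}`),
	)
	if err != nil {
		log.Fatal(err)
	}
}
```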
@@ -6,7 +6,6 @@ import (
"testing"
"time"

"github.com/gomods/athens/pkg/config"
"github.com/stretchr/testify/suite"
)

@@ -32,7 +31,7 @@ func (g *GcpTests) SetupSuite() {
g.version = "v1.2.3"
g.url, _ = url.Parse("https://storage.googleapis.com/testbucket")
g.bucket = newBucketMock()
g.store = newWithBucket(g.bucket, g.url, time.Second, &config.CDNConfig{})
g.store = newWithBucket(g.bucket, g.url, time.Second)
}

func TestGcpStorage(t *testing.T) {
+2 -15

@@ -19,7 +19,6 @@ type Storage struct {
baseURI *url.URL
closeStorage func() error
projectID string
cdnConf *config.CDNConfig
timeout time.Duration
}

@@ -31,7 +30,7 @@ type Storage struct {
// to the path of your service account file. If you're running on GCP (e.g. AppEngine),
// credentials will be automatically provided.
// See https://cloud.google.com/docs/authentication/getting-started.
func New(ctx context.Context, gcpConf *config.GCPConfig, cdnConf *config.CDNConfig) (*Storage, error) {
func New(ctx context.Context, gcpConf *config.GCPConfig) (*Storage, error) {
const op errors.Op = "gcp.New"
storage, err := storage.NewClient(ctx)
if err != nil {

@@ -52,7 +51,6 @@ func New(ctx context.Context, gcpConf *config.GCPConfig, cdnConf *config.CDNConf
bucket: &bkt,
baseURI: u,
closeStorage: storage.Close,
cdnConf: cdnConf,
timeout: gcpConf.TimeoutDuration(),
}, nil
}

@@ -66,26 +64,15 @@ func bucketExistsErr(err error) bool {
return apiErr.Code == http.StatusConflict
}

func newWithBucket(bkt Bucket, uri *url.URL, timeout time.Duration, cdnConf *config.CDNConfig) *Storage {
func newWithBucket(bkt Bucket, uri *url.URL, timeout time.Duration) *Storage {
return &Storage{
bucket: bkt,
baseURI: uri,
closeStorage: func() error { return nil },
timeout: timeout,
cdnConf: cdnConf,
}
}

// BaseURL returns the base URL that stores all modules. It can be used
// in the "meta" tag redirect response to vgo.
//
// For example:
//
// <meta name="go-import" content="gomods.com/athens mod BaseURL()">
func (s *Storage) BaseURL() *url.URL {
return s.cdnConf.CDNEndpointWithDefault(s.baseURI)
}

// Close calls the underlying storage client's close method
// It is not required to be called on program exit but provided here
// for completness.
+1 -16

@@ -28,11 +28,10 @@ type Storage struct {
uploader s3manageriface.UploaderAPI
s3API s3iface.S3API
s3Conf *config.S3Config
cdnConf *config.CDNConfig
}

// New creates a new AWS S3 CDN saver
func New(s3Conf *config.S3Config, cdnConf *config.CDNConfig, options ...func(*aws.Config)) (*Storage, error) {
func New(s3Conf *config.S3Config, options ...func(*aws.Config)) (*Storage, error) {
const op errors.Op = "s3.New"
u, err := url.Parse(fmt.Sprintf("https://%s.s3.amazonaws.com", s3Conf.Bucket))
if err != nil {

@@ -60,20 +59,6 @@ func New(s3Conf *config.S3Config, cdnConf *config.CDNConfig, options ...func(*aw
uploader: uploader,
s3API: uploader.S3,
baseURI: u,
cdnConf: cdnConf,
s3Conf: s3Conf,
}, nil
}

// BaseURL returns the base URL that stores all modules. It can be used
// in the "meta" tag redirect response to vgo.
//
// For example:
//
// <meta name="go-import" content="gomods.com/athens mod BaseURL()">
func (s Storage) BaseURL() *url.URL {
if s.cdnConf == nil {
return s.baseURI
}
return s.cdnConf.CDNEndpointWithDefault(s.baseURI)
}
@@ -83,7 +83,6 @@ func getStorage(t testing.TB) *Storage {
Timeout: 300,
},
},
nil,
options,
)