Plumb config - Latest (#627)

* switch proxy to config file

pull in single flight changes

* changes for single-flight

* intermediate stage. All tests passing. pkg still has env refs

* remove all env references

* delete config/env entirely

* fix failing tests

* create the config.toml file as part of dev setup

* create config file only if it doesn't exist

* update Dockerfiles to use config file

* move composing elements to the top

* verbose parameter naming

* newline

* add flag for config file path

* update docs with config file flag

* remove unnecessary nil check

* use filepath.join

* rename redis port to address

* fix path.join

* fix issues after merge

* add vendor dir
This commit is contained in:
Rohan Chakravarthy
2018-09-11 15:04:20 -07:00
committed by Aaron Schlesinger
parent bb250437da
commit 0e470d0294
207 changed files with 3397 additions and 19365 deletions
+4
View File
@@ -29,3 +29,7 @@ cmd/olympus/bin
cmd/proxy/bin
.idea
.DS_Store
# prod config file
config.toml
+1
View File
@@ -67,6 +67,7 @@ alldeps:
dev:
docker-compose -p athensdev up -d mongo
docker-compose -p athensdev up -d redis
./scripts/create_default_config.sh
.PHONY: down
down:
+2 -2
View File
@@ -32,8 +32,8 @@ spec:
value: {{ .Values.storage.mongo.url | quote }}
{{- end }}
# TODO: re-enable when workers are used
#- name: ATHENS_REDIS_QUEUE_PORT
# value: {{ .Values.redisPort | quote }}
#- name: ATHENS_REDIS_QUEUE_ADDRESS
# value: {{ .Values.redis.address | quote }}
ports:
- containerPort: 3000
{{- if eq .Values.storage.type "disk" }}
+1 -1
View File
@@ -45,7 +45,7 @@ storage:
# Worker is disabled right now. When it's turned on, we can use helm dependencies to start this up!
#redis:
# useEmbedded: false
# port: 6379
# address: ":6379"
# username:
# password:
# host:
+4 -1
View File
@@ -17,6 +17,8 @@ WORKDIR $GOPATH/src/github.com/gomods/athens
ADD . .
RUN cd cmd/olympus && buffalo build -s -o /bin/app
RUN scripts/create_default_config.sh
COPY config.toml /bin/config.toml
FROM alpine
RUN apk add --no-cache bash
@@ -25,6 +27,7 @@ RUN apk add --no-cache ca-certificates
WORKDIR /bin/
COPY --from=builder /bin/app .
COPY --from=builder /bin/config.toml .
# Comment out to run the binary in "production" mode:
# ENV GO_ENV=production
@@ -36,4 +39,4 @@ EXPOSE 3000
# Comment out to run the migrations before running the binary:
# CMD /bin/app migrate; /bin/app
CMD exec /bin/app
CMD exec /bin/app -config_file=config.toml
+9 -17
View File
@@ -2,17 +2,20 @@ package actions
import (
"encoding/json"
"path/filepath"
"testing"
"time"
"github.com/gobuffalo/gocraft-work-adapter"
"github.com/gobuffalo/suite"
"github.com/gocraft/work"
"github.com/gomods/athens/pkg/config/env"
"github.com/gomods/athens/pkg/config"
"github.com/gomods/athens/pkg/eventlog"
"github.com/gomods/athens/pkg/eventlog/mongo"
"github.com/gomods/athens/pkg/payloads"
"github.com/gomods/athens/pkg/storage/mem"
)
var (
testConfigFile = filepath.Join("..", "..", "..", "config.test.toml")
)
type ActionSuite struct {
@@ -20,22 +23,11 @@ type ActionSuite struct {
}
func Test_ActionSuite(t *testing.T) {
stg, err := mem.NewStorage()
conf := config.GetConfLogErr(testConfigFile, t)
app, err := App(conf)
if err != nil {
t.Fatalf("error creating storage (%s)", err)
t.Fatalf("Failed to initialize app: %s", err)
}
mURI := env.MongoConnectionString()
certPath := env.MongoCertPath()
eLog, err := mongo.NewLog(mURI, certPath)
if err != nil {
t.Fatalf("error creating event log (%s)", err)
}
config := AppConfig{
Storage: stg,
EventLog: eLog,
CacheMissesLog: eLog,
}
app, err := App(&config)
as := &ActionSuite{suite.NewAction(app)}
suite.Run(t, as)
}
+63 -38
View File
@@ -1,7 +1,9 @@
package actions
import (
"fmt"
stdlog "log"
"time"
"github.com/gobuffalo/buffalo"
"github.com/gobuffalo/buffalo/middleware"
@@ -13,7 +15,7 @@ import (
"github.com/gobuffalo/packr"
"github.com/gocraft/work"
"github.com/gomods/athens/pkg/cdn/metadata/azurecdn"
"github.com/gomods/athens/pkg/config/env"
"github.com/gomods/athens/pkg/config"
"github.com/gomods/athens/pkg/download"
"github.com/gomods/athens/pkg/eventlog"
"github.com/gomods/athens/pkg/log"
@@ -22,15 +24,19 @@ import (
"github.com/gomods/athens/pkg/storage"
"github.com/gomodule/redigo/redis"
"github.com/rs/cors"
"github.com/sirupsen/logrus"
"github.com/spf13/afero"
"github.com/unrolled/secure"
)
// AppConfig contains dependencies used in App
type AppConfig struct {
Storage storage.Backend
EventLog eventlog.Eventlog
CacheMissesLog eventlog.Appender
type workerConfig struct {
store storage.Backend
eLog eventlog.Eventlog
wType string
redisEndpoint string
maxConc int
maxFails uint
downloadTimeout time.Duration
}
const (
@@ -47,9 +53,6 @@ var (
workerModuleKey = "module"
workerVersionKey = "version"
workerPushNotificationKey = "push-notification"
// ENV is used to help switch settings based on where the
// application is being run. Default is "development".
ENV = env.GoEnvironmentWithDefault("development")
// T is buffalo Translator
T *i18n.Translator
)
@@ -59,25 +62,48 @@ const Service = "olympus"
// App is where all routes and middleware for buffalo should be defined.
// This is the nerve center of your application.
func App(config *AppConfig) (*buffalo.App, error) {
port := env.Port(":3001")
func App(conf *config.Config) (*buffalo.App, error) {
// ENV is used to help switch settings based on where the
// application is being run. Default is "development".
ENV := conf.GoEnv
port := conf.Olympus.Port
w, err := getWorker(config.Storage, config.EventLog)
storage, err := GetStorage(conf.Olympus.StorageType, conf.Storage)
if err != nil {
return nil, err
}
if conf.Storage == nil || conf.Storage.Mongo == nil {
return nil, fmt.Errorf("A valid Mongo configuration is required to create the event log")
}
eLog, err := GetEventLog(conf.Storage.Mongo.URL, conf.Storage.Mongo.CertPath, conf.Storage.Mongo.TimeoutDuration())
if err != nil {
return nil, fmt.Errorf("error creating eventlog (%s)", err)
}
wConf := workerConfig{
store: storage,
eLog: eLog,
wType: conf.Olympus.WorkerType,
maxConc: conf.MaxConcurrency,
maxFails: conf.MaxWorkerFails,
downloadTimeout: conf.TimeoutDuration(),
redisEndpoint: conf.Olympus.RedisQueueAddress,
}
w, err := getWorker(wConf)
if err != nil {
return nil, err
}
lvl, err := env.LogLevel()
logLvl, err := logrus.ParseLevel(conf.LogLevel)
if err != nil {
return nil, err
}
lggr := log.New(env.CloudRuntime(), lvl)
lggr := log.New(conf.CloudRuntime, logLvl)
blvl, err := env.BuffaloLogLevel()
bLogLvl, err := logrus.ParseLevel(conf.BuffaloLogLevel)
if err != nil {
return nil, err
}
blggr := log.Buffalo(blvl)
blggr := log.Buffalo(bLogLvl)
app := buffalo.New(buffalo.Options{
Addr: port,
@@ -102,7 +128,7 @@ func App(config *AppConfig) (*buffalo.App, error) {
}
// Protect against CSRF attacks. https://www.owasp.org/index.php/Cross-Site_Request_Forgery_(CSRF)
// Remove to disable this.
if env.EnableCSRFProtection() {
if conf.EnableCSRFProtection {
csrfMiddleware := csrf.New
app.Use(csrfMiddleware)
}
@@ -120,23 +146,23 @@ func App(config *AppConfig) (*buffalo.App, error) {
}
app.Use(T.Middleware())
app.GET("/diff/{lastID}", diffHandler(config.Storage, config.EventLog))
app.GET("/feed/{lastID}", feedHandler(config.Storage))
app.GET("/eventlog/{sequence_id}", eventlogHandler(config.EventLog))
app.GET("/diff/{lastID}", diffHandler(storage, eLog))
app.GET("/feed/{lastID}", feedHandler(storage))
app.GET("/eventlog/{sequence_id}", eventlogHandler(eLog))
app.POST("/cachemiss", cachemissHandler(w))
app.POST("/push", pushNotificationHandler(w))
app.GET("/healthz", healthHandler)
// Download Protocol
goBin := env.GoBinPath()
goBin := conf.GoBinary
fs := afero.NewOsFs()
mf, err := module.NewGoGetFetcher(goBin, fs)
if err != nil {
return nil, err
}
st := stash.New(mf, config.Storage)
st := stash.New(mf, storage)
dpOpts := &download.Opts{
Storage: config.Storage,
Storage: storage,
Stasher: st,
GoBinPath: goBin,
Fs: fs,
@@ -151,46 +177,45 @@ func App(config *AppConfig) (*buffalo.App, error) {
return app, nil
}
func getWorker(store storage.Backend, eLog eventlog.Eventlog) (worker.Worker, error) {
workerType := env.OlympusBackgroundWorkerType()
switch workerType {
func getWorker(wConf workerConfig) (worker.Worker, error) {
switch wConf.wType {
case "redis":
return registerRedis(store, eLog)
return registerRedis(wConf)
case "memory":
return registerInMem(store, eLog)
return registerInMem(wConf)
default:
stdlog.Printf("Provided background worker type %s. Expected redis|memory. Defaulting to memory", workerType)
return registerInMem(store, eLog)
stdlog.Printf("Provided background worker type %s. Expected redis|memory. Defaulting to memory", wConf.wType)
return registerInMem(wConf)
}
}
func registerInMem(store storage.Backend, eLog eventlog.Eventlog) (worker.Worker, error) {
func registerInMem(wConf workerConfig) (worker.Worker, error) {
w := worker.NewSimple()
if err := w.Register(PushNotificationHandlerName, GetProcessPushNotificationJob(store, eLog)); err != nil {
if err := w.Register(PushNotificationHandlerName, GetProcessPushNotificationJob(wConf.store, wConf.eLog, wConf.downloadTimeout)); err != nil {
return nil, err
}
return w, nil
}
func registerRedis(store storage.Backend, eLog eventlog.Eventlog) (worker.Worker, error) {
port := env.OlympusRedisQueuePortWithDefault(":6379")
func registerRedis(wConf workerConfig) (worker.Worker, error) {
addr := wConf.redisEndpoint
w := gwa.New(gwa.Options{
Pool: &redis.Pool{
MaxActive: 5,
MaxIdle: 5,
Wait: true,
Dial: func() (redis.Conn, error) {
return redis.Dial("tcp", port)
return redis.Dial("tcp", addr)
},
},
Name: OlympusWorkerName,
MaxConcurrency: env.AthensMaxConcurrency(),
MaxConcurrency: wConf.maxConc,
})
opts := work.JobOptions{
SkipDead: true,
MaxFails: env.WorkerMaxFails(),
MaxFails: wConf.maxFails,
}
return w, w.RegisterWithOptions(PushNotificationHandlerName, opts, GetProcessPushNotificationJob(store, eLog))
return w, w.RegisterWithOptions(PushNotificationHandlerName, opts, GetProcessPushNotificationJob(wConf.store, wConf.eLog, wConf.downloadTimeout))
}
+17 -11
View File
@@ -1,23 +1,29 @@
package actions
import (
"github.com/gomods/athens/pkg/config/env"
"time"
"github.com/gomods/athens/pkg/errors"
"github.com/gomods/athens/pkg/eventlog"
"github.com/gomods/athens/pkg/eventlog/mongo"
)
// GetEventLog returns implementation of eventlog.EventLog
func GetEventLog() (eventlog.Eventlog, error) {
connectionString := env.MongoConnectionString()
certPath := env.MongoCertPath()
l, err := mongo.NewLog(connectionString, certPath)
return l, err
func GetEventLog(mongoURL string, certPath string, timeout time.Duration) (eventlog.Eventlog, error) {
const op = "actions.GetEventLog"
l, err := mongo.NewLog(mongoURL, certPath, timeout)
if err != nil {
return nil, errors.E(op, err)
}
return l, nil
}
// NewCacheMissesLog returns impl. of eventlog.Appender
func NewCacheMissesLog() (eventlog.Appender, error) {
connectionString := env.MongoConnectionString()
certPath := env.MongoCertPath()
l, err := mongo.NewLogWithCollection(connectionString, certPath, "cachemisseslog")
return l, err
func NewCacheMissesLog(mongoURL string, certPath string, timeout time.Duration) (eventlog.Appender, error) {
const op = "actions.NewCacheMissesLog"
l, err := mongo.NewLogWithCollection(mongoURL, certPath, "cachemisseslog", timeout)
if err != nil {
return nil, errors.E(op, err)
}
return l, nil
}
+4 -5
View File
@@ -5,7 +5,6 @@ import (
"log"
"time"
"github.com/gomods/athens/pkg/config/env"
"github.com/gomods/athens/pkg/eventlog"
"github.com/gomods/athens/pkg/module"
"github.com/gomods/athens/pkg/storage"
@@ -23,10 +22,10 @@ import (
// - Delete operation adds tombstone to module metadata k/v store
//
// Both could be fixed by putting each 'for' loop into a (global) critical section
func mergeDB(ctx context.Context, originURL string, diff dbDiff, eLog eventlog.Eventlog, storage storage.Backend, downloader module.Downloader) error {
func mergeDB(ctx context.Context, originURL string, diff dbDiff, eLog eventlog.Eventlog, storage storage.Backend, downloader module.Downloader, downloadTimeout time.Duration) error {
var errors error
for _, added := range diff.Added {
if err := add(ctx, added, originURL, eLog, storage, downloader); err != nil {
if err := add(ctx, added, originURL, eLog, storage, downloader, downloadTimeout); err != nil {
errors = multierror.Append(errors, err)
}
}
@@ -43,7 +42,7 @@ func mergeDB(ctx context.Context, originURL string, diff dbDiff, eLog eventlog.E
return errors
}
func add(ctx context.Context, event eventlog.Event, originURL string, eLog eventlog.Eventlog, storage storage.Backend, downloader module.Downloader) error {
func add(ctx context.Context, event eventlog.Event, originURL string, eLog eventlog.Eventlog, storage storage.Backend, downloader module.Downloader, downloadTimeout time.Duration) error {
if _, err := eLog.ReadSingle(event.Module, event.Version); err != nil {
// the module/version already exists, is deprecated, or is
// tombstoned, so nothing to do
@@ -51,7 +50,7 @@ func add(ctx context.Context, event eventlog.Event, originURL string, eLog event
}
// download code from the origin
data, err := downloader(ctx, env.Timeout(), originURL, event.Module, event.Version)
data, err := downloader(ctx, downloadTimeout, originURL, event.Module, event.Version)
if err != nil {
log.Printf("error downloading new module %s/%s from %s (%s)", event.Module, event.Version, originURL, err)
return err
+3 -2
View File
@@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
"errors"
"time"
"github.com/gobuffalo/buffalo"
"github.com/gobuffalo/buffalo/worker"
@@ -35,7 +36,7 @@ func pushNotificationHandler(w worker.Worker) func(c buffalo.Context) error {
}
// GetProcessPushNotificationJob processes queue of push notifications
func GetProcessPushNotificationJob(storage storage.Backend, eLog eventlog.Eventlog) worker.Handler {
func GetProcessPushNotificationJob(storage storage.Backend, eLog eventlog.Eventlog, downloadTimeout time.Duration) worker.Handler {
return func(args worker.Args) (err error) {
// TODO: background for now
ctx := context.Background()
@@ -47,7 +48,7 @@ func GetProcessPushNotificationJob(storage storage.Backend, eLog eventlog.Eventl
if err != nil {
return err
}
return mergeDB(ctx, pn.OriginURL, *diff, eLog, storage, module.Download)
return mergeDB(ctx, pn.OriginURL, *diff, eLog, storage, module.Download, downloadTimeout)
}
}
+11 -9
View File
@@ -3,7 +3,8 @@ package actions
import (
"fmt"
"github.com/gomods/athens/pkg/config/env"
"github.com/gomods/athens/pkg/config"
"github.com/gomods/athens/pkg/errors"
"github.com/gomods/athens/pkg/storage"
"github.com/gomods/athens/pkg/storage/fs"
"github.com/gomods/athens/pkg/storage/mem"
@@ -12,25 +13,26 @@ import (
)
// GetStorage returns storage.Backend implementation
func GetStorage() (storage.Backend, error) {
storageType := env.StorageTypeWithDefault("memory")
func GetStorage(storageType string, storageConfig *config.StorageConfig) (storage.Backend, error) {
const op errors.Op = "actions.GetStorage"
switch storageType {
case "memory":
return mem.NewStorage()
case "disk":
rootLocation, err := env.DiskRoot()
if err != nil {
return nil, err
if storageConfig.Disk == nil {
return nil, errors.E(op, "Invalid Disk Storage Configuration")
}
rootLocation := storageConfig.Disk.RootPath
s, err := fs.NewStorage(rootLocation, afero.NewOsFs())
if err != nil {
return nil, fmt.Errorf("could not create new storage from os fs (%s)", err)
}
return s, nil
case "mongo":
connectionString := env.MongoConnectionString()
certPath := env.MongoCertPath()
return mongo.NewStorageWithCert(connectionString, certPath)
if storageConfig.Mongo == nil {
return nil, errors.E(op, "Invalid Mongo Storage Configuration")
}
return mongo.NewStorage(storageConfig.Mongo)
default:
return nil, fmt.Errorf("storage type %s is unknown", storageType)
}
+16 -35
View File
@@ -1,16 +1,28 @@
package main
import (
"fmt"
"flag"
"log"
"path/filepath"
"github.com/gobuffalo/buffalo"
"github.com/gomods/athens/cmd/olympus/actions"
"github.com/gomods/athens/pkg/storage"
"github.com/gomods/athens/pkg/config"
)
var (
configFile = flag.String("config_file", filepath.Join("..", "..", "config.toml"), "The path to the config file")
)
func main() {
app, err := setupApp()
flag.Parse()
if configFile == nil {
log.Fatal("Invalid config file path provided")
}
conf, err := config.ParseConfigFile(*configFile)
if err != nil {
log.Fatal(err)
}
app, err := actions.App(conf)
if err != nil {
log.Fatal(err)
}
@@ -19,34 +31,3 @@ func main() {
log.Fatal(err)
}
}
func setupApp() (*buffalo.App, error) {
storage, err := getStorage()
if err != nil {
log.Fatalf("error creating storage (%s)", err)
}
eLog, err := actions.GetEventLog()
if err != nil {
log.Fatalf("error creating eventlog (%s)", err)
}
cacheMissesLog, err := actions.NewCacheMissesLog()
if err != nil {
log.Fatalf("error creating cachemisses log (%s)", err)
}
config := actions.AppConfig{
Storage: storage,
EventLog: eLog,
CacheMissesLog: cacheMissesLog,
}
return actions.App(&config)
}
func getStorage() (storage.Backend, error) {
storage, err := actions.GetStorage()
if err != nil {
return nil, fmt.Errorf("error creating storage (%s)", err)
}
return storage, nil
}
+3 -2
View File
@@ -5,10 +5,11 @@ WORKDIR $GOPATH/src/github.com/gomods/athens
COPY . .
RUN CGO_ENABLED=0 go build -mod=vendor -o /bin/app ./cmd/proxy
RUN CGO_ENABLED=0 GO111MODULE=on go build -mod=vendor -o /bin/app ./cmd/proxy
RUN ./scripts/create_default_config.sh
ENV GO_ENV=production
EXPOSE 3000
ENTRYPOINT ["/bin/app"]
ENTRYPOINT /bin/app -config_file=config.toml
+8 -1
View File
@@ -1,9 +1,15 @@
package actions
import (
"path/filepath"
"testing"
"github.com/gobuffalo/suite"
"github.com/gomods/athens/pkg/config"
)
var (
testConfigFile = filepath.Join("..", "..", "..", "config.test.toml")
)
type ActionSuite struct {
@@ -11,7 +17,8 @@ type ActionSuite struct {
}
func Test_ActionSuite(t *testing.T) {
app, err := App()
conf := config.GetConfLogErr(testConfigFile, t)
app, err := App(conf)
if err != nil {
t.Fatal(err)
}
+24 -24
View File
@@ -12,21 +12,18 @@ import (
"github.com/gobuffalo/buffalo/middleware/ssl"
"github.com/gobuffalo/buffalo/render"
"github.com/gobuffalo/packr"
"github.com/gomods/athens/pkg/config/env"
"github.com/gomods/athens/pkg/config"
"github.com/gomods/athens/pkg/log"
mw "github.com/gomods/athens/pkg/middleware"
"github.com/gomods/athens/pkg/module"
"github.com/rs/cors"
"github.com/sirupsen/logrus"
"github.com/unrolled/secure"
)
// Service is the name of the service that we want to tag our processes with
const Service = "proxy"
// ENV is used to help switch settings based on where the
// application is being run. Default is "development".
var ENV = env.GoEnvironmentWithDefault("development")
// T is the translator to use
var T *i18n.Translator
@@ -48,8 +45,11 @@ func init() {
// App is where all routes and middleware for buffalo
// should be defined. This is the nerve center of your
// application.
func App() (*buffalo.App, error) {
store, err := GetStorage()
func App(conf *config.Config) (*buffalo.App, error) {
// ENV is used to help switch settings based on where the
// application is being run. Default is "development".
ENV := conf.GoEnv
store, err := GetStorage(conf.Proxy.StorageType, conf.Storage)
if err != nil {
err = fmt.Errorf("error getting storage configuration (%s)", err)
return nil, err
@@ -57,19 +57,19 @@ func App() (*buffalo.App, error) {
// mount .netrc to home dir
// to have access to private repos.
initializeNETRC()
initializeNETRC(conf.Proxy.NETRCPath)
lvl, err := env.LogLevel()
logLvl, err := logrus.ParseLevel(conf.LogLevel)
if err != nil {
return nil, err
}
lggr := log.New(env.CloudRuntime(), lvl)
lggr := log.New(conf.CloudRuntime, logLvl)
blvl, err := env.BuffaloLogLevel()
bLogLvl, err := logrus.ParseLevel(conf.BuffaloLogLevel)
if err != nil {
return nil, err
}
blggr := log.Buffalo(blvl)
blggr := log.Buffalo(bLogLvl)
app := buffalo.New(buffalo.Options{
Env: ENV,
@@ -78,9 +78,9 @@ func App() (*buffalo.App, error) {
},
SessionName: "_athens_session",
Logger: blggr,
Addr: env.Port(":3000"),
Addr: conf.Proxy.Port,
})
if prefix := env.AthensPathPrefix(); prefix != "" {
if prefix := conf.Proxy.PathPrefix; prefix != "" {
// certain Ingress Controllers (such as GCP Load Balancer)
// can not send custom headers and therefore if the proxy
// is running behind a prefix as well as some authentication
@@ -90,7 +90,7 @@ func App() (*buffalo.App, error) {
}
// Register exporter to export traces
exporter, err := observ.RegisterTraceExporter(Service)
exporter, err := observ.RegisterTraceExporter(Service, ENV)
if err != nil {
lggr.SystemErr(err)
} else {
@@ -100,7 +100,7 @@ func App() (*buffalo.App, error) {
// Automatically redirect to SSL
app.Use(ssl.ForceSSL(secure.Options{
SSLRedirect: env.ProxyForceSSL(),
SSLRedirect: conf.Proxy.ForceSSL,
SSLProxyHeaders: map[string]string{"X-Forwarded-Proto": "https"},
}))
@@ -111,7 +111,7 @@ func App() (*buffalo.App, error) {
initializeAuth(app)
// Protect against CSRF attacks. https://www.owasp.org/index.php/Cross-Site_Request_Forgery_(CSRF)
// Remove to disable this.
if env.EnableCSRFProtection() {
if conf.EnableCSRFProtection {
csrfMiddleware := csrf.New
app.Use(csrfMiddleware)
}
@@ -121,22 +121,22 @@ func App() (*buffalo.App, error) {
}
app.Use(T.Middleware())
if !env.FilterOff() {
mf := module.NewFilter()
app.Use(mw.NewFilterMiddleware(mf))
if !conf.Proxy.FilterOff {
mf := module.NewFilter(conf.FilterFile)
app.Use(mw.NewFilterMiddleware(mf, conf.Proxy.OlympusGlobalEndpoint))
}
// Having the hook set means we want to use it
if validatorHook, ok := env.ValidatorHook(); ok {
app.Use(mw.LogEntryMiddleware(mw.NewValidationMiddleware, lggr, validatorHook))
if vHook := conf.Proxy.ValidatorHook; vHook != "" {
app.Use(mw.LogEntryMiddleware(mw.NewValidationMiddleware, lggr, vHook))
}
user, pass, ok := env.BasicAuth()
user, pass, ok := conf.Proxy.BasicAuth()
if ok {
app.Use(basicAuth(user, pass))
}
if err := addProxyRoutes(app, store, lggr); err != nil {
if err := addProxyRoutes(app, store, lggr, conf.GoBinary, conf.GoGetWorkers, conf.ProtocolWorkers); err != nil {
err = fmt.Errorf("error adding proxy routes (%s)", err)
return nil, err
}
+5 -4
View File
@@ -2,7 +2,6 @@ package actions
import (
"github.com/gobuffalo/buffalo"
"github.com/gomods/athens/pkg/config/env"
"github.com/gomods/athens/pkg/download"
"github.com/gomods/athens/pkg/download/addons"
"github.com/gomods/athens/pkg/log"
@@ -16,6 +15,9 @@ func addProxyRoutes(
app *buffalo.App,
s storage.Backend,
l *log.Logger,
goBin string,
goGetWorkers int,
protocolWorkers int,
) error {
app.GET("/", proxyHomeHandler)
app.GET("/healthz", healthHandler)
@@ -42,13 +44,12 @@ func addProxyRoutes(
// 2. The singleflight passes the stash to its parent: stashpool.
// 3. The stashpool manages limiting concurrent requests and passes them to stash.
// 4. The plain stash.New just takes a request from upstream and saves it into storage.
goBin := env.GoBinPath()
fs := afero.NewOsFs()
mf, err := module.NewGoGetFetcher(goBin, fs)
if err != nil {
return err
}
st := stash.New(mf, s, stash.WithPool(env.GoGetWorkers()), stash.WithSingleflight)
st := stash.New(mf, s, stash.WithPool(goGetWorkers), stash.WithSingleflight)
dpOpts := &download.Opts{
Storage: s,
@@ -56,7 +57,7 @@ func addProxyRoutes(
GoBinPath: goBin,
Fs: fs,
}
dp := download.New(dpOpts, addons.WithPool(env.ProtocolWorkers()))
dp := download.New(dpOpts, addons.WithPool(protocolWorkers))
handlerOpts := &download.HandlerOpts{Protocol: dp, Logger: l, Engine: proxy}
download.RegisterHandlers(app, handlerOpts)
+4 -6
View File
@@ -5,22 +5,20 @@ import (
"log"
"path/filepath"
"github.com/gomods/athens/pkg/config/env"
"github.com/mitchellh/go-homedir"
)
// initializeNETRC checks if .netrc is at a pre-configured path
// and moves to ~/.netrc -- note that this will override whatever
// .netrc you have in your home directory.
func initializeNETRC() {
p := env.NETRCPath()
if p == "" {
func initializeNETRC(path string) {
if path == "" {
return
}
netrcBts, err := ioutil.ReadFile(p)
netrcBts, err := ioutil.ReadFile(path)
if err != nil {
log.Fatalf("could not read %s: %v", p, err)
log.Fatalf("could not read %s: %v", path, err)
}
hdir, err := homedir.Dir()
+25 -34
View File
@@ -3,9 +3,9 @@ package actions
import (
"context"
"fmt"
"strings"
"github.com/gomods/athens/pkg/config/env"
"github.com/gomods/athens/pkg/config"
"github.com/gomods/athens/pkg/errors"
"github.com/gomods/athens/pkg/storage"
"github.com/gomods/athens/pkg/storage/fs"
"github.com/gomods/athens/pkg/storage/gcp"
@@ -16,49 +16,40 @@ import (
)
// GetStorage returns storage backend based on env configuration
func GetStorage() (storage.Backend, error) {
storageType := env.StorageTypeWithDefault("memory")
var storageRoot string
var err error
func GetStorage(storageType string, storageConfig *config.StorageConfig) (storage.Backend, error) {
const op errors.Op = "actions.GetStorage"
switch storageType {
case "memory":
return mem.NewStorage()
case "mongo":
connectionString := env.MongoConnectionString()
certPath := env.MongoCertPath()
return mongo.NewStorageWithCert(connectionString, certPath)
case "disk":
storageRoot, err = env.DiskRoot()
if err != nil {
return nil, err
if storageConfig.Mongo == nil {
return nil, errors.E(op, "Invalid Mongo Storage Configuration")
}
s, err := fs.NewStorage(storageRoot, afero.NewOsFs())
return mongo.NewStorage(storageConfig.Mongo)
case "disk":
if storageConfig.Disk == nil {
return nil, errors.E(op, "Invalid Disk Storage Configuration")
}
rootLocation := storageConfig.Disk.RootPath
s, err := fs.NewStorage(rootLocation, afero.NewOsFs())
if err != nil {
return nil, fmt.Errorf("could not create new storage from os fs (%s)", err)
errStr := fmt.Sprintf("could not create new storage from os fs (%s)", err)
return nil, errors.E(op, errStr)
}
return s, nil
case "minio":
endpoint, err := env.MinioEndpoint()
if err != nil {
return nil, err
if storageConfig.Minio == nil {
return nil, errors.E(op, "Invalid Minio Storage Configuration")
}
accessKeyID, err := env.MinioAccessKeyID()
if err != nil {
return nil, err
}
secretAccessKey, err := env.MinioSecretAccessKey()
if err != nil {
return nil, err
}
bucketName := env.MinioBucketNameWithDefault("gomods")
useSSL := true
if useSSLVar := env.MinioSSLWithDefault("yes"); strings.ToLower(useSSLVar) == "no" {
useSSL = false
}
return minio.NewStorage(endpoint, accessKeyID, secretAccessKey, bucketName, useSSL)
return minio.NewStorage(storageConfig.Minio)
case "gcp":
return gcp.New(context.Background())
if storageConfig.GCP == nil {
return nil, errors.E(op, "Invalid GCP Storage Configuration")
}
if storageConfig.CDN == nil {
return nil, errors.E(op, "Invalid CDN Storage Configuration")
}
return gcp.New(context.Background(), storageConfig.GCP, storageConfig.CDN)
default:
return nil, fmt.Errorf("storage type %s is unknown", storageType)
}
+16 -1
View File
@@ -1,13 +1,28 @@
package main
import (
"flag"
"log"
"path/filepath"
"github.com/gomods/athens/cmd/proxy/actions"
"github.com/gomods/athens/pkg/config"
)
var (
configFile = flag.String("config_file", filepath.Join("..", "..", "config.toml"), "The path to the config file")
)
func main() {
app, err := actions.App()
flag.Parse()
if configFile == nil {
log.Fatal("Invalid config file path provided")
}
conf, err := config.ParseConfigFile(*configFile)
if err != nil {
log.Fatal(err)
}
app, err := actions.App(conf)
if err != nil {
log.Fatal(err)
}
+51 -4
View File
@@ -7,16 +7,38 @@
# Env override: GO_BINARY_PATH
GoBinary = "go"
# GoEnv returns the type of environment to run.
# GoEnv specifies the type of environment to run.
# Supported values are: 'development' and 'production'. Defaults to "development"
# Env override: GO_ENV
GoEnv = "development"
# GoGetWorkers specifies how many times you can concurrently
# go mod download, this is so that low performance instances
# can manage go get more sanely and not run out of disk or memory.
# Env override: ATHENS_GOGET_WORKERS
GoGetWorkers = 30
# ProtocolWorkers specifies how many concurrent
# requests can you handle at a time for all
# download protocol paths. This is different from
# GoGetWorkers in that you can potentially serve
# 30 requests to the Download Protocol but only 5
# at a time can stash a module from Upstream to Storage.
# Env override: ATHENS_PROTOCOL_WORKERS
ProtocolWorkers = 30
# LogLevel returns the system's exposure to internal logs. Defaults to debug.
# Supports all logrus log levels (https://github.com/Sirupsen/logrus#level-logging)
# Env override: ATHENS_LOG_LEVEL
LogLevel = "debug"
# BuffaloLogLevel returns the log level for logs
# emitted by Buffalo itself. This is different from our own
# LogLevel in this file because you might want info level
# for our codebase, but panic level for buffalo.
# Env override: BUFFALO_LOG_LEVEL
BuffaloLogLevel = "debug"
# CloudRuntime is the Cloud Provider on which the Proxy/Registry is running.
# Currently available options are "GCP", or "none". Defaults to "none"
# Env override: ATHENS_CLOUD_RUNTIME
@@ -62,7 +84,7 @@ EnableCSRFProtection = false
OlympusGlobalEndpoint = "http://localhost:3001"
# Redis queue for buffalo workers
# Defaults to ":6379"
# Env override: ATHENS_REDIS_QUEUE_PORT
# Env override: ATHENS_REDIS_QUEUE_ADDRESS
RedisQueueAddress = ":6379"
# Flag to turn off Proxy Filter middleware
# Defaults to true
@@ -74,6 +96,28 @@ EnableCSRFProtection = false
# Password for basic auth
# Env override: BASIC_AUTH_PASS
BasicAuthPass = ""
# Set to true to force an SSL redirect
# Env override: PROXY_FORCE_SSL
ForceSSL = false
# ValidatorHook specifies the endpoint to validate modules against
# Not used if left blank or not specified
# Env override: ATHENS_PROXY_VALIDATOR
ValidatorHook = ""
# PathPrefix specifies whether the Proxy
# should have a basepath. Certain proxies and services
# are distinguished based on subdomain, while others are based
# on path prefixes.
# Env override: ATHENS_PATH_PREFIX
PathPrefix = ""
# NETRCPath tells you where the .netrc path initially resides.
# This is so that you can mount the .netrc file to a secret location
# in the fs system and then move it ~/.netrc. In certain deployments
# like Kubernetes, we can't mount directly to ~ because it would then
# clean out whatever is already there as part of the image (such as
# .cache directory in the Go image).
# Env override: ATHENS_NETRC_PATH
NETRCPath = ""
[Olympus]
# StorageType sets the type of storage backend Olympus will use.
@@ -90,7 +134,7 @@ EnableCSRFProtection = false
WorkerType = "redis"
# Redis queue for buffalo workers
# Defaults to ":6379"
# Env override: OLYMPUS_REDIS_QUEUE_PORT
# Env override: OLYMPUS_REDIS_QUEUE_ADDRESS
RedisQueueAddress = ":6379"
[Storage]
@@ -140,7 +184,10 @@ EnableCSRFProtection = false
[Storage.Mongo]
# Full URL for mongo storage
# Env override: ATHENS_MONGO_STORAGE_URL
URL = "mongo.example.com"
URL = "mongodb://127.0.0.1:27017"
# Path to certificate to use for the mongo connection
# Env override: ATHENS_MONGO_CERT_PATH
CertPath = ""
# Timeout for networks calls made to Mongo in seconds
# Defaults to Global Timeout
# Env override: MONGO_CONN_TIMEOUT_SEC
+194
View File
@@ -0,0 +1,194 @@
# This is an example configuration with all supported properties explicitly set
# Most properties can be overridden with environment variables specified in this file
# Most properties also have defaults (mentioned in this file) if they are not set in either the config file or the corresponding environment variable
# GoBinary returns the path to the go binary to use. This value can be a name of a binary in your PATH, or the full path
# Defaults to "go"
# Env override: GO_BINARY_PATH
GoBinary = "go"
# GoEnv specifies the type of environment to run.
# Supported values are: 'development' and 'production'. Defaults to "development"
# Env override: GO_ENV
GoEnv = "development"
# GoGetWorkers specifies how many times you can concurrently
# go mod download, this is so that low performance instances
# can manage go get more sanely and not run out of disk or memory.
# Env override: ATHENS_GOGET_WORKERS
GoGetWorkers = 30
# ProtocolWorkers specifies how many concurrent
# requests can you handle at a time for all
# download protocol paths. This is different from
# GoGetWorkers in that you can potentially serve
# 30 requests to the Download Protocol but only 5
# at a time can stash a module from Upstream to Storage.
# Env override: ATHENS_PROTOCOL_WORKERS
ProtocolWorkers = 30
# LogLevel returns the system's exposure to internal logs. Defaults to debug.
# Supports all logrus log levels (https://github.com/Sirupsen/logrus#level-logging)
# Env override: ATHENS_LOG_LEVEL
LogLevel = "debug"
# BuffaloLogLevel returns the log level for logs
# emitted by Buffalo itself. This is different from our own
# LogLevel in this file because you might want info level
# for our codebase, but panic level for buffalo.
# Env override: BUFFALO_LOG_LEVEL
BuffaloLogLevel = "debug"
# CloudRuntime is the Cloud Provider on which the Proxy/Registry is running.
# Currently available options are "GCP", or "none". Defaults to "none"
# Env override: ATHENS_CLOUD_RUNTIME
CloudRuntime = "none"
# MaxConcurrency sets maximum level of concurrency
# Defaults to number of cores if not specified.
# Env override: ATHENS_MAX_CONCURRENCY
MaxConcurrency = 4
# The maximum number of failures for jobs submitted to buffalo workers
# Defaults to 5.
# Env override: ATHENS_MAX_WORKER_FAILS
MaxWorkerFails = 5
# The filename for the include exclude filter. Defaults to 'filter.conf'
# Env override: ATHENS_FILTER_FILE
FilterFile = "filter.conf"
# Timeout is the timeout for external network calls in seconds
# This value is used as the default for storage backends if they don't specify timeouts
# Defaults to 300
# Env override: ATHENS_TIMEOUT
Timeout = 1
# EnableCSRFProtection determines whether to enable CSRF protection.
# Defaults to false
# Env override: ATHENS_ENABLE_CSRF_PROTECTION
EnableCSRFProtection = false
[Proxy]
# StorageType sets the type of storage backend the proxy will use.
# Possible values are memory, disk, mongo, postgres, sqlite, cockroach, mysql
# Defaults to mongo
# Env override: ATHENS_STORAGE_TYPE
StorageType = "memory"
# Port sets the port the proxy listens on
# Env override: PORT
Port = ":3000"
# The endpoint for Olympus in case of a proxy cache miss
# Env override: OLYMPUS_GLOBAL_ENDPOINT
OlympusGlobalEndpoint = "http://localhost:3001"
# Redis queue for buffalo workers
# Defaults to ":6379"
# Env override: ATHENS_REDIS_QUEUE_ADDRESS
RedisQueueAddress = ":6379"
# Flag to turn off Proxy Filter middleware
# Defaults to true
# Env override: PROXY_FILTER_OFF
FilterOff = true
# Username for basic auth
# Env override: BASIC_AUTH_USER
BasicAuthUser = ""
# Password for basic auth
# Env override: BASIC_AUTH_PASS
BasicAuthPass = ""
# Set to true to force an SSL redirect
# Env override: PROXY_FORCE_SSL
ForceSSL = false
# ValidatorHook specifies the endpoint to validate modules against
# Not used if left blank or not specified
# Env override: ATHENS_PROXY_VALIDATOR
ValidatorHook = ""
# PathPrefix specifies whether the Proxy
# should have a basepath. Certain proxies and services
# are distinguished based on subdomain, while others are based
# on path prefixes.
# Env override: ATHENS_PATH_PREFIX
PathPrefix = ""
# NETRCPath tells you where the .netrc path initially resides.
# This is so that you can mount the .netrc file to a secret location
# in the fs system and then move it ~/.netrc. In certain deployments
# like Kubernetes, we can't mount directly to ~ because it would then
# clean out whatever is already there as part of the image (such as
# .cache directory in the Go image).
# Env override: ATHENS_NETRC_PATH
NETRCPath = ""
[Olympus]
# StorageType sets the type of storage backend Olympus will use.
# Possible values are memory, disk, mongo, postgres, sqlite, cockroach, mysql
# Defaults to memory
# Env override: ATHENS_STORAGE_TYPE
StorageType = "memory"
# Port sets the port olympus listens on
# Env override: PORT
Port = ":3001"
# Background worker type. Possible values are memory and redis
# Defaults to redis
# Env override: OLYMPUS_BACKGROUND_WORKER_TYPE
WorkerType = "redis"
# Redis queue for buffalo workers
# Defaults to ":6379"
# Env override: OLYMPUS_REDIS_QUEUE_ADDRESS
RedisQueueAddress = ":6379"
[Storage]
# Only storage backends that are specified in Proxy.StorageType or Olympus.StorageType are required here
[Storage.CDN]
# Endpoint for CDN storage
# Env override: CDN_ENDPOINT
Endpoint = "cdn.example.com"
# Timeout for networks calls made to the CDN in seconds
# Defaults to Global Timeout
Timeout = 1
[Storage.Disk]
# RootPath is the Athens Disk Root folder
# Env override: ATHENS_DISK_STORAGE_ROOT
RootPath = "/path/on/disk"
[Storage.GCP]
# ProjectID to use for GCP Storage
# Env override: GOOGLE_CLOUD_PROJECT
ProjectID = "MY_GCP_PROJECT_ID"
# Bucket to use for GCP Storage
# Env override: ATHENS_STORAGE_GCP_BUCKET
Bucket = "MY_GCP_BUCKET"
# Timeout for networks calls made to GCP in seconds
# Defaults to Global Timeout
Timeout = 1
[Storage.Minio]
# Endpoint for Minio storage
# Env override: ATHENS_MINIO_ENDPOINT
Endpoint = "minio.example.com"
# Access Key for Minio storage
# Env override: ATHENS_MINIO_ACCESS_KEY_ID
Key = "MY_KEY"
# Secret Key for Minio storage
# Env override: ATHENS_MINIO_SECRET_ACCESS_KEY
Secret = "MY_SECRET"
# Timeout for networks calls made to Minio in seconds
# Defaults to Global Timeout
Timeout = 1
# Enable SSL for Minio connections
# Defaults to true
# Env override: ATHENS_MINIO_USE_SSL
EnableSSL = true
# Minio Bucket to use for storage
# Defaults to gomods
# Env override: ATHENS_MINIO_BUCKET_NAME
Bucket = "gomods"
[Storage.Mongo]
# Full URL for mongo storage
# Env override: ATHENS_MONGO_STORAGE_URL
URL = "mongodb://127.0.0.1:27017"
# Path to certificate to use for the mongo connection
# Env override: ATHENS_MONGO_CERT_PATH
CertPath = ""
# Timeout for networks calls made to Mongo in seconds
# Defaults to Global Timeout
# Env override: MONGO_CONN_TIMEOUT_SEC
Timeout = 1
+2 -2
View File
@@ -59,7 +59,7 @@ $ mkdir -p $(go env GOPATH)/src/github.com/gomods
$ cd $(go env GOPATH)/src/github.com/gomods
$ git clone https://github.com/gomods/athens.git
$ cd athens
$ GO111MODULE=off go run ./cmd/proxy &
$ GO111MODULE=off go run ./cmd/proxy -config_file=config.example.toml &
[1] 25243
INFO[0000] Starting application at 127.0.0.1:3000
```
@@ -73,7 +73,7 @@ $ cd "$(go env GOPATH)\src\github.com\gomods"
$ git clone https://github.com/gomods/athens.git
$ cd athens
$ $env:GO111MODULE = "off"
$ Start-Process -NoNewWindow go "run .\cmd\proxy"
$ Start-Process -NoNewWindow go "run .\cmd\proxy -config_file=config.example.toml"
[1] 25243
INFO[0000] Starting application at 127.0.0.1:3000
```
+3 -1
View File
@@ -9,6 +9,7 @@ require (
github.com/BurntSushi/toml v0.3.0
github.com/aws/aws-sdk-go v1.15.24
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 // indirect
github.com/bketelsen/buffet v0.1.5
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd // indirect
github.com/codegangsta/negroni v0.3.0 // indirect
github.com/fatih/color v1.7.0
@@ -42,6 +43,7 @@ require (
github.com/mitchellh/go-homedir v1.0.0
github.com/onsi/ginkgo v1.6.0 // indirect
github.com/onsi/gomega v1.4.1 // indirect
github.com/opentracing/opentracing-go v1.0.2
github.com/prometheus/client_golang v0.8.0 // indirect
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 // indirect
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e // indirect
@@ -56,7 +58,7 @@ require (
github.com/technosophos/moniker v0.0.0-20180509230615-a5dbd03a2245
github.com/uber-go/atomic v1.3.2 // indirect
github.com/uber/jaeger-client-go v2.14.0+incompatible
github.com/uber/jaeger-lib v1.5.0
github.com/uber/jaeger-lib v1.5.0 // indirect
github.com/unrolled/secure v0.0.0-20180618144512-8287f3899c8e
go.opencensus.io v0.15.0
go.uber.org/atomic v1.3.2 // indirect
+4
View File
@@ -17,6 +17,8 @@ github.com/aws/aws-sdk-go v1.15.24 h1:xLAdTA/ore6xdPAljzZRed7IGqQgC+nY+ERS5vaj4R
github.com/aws/aws-sdk-go v1.15.24/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/bketelsen/buffet v0.1.5 h1:KQ+f1YK8APG9R3V/aBibRptLOkIJmDtHF/cvXDHCdOw=
github.com/bketelsen/buffet v0.1.5/go.mod h1:dN5bkZP+hUjYa/ky3N6xfKg3S/94wr5MeaKIgBaEIqY=
github.com/cockroachdb/cockroach-go v0.0.0-20180212155653-59c0560478b7 h1:XFqp7VFIbbJO1hlpGbzo45NVYWVIM2eMD9MAxrOTVzU=
github.com/cockroachdb/cockroach-go v0.0.0-20180212155653-59c0560478b7/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w=
@@ -171,6 +173,8 @@ github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.1 h1:PZSj/UFNaVp3KxrzHOcS7oyuWA7LoOY/77yCTEFu21U=
github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
+1 -1
View File
@@ -4,8 +4,8 @@ import "net/url"
// CDNConfig specifies the properties required to use a CDN as the storage backend
type CDNConfig struct {
TimeoutConf
Endpoint string `envconfig:"CDN_ENDPOINT"`
Timeout int `validate:"required"`
}
// CDNEndpointWithDefault returns CDN endpoint if set
-15
View File
@@ -1,15 +0,0 @@
package env
import (
"os"
)
// BasicAuth returns BASIC_AUTH_USER
// and BASIC_AUTH_PASSWORD and ok if either
// of them are empty
func BasicAuth() (user, pass string, ok bool) {
user = os.Getenv("BASIC_AUTH_USER")
pass = os.Getenv("BASIC_AUTH_PASS")
ok = user != "" && pass != ""
return user, pass, ok
}
-23
View File
@@ -1,23 +0,0 @@
package env
import (
"net/url"
"github.com/gobuffalo/envy"
)
// CDNEndpointWithDefault returns CDN endpoint if set
// if not it should default to clouds default blob storage endpoint e.g
func CDNEndpointWithDefault(value *url.URL) *url.URL {
rawURI, err := envy.MustGet("CDN_ENDPOINT")
if err != nil {
return value
}
uri, err := url.Parse(rawURI)
if err != nil {
return value
}
return uri
}
-11
View File
@@ -1,11 +0,0 @@
package env
import (
"github.com/gobuffalo/envy"
)
// CloudRuntime returns the Cloud Provider
// underneath which the Proxy/Registry is running.
func CloudRuntime() string {
return envy.Get("ATHENS_CLOUD_RUNTIME", "none")
}
-17
View File
@@ -1,17 +0,0 @@
package env
import (
"strconv"
"github.com/gobuffalo/envy"
)
// EnableCSRFProtection determines whether to enable CSRF protection
func EnableCSRFProtection() bool {
boolStr := envy.Get("ATHENS_ENABLE_CSRF_PROTECTION", "false")
enable, err := strconv.ParseBool(boolStr)
if err != nil {
return false
}
return enable
}
-19
View File
@@ -1,19 +0,0 @@
package env
import "github.com/gobuffalo/envy"
const defaultConfigurationFileName = "filter.conf"
// FilterConfigurationFileName specifies file name for include exclude private filter
// If no filename is specified it fallbacks to 'filter.conf'
func FilterConfigurationFileName() string {
return envy.Get("ATHENS_FILTER_FILENAME", defaultConfigurationFileName)
}
// FilterOff checks PROXY_FILTER_OFF env and returns
// true of it's equal to "true", otherwise false always.
// It defaults to "true" until Olympus is the default
// place to grab modules before the Proxy.
func FilterOff() bool {
return envy.Get("PROXY_FILTER_OFF", "true") == "true"
}
-23
View File
@@ -1,23 +0,0 @@
package env
import (
"fmt"
"github.com/gobuffalo/envy"
)
// GCPBucketName returns Google Cloud Storage bucket name defined by ATHENS_STORAGE_GCP_BUCKET
func GCPBucketName() (string, error) {
env, err := envy.MustGet("ATHENS_STORAGE_GCP_BUCKET")
if err != nil {
return "", fmt.Errorf("missing Google Cloud storage bucket name: %s", err)
}
return env, nil
}
// GCPProjectID returns the project id on which the project
// is running or the cloud storage is using.
func GCPProjectID() string {
return envy.Get("GOOGLE_CLOUD_PROJECT", "")
}
-59
View File
@@ -1,59 +0,0 @@
package env
import (
"os"
"strconv"
"github.com/gobuffalo/envy"
)
// GoEnvironmentWithDefault returns environment used.
// Supported values are: 'development' and 'production'
func GoEnvironmentWithDefault(value string) string {
return envy.Get("GO_ENV", value)
}
// GoBinPath returns the path to the go binary to use, defined by
// GO_BINARY_PATH. This value can be a name on the PATH, or the full path
func GoBinPath() string {
return envy.Get("GO_BINARY_PATH", "go")
}
// GoGetWorkers returns how many times you can concurrently
// go mod download, this is so that low performance instances
// can manage go get more sanely and not run out of disk or memory.
func GoGetWorkers() int {
defaultNum := 30 // 3 * cmd/go's worker count.
str := os.Getenv("ATHENS_GOGET_WORKERS")
if str == "" {
return defaultNum
}
num, err := strconv.Atoi(str)
if err != nil {
return defaultNum
}
return num
}
// ProtocolWorkers returns how many concurrent
// requests can you handle at a time for all
// download protocol paths. This is different from
// GoGetWorkers in that you can potentially serve
// 30 requests to the Download Protocol but only 5
// at a time can stash a module from Upstream to Storage.
func ProtocolWorkers() int {
defaultNum := 30
str := os.Getenv("ATHENS_PROTOCOL_WORKERS")
if str == "" {
return defaultNum
}
num, err := strconv.Atoi(str)
if err != nil {
return defaultNum
}
return num
}
-23
View File
@@ -1,23 +0,0 @@
package env
import (
"github.com/gobuffalo/envy"
"github.com/sirupsen/logrus"
)
// LogLevel returns the system's
// exposure to internal logs. Defaults
// to debug.
func LogLevel() (logrus.Level, error) {
lvlStr := envy.Get("ATHENS_LOG_LEVEL", "debug")
return logrus.ParseLevel(lvlStr)
}
// BuffaloLogLevel returns the log level for logs
// emitted by Buffalo itself. This is different from our own
// LogLevel in this file because you might want info level
// for our codebase, but panic level for buffalo.
func BuffaloLogLevel() (logrus.Level, error) {
lvlStr := envy.Get("BUFFALO_LOG_LEVEL", "debug")
return logrus.ParseLevel(lvlStr)
}
-47
View File
@@ -1,47 +0,0 @@
package env
import (
"fmt"
"github.com/gobuffalo/envy"
)
// MinioEndpoint returns Minio endpoing URI defined by ATHENS_MINIO_ENDPOINT.
func MinioEndpoint() (string, error) {
env, err := envy.MustGet("ATHENS_MINIO_ENDPOINT")
if err != nil {
return "", fmt.Errorf("missing minio endpoint: %s", err)
}
return env, nil
}
// MinioAccessKeyID returns Minio access key ID defined by ATHENS_MINIO_ACCESS_KEY_ID.
func MinioAccessKeyID() (string, error) {
env, err := envy.MustGet("ATHENS_MINIO_ACCESS_KEY_ID")
if err != nil {
return "", fmt.Errorf("missing minio access key ID: %s", err)
}
return env, nil
}
// MinioSecretAccessKey returns Minio secret access key defined by ATHENS_MINIO_SECRET_ACCESS_KEY.
func MinioSecretAccessKey() (string, error) {
env, err := envy.MustGet("ATHENS_MINIO_SECRET_ACCESS_KEY")
if err != nil {
return "", fmt.Errorf("missing minio secret access key ID: %s", err)
}
return env, nil
}
// MinioBucketNameWithDefault returns bucket name used with Minio. Defined by ATHENS_MINIO_BUCKET_NAME.
func MinioBucketNameWithDefault(value string) string {
return envy.Get("ATHENS_MINIO_BUCKET_NAME", value)
}
// MinioSSLWithDefault returns flag whether or not SSL should be used with Minio. Defined by ATHENS_MINIO_USE_SSL.
func MinioSSLWithDefault(value string) string {
return envy.Get("ATHENS_MINIO_USE_SSL", value)
}
-77
View File
@@ -1,77 +0,0 @@
package env
import (
"fmt"
"strconv"
"time"
"github.com/gobuffalo/envy"
)
// MongoConnectionString returns Athens Mongo Storage connection string defined by ATHENS_MONGO_CONNECTION_STRING
func MongoConnectionString() string {
return envy.Get("ATHENS_MONGO_CONNECTION_STRING", "mongodb://127.0.0.1:27017")
}
// MongoCertPath returns Athens Mongo Storage cert path string defined by ATHENS_MONGO_CERT_PATH
func MongoCertPath() string {
env := envy.Get("ATHENS_MONGO_CERT_PATH", "")
return env
}
// MongoHost returns Athens Mongo host defined by MONGO_HOST
func MongoHost() (string, error) {
env, err := envy.MustGet("MONGO_HOST")
if err != nil {
return "", fmt.Errorf("missing mongo host: %s", err)
}
return env, nil
}
// MongoPort returns Athens Mongo port defined by MONGO_PORT
func MongoPort() (string, error) {
env, err := envy.MustGet("MONGO_PORT")
if err != nil {
return "", fmt.Errorf("missing mongo port: %s", err)
}
return env, nil
}
// MongoUser returns Athens Mongo Storage user defined by MONGO_USER
func MongoUser() (string, error) {
env, err := envy.MustGet("MONGO_USER")
if err != nil {
return "", fmt.Errorf("missing mongo user: %s", err)
}
return env, nil
}
// MongoPassword returns Athens Mongo Storage user password defined by MONGO_PASSWORD
func MongoPassword() (string, error) {
env, err := envy.MustGet("MONGO_PASSWORD")
if err != nil {
return "", fmt.Errorf("missing mongo user password: %s", err)
}
return env, nil
}
// MongoConnectionTimeoutSecWithDefault returns Athens Mongo Storage connection timeout defined by MONGO_CONN_TIMEOUT_SEC.
// Values are in seconds.
func MongoConnectionTimeoutSecWithDefault(defTimeout int) time.Duration {
timeoutConf := envy.Get("MONGO_CONN_TIMEOUT_SEC", strconv.Itoa(defTimeout))
timeout, err := strconv.ParseInt(timeoutConf, 10, 32)
if err != nil {
return time.Duration(defTimeout) * time.Second
}
return time.Duration(timeout) * time.Second
}
// MongoSSLWithDefault returns Athens Mongo Storage SSL flag defined by MONGO_SSL.
// Defines whether or not SSL should be used.
func MongoSSLWithDefault(value string) string {
return envy.Get("MONGO_SSL", value)
}
-15
View File
@@ -1,15 +0,0 @@
package env
import (
"os"
)
// NETRCPath tells you where the .netrc path initially resides.
// This is so that you can mount the .netrc file to a secret location
// in the fs system and then move it ~/.netrc. In certain deployments
// like Kubernetes, we can't mount directly to ~ because it would then
// clean out whatever is already there as part of the image (such as
// .cache directory in the Go image).
func NETRCPath() string {
return os.Getenv("ATHENS_NETRC_PATH")
}
-57
View File
@@ -1,57 +0,0 @@
package env
import (
"runtime"
"strconv"
"github.com/gobuffalo/envy"
)
const (
// OlympusGlobalEndpoint is a default olympus DNS address
OlympusGlobalEndpoint = "http://localhost:3001"
)
// OlympusGlobalEndpointWithDefault returns Olympus global endpoint defined by OLYMPUS_GLOBAL_ENDPOINT.
func OlympusGlobalEndpointWithDefault(value string) string {
return envy.Get("OLYMPUS_GLOBAL_ENDPOINT", value)
}
// GetOlympusEndpoint returns global endpoint with override in mind
func GetOlympusEndpoint() string {
return OlympusGlobalEndpointWithDefault(OlympusGlobalEndpoint)
}
// AthensMaxConcurrency retrieves maximal level of concurrency based on ATHENS_MAX_CONCURRENCY.
// Defaults to number of cores if env is not set.
func AthensMaxConcurrency() int {
defaultMaxConcurrency := runtime.NumCPU()
maxConcurrencyEnv, err := envy.MustGet("ATHENS_MAX_CONCURRENCY")
if err != nil {
return defaultMaxConcurrency
}
mc, err := strconv.Atoi(maxConcurrencyEnv)
if err != nil {
return defaultMaxConcurrency
}
return mc
}
// WorkerMaxFails retrieves maximal level of concurrency based on ATHENS_WORKER_MAX_FAILS.
// Defaults to 5.
func WorkerMaxFails() uint {
defaultMaxFails := uint(5)
maxFailsEnv, err := envy.MustGet("ATHENS_WORKER_MAX_FAILS")
if err != nil {
return defaultMaxFails
}
mc, err := strconv.Atoi(maxFailsEnv)
if err != nil {
return defaultMaxFails
}
return uint(mc)
}
-13
View File
@@ -1,13 +0,0 @@
package env
import (
"os"
)
// AthensPathPrefix returns whether the Proxy (or Olympus)
// should have a basepath. Certain proxies and services
// are distinguished based on subdomain, while others are based
// on path prefixes.
func AthensPathPrefix() string {
return os.Getenv("ATHENS_PATH_PREFIX")
}
-10
View File
@@ -1,10 +0,0 @@
package env
import "github.com/gobuffalo/envy"
// Port returns the PORT env var that a server (Olympus/Zeus) should
// run on. Buffalo uses the PORT environment so this keeps it consistent
// but can also be used directly as a Buffalo service option
func Port(value string) string {
return envy.Get("PORT", value)
}
-20
View File
@@ -1,20 +0,0 @@
package env
import "github.com/gobuffalo/envy"
// RedisQueuePortWithDefault returns Redis queue port used by workers defined by ATHENS_REDIS_QUEUE_PORT.
// Standard port is 6379
func RedisQueuePortWithDefault(value string) string {
return envy.Get("ATHENS_REDIS_QUEUE_PORT", value)
}
// OlympusRedisQueuePortWithDefault returns Redis queue port used by workers defined by OLYMPUS_REDIS_QUEUE_PORT.
// Standard port is 6379
func OlympusRedisQueuePortWithDefault(value string) string {
return envy.Get("OLYMPUS_REDIS_QUEUE_PORT", value)
}
// OlympusBackgroundWorkerType determines the background worker type used for Registry (Olympus). (redis, memory) Default - redis
func OlympusBackgroundWorkerType() string {
return envy.Get("OLYMPUS_BACKGROUND_WORKER_TYPE", "redis")
}
-13
View File
@@ -1,13 +0,0 @@
package env
import (
"os"
)
// ProxyForceSSL returns true if the PROXY_FORCE_SSL
// env is set to "true", otherwise defaults to false.
// This is used to make sure the Proxy would redirect
// to https on any request.
func ProxyForceSSL() bool {
return os.Getenv("PROXY_FORCE_SSL") == "true"
}
-29
View File
@@ -1,29 +0,0 @@
package env
import (
"fmt"
"github.com/gobuffalo/envy"
)
// DiskRoot returns Athens Mongo Disk Root folder defined by ATHENS_DISK_STORAGE_ROOT
func DiskRoot() (string, error) {
env, err := envy.MustGet("ATHENS_DISK_STORAGE_ROOT")
if err != nil {
return "", fmt.Errorf("missing disk root: %s", err)
}
return env, nil
}
// StorageType returns storage type used by Athens with error if env is not set.
// Possible values are memory, disk, mongo
func StorageType() (string, error) {
return envy.MustGet("ATHENS_STORAGE_TYPE")
}
// StorageTypeWithDefault returns storage type used by Athens with default value if env is not set.
// Possible values are memory, disk, mongo
func StorageTypeWithDefault(value string) string {
return envy.Get("ATHENS_STORAGE_TYPE", value)
}
-19
View File
@@ -1,19 +0,0 @@
package env
import (
"strconv"
"time"
"github.com/gobuffalo/envy"
)
// Timeout is timeout for external network calls
func Timeout() time.Duration {
t := envy.Get("ATHENS_TIMEOUT", "300")
timeout, err := strconv.Atoi(t)
if err != nil || timeout <= 0 {
return 300 * time.Second
}
return time.Second * time.Duration(timeout)
}
-18
View File
@@ -1,18 +0,0 @@
package env
import "github.com/gobuffalo/envy"
const noProxyValidator = ""
// ValidatorHook specifies the url of the endpoint to validate the modules agains
// It returns the endpoint (if it was configured) and a bool meaning that the endpoint return value is valid
func ValidatorHook() (endpoint string, found bool) {
endpoint = envy.Get("ATHENS_PROXY_VALIDATOR", noProxyValidator)
if endpoint == noProxyValidator {
found = false
endpoint = ""
} else {
found = true
}
return
}
+1 -1
View File
@@ -2,7 +2,7 @@ package config
// GCPConfig specifies the properties required to use GCP as the storage backend
type GCPConfig struct {
TimeoutConf
ProjectID string `envconfig:"GOOGLE_CLOUD_PROJECT"`
Bucket string `validate:"required" envconfig:"ATHENS_STORAGE_GCP_BUCKET"`
Timeout int `validate:"required"`
}
+1 -1
View File
@@ -2,10 +2,10 @@ package config
// MinioConfig specifies the properties required to use Minio as the storage backend
type MinioConfig struct {
TimeoutConf
Endpoint string `validate:"required" envconfig:"ATHENS_MINIO_ENDPOINT"`
Key string `validate:"required" envconfig:"ATHENS_MINIO_ACCESS_KEY_ID"`
Secret string `validate:"required" envconfig:"ATHENS_MINIO_SECRET_ACCESS_KEY"`
Timeout int `validate:"required"`
Bucket string `validate:"required" envconfig:"ATHENS_MINIO_BUCKET_NAME"`
EnableSSL bool `envconfig:"ATHENS_MINIO_USE_SSL"`
}
+3 -2
View File
@@ -2,6 +2,7 @@ package config
// MongoConfig specifies the properties required to use MongoDB as the storage backend
type MongoConfig struct {
URL string `validate:"required" envconfig:"ATHENS_MONGO_STORAGE_URL"`
Timeout int `validate:"required" envconfig:"MONGO_CONN_TIMEOUT_SEC"`
TimeoutConf
URL string `validate:"required" envconfig:"ATHENS_MONGO_STORAGE_URL"`
CertPath string `envconfig:"ATHENS_MONGO_CERT_PATH"`
}
+1 -1
View File
@@ -5,5 +5,5 @@ type OlympusConfig struct {
Port string `validate:"required" envconfig:"PORT"`
StorageType string `validate:"required" envconfig:"ATHENS_STORAGE_TYPE"`
WorkerType string `validate:"required" envconfig:"OLYMPUS_BACKGROUND_WORKER_TYPE"`
RedisQueueAddress string `validate:"required" envconfig:"OLYMPUS_REDIS_QUEUE_PORT"`
RedisQueueAddress string `validate:"required" envconfig:"OLYMPUS_REDIS_QUEUE_ADDRESS"`
}
+32 -1
View File
@@ -1,7 +1,10 @@
package config
import (
"fmt"
"path/filepath"
"runtime"
"testing"
"github.com/BurntSushi/toml"
"github.com/kelseyhightower/envconfig"
@@ -10,14 +13,17 @@ import (
// Config provides configuration values for all components
type Config struct {
TimeoutConf
GoEnv string `validate:"required" envconfig:"GO_ENV"`
GoBinary string `validate:"required" envconfig:"GO_BINARY_PATH"`
GoGetWorkers int `validate:"required" envconfig:"ATHENS_GOGET_WORKERS"`
ProtocolWorkers int `validate:"required" envconfig:"ATHENS_PROTOCOL_WORKERS"`
LogLevel string `validate:"required" envconfig:"ATHENS_LOG_LEVEL"`
BuffaloLogLevel string `validate:"required" envconfig:"BUFFALO_LOG_LEVEL"`
MaxConcurrency int `validate:"required" envconfig:"ATHENS_MAX_CONCURRENCY"`
MaxWorkerFails uint `validate:"required" envconfig:"ATHENS_MAX_WORKER_FAILS"`
CloudRuntime string `validate:"required" envconfig:"ATHENS_CLOUD_RUNTIME"`
FilterFile string `validate:"required" envconfig:"ATHENS_FILTER_FILE"`
Timeout int `validate:"required"`
EnableCSRFProtection bool `envconfig:"ATHENS_ENABLE_CSRF_PROTECTION"`
Proxy *ProxyConfig `validate:""`
Olympus *OlympusConfig `validate:""`
@@ -78,3 +84,28 @@ func validateConfig(c Config) error {
}
return nil
}
// GetConf accepts the path to a file, constructs an absolute path to the file,
// and attempts to parse it into a Config struct.
func GetConf(path string) (*Config, error) {
absPath, err := filepath.Abs(path)
if err != nil {
return nil, fmt.Errorf("Unable to construct absolute path to test config file")
}
conf, err := ParseConfigFile(absPath)
if err != nil {
return nil, fmt.Errorf("Unable to parse test config file: %s", err.Error())
}
return conf, nil
}
// GetConfLogErr is similar to GetConf, except it logs a failure for the calling test
// if any errors are encountered
func GetConfLogErr(path string, t *testing.T) *Config {
c, err := GetConf(path)
if err != nil {
t.Fatalf("Unable to parse config file: %s", err.Error())
return nil
}
return c
}
+125 -54
View File
@@ -7,21 +7,65 @@ import (
"testing"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
)
const exampleConfigPath = "../../config.example.toml"
func compareConfigs(parsedConf *Config, expConf *Config, t *testing.T) {
opts := cmpopts.IgnoreTypes(StorageConfig{}, ProxyConfig{}, OlympusConfig{})
eq := cmp.Equal(parsedConf, expConf, opts)
if !eq {
t.Errorf("Parsed Example configuration did not match expected values. Expected: %+v. Actual: %+v", expConf, parsedConf)
}
eq = cmp.Equal(parsedConf.Proxy, expConf.Proxy)
if !eq {
t.Errorf("Parsed Example Proxy configuration did not match expected values. Expected: %+v. Actual: %+v", expConf.Proxy, parsedConf.Proxy)
}
eq = cmp.Equal(parsedConf.Olympus, expConf.Olympus)
if !eq {
t.Errorf("Parsed Example Olympus configuration did not match expected values. Expected: %+v. Actual: %+v", expConf.Olympus, parsedConf.Olympus)
}
compareStorageConfigs(parsedConf.Storage, expConf.Storage, t)
}
func compareStorageConfigs(parsedStorage *StorageConfig, expStorage *StorageConfig, t *testing.T) {
eq := cmp.Equal(parsedStorage.CDN, expStorage.CDN)
if !eq {
t.Errorf("Parsed Example Storage configuration did not match expected values. Expected: %+v. Actual: %+v", expStorage.CDN, parsedStorage.CDN)
}
eq = cmp.Equal(parsedStorage.Mongo, expStorage.Mongo)
if !eq {
t.Errorf("Parsed Example Storage configuration did not match expected values. Expected: %+v. Actual: %+v", expStorage.Mongo, parsedStorage.Mongo)
}
eq = cmp.Equal(parsedStorage.Minio, expStorage.Minio)
if !eq {
t.Errorf("Parsed Example Storage configuration did not match expected values. Expected: %+v. Actual: %+v", expStorage.Minio, parsedStorage.Minio)
}
eq = cmp.Equal(parsedStorage.Disk, expStorage.Disk)
if !eq {
t.Errorf("Parsed Example Storage configuration did not match expected values. Expected: %+v. Actual: %+v", expStorage.Disk, parsedStorage.Disk)
}
eq = cmp.Equal(parsedStorage.GCP, expStorage.GCP)
if !eq {
t.Errorf("Parsed Example Storage configuration did not match expected values. Expected: %+v. Actual: %+v", expStorage.GCP, parsedStorage.GCP)
}
}
func TestEnvOverrides(t *testing.T) {
filterOff := false
expProxy := ProxyConfig{
StorageType: "minio",
OlympusGlobalEndpoint: "mytikas.gomods.io",
RedisQueueAddress: ":6380",
Port: ":7000",
FilterOff: &filterOff,
FilterOff: false,
BasicAuthUser: "testuser",
BasicAuthPass: "testpass",
ForceSSL: true,
ValidatorHook: "testhook.io",
PathPrefix: "prefix",
NETRCPath: "/test/path",
}
expOlympus := OlympusConfig{
@@ -32,14 +76,19 @@ func TestEnvOverrides(t *testing.T) {
}
expConf := &Config{
GoEnv: "production",
LogLevel: "info",
GoBinary: "go11",
MaxConcurrency: 4,
MaxWorkerFails: 10,
CloudRuntime: "gcp",
FilterFile: "filter2.conf",
Timeout: 30,
GoEnv: "production",
GoGetWorkers: 10,
ProtocolWorkers: 10,
LogLevel: "info",
BuffaloLogLevel: "info",
GoBinary: "go11",
MaxConcurrency: 4,
MaxWorkerFails: 10,
CloudRuntime: "gcp",
FilterFile: "filter2.conf",
TimeoutConf: TimeoutConf{
Timeout: 30,
},
EnableCSRFProtection: true,
Proxy: &expProxy,
Olympus: &expOlympus,
@@ -60,10 +109,7 @@ func TestEnvOverrides(t *testing.T) {
}
deleteInvalidStorageConfigs(conf.Storage)
eq := cmp.Equal(conf, expConf)
if !eq {
t.Errorf("Environment variables did not correctly override config values. Expected: %+v. Actual: %+v", expConf, conf)
}
compareConfigs(conf, expConf, t)
restoreEnv(envVarBackup)
}
@@ -73,7 +119,9 @@ func TestStorageEnvOverrides(t *testing.T) {
expStorage := &StorageConfig{
CDN: &CDNConfig{
Endpoint: "cdnEndpoint",
Timeout: globalTimeout,
TimeoutConf: TimeoutConf{
Timeout: globalTimeout,
},
},
Disk: &DiskConfig{
RootPath: "/my/root/path",
@@ -81,7 +129,9 @@ func TestStorageEnvOverrides(t *testing.T) {
GCP: &GCPConfig{
ProjectID: "gcpproject",
Bucket: "gcpbucket",
Timeout: globalTimeout,
TimeoutConf: TimeoutConf{
Timeout: globalTimeout,
},
},
Minio: &MinioConfig{
Endpoint: "minioEndpoint",
@@ -89,11 +139,16 @@ func TestStorageEnvOverrides(t *testing.T) {
Secret: "minioSecret",
EnableSSL: false,
Bucket: "minioBucket",
Timeout: globalTimeout,
TimeoutConf: TimeoutConf{
Timeout: globalTimeout,
},
},
Mongo: &MongoConfig{
URL: "mongoURL",
Timeout: 25,
URL: "mongoURL",
CertPath: "/test/path",
TimeoutConf: TimeoutConf{
Timeout: globalTimeout,
},
},
}
envVars := getEnvMap(&Config{Storage: expStorage})
@@ -111,10 +166,7 @@ func TestStorageEnvOverrides(t *testing.T) {
setStorageTimeouts(conf.Storage, globalTimeout)
deleteInvalidStorageConfigs(conf.Storage)
eq := cmp.Equal(conf.Storage, expStorage)
if !eq {
t.Error("Environment variables did not correctly override storage config values")
}
compareStorageConfigs(conf.Storage, expStorage, t)
restoreEnv(envVarBackup)
}
@@ -147,13 +199,12 @@ func TestParseExampleConfig(t *testing.T) {
globalTimeout := 300
filterOff := true
expProxy := &ProxyConfig{
StorageType: "memory",
OlympusGlobalEndpoint: "http://localhost:3001",
RedisQueueAddress: ":6379",
Port: ":3000",
FilterOff: &filterOff,
FilterOff: true,
BasicAuthUser: "",
BasicAuthPass: "",
}
@@ -168,7 +219,9 @@ func TestParseExampleConfig(t *testing.T) {
expStorage := &StorageConfig{
CDN: &CDNConfig{
Endpoint: "cdn.example.com",
Timeout: globalTimeout,
TimeoutConf: TimeoutConf{
Timeout: globalTimeout,
},
},
Disk: &DiskConfig{
RootPath: "/path/on/disk",
@@ -176,7 +229,9 @@ func TestParseExampleConfig(t *testing.T) {
GCP: &GCPConfig{
ProjectID: "MY_GCP_PROJECT_ID",
Bucket: "MY_GCP_BUCKET",
Timeout: globalTimeout,
TimeoutConf: TimeoutConf{
Timeout: globalTimeout,
},
},
Minio: &MinioConfig{
Endpoint: "minio.example.com",
@@ -184,23 +239,33 @@ func TestParseExampleConfig(t *testing.T) {
Secret: "MY_SECRET",
EnableSSL: true,
Bucket: "gomods",
Timeout: globalTimeout,
TimeoutConf: TimeoutConf{
Timeout: globalTimeout,
},
},
Mongo: &MongoConfig{
URL: "mongo.example.com",
Timeout: globalTimeout,
URL: "mongodb://127.0.0.1:27017",
CertPath: "",
TimeoutConf: TimeoutConf{
Timeout: globalTimeout,
},
},
}
expConf := &Config{
GoEnv: "development",
LogLevel: "debug",
GoBinary: "go",
MaxConcurrency: 4,
MaxWorkerFails: 5,
CloudRuntime: "none",
FilterFile: "filter.conf",
Timeout: 300,
GoEnv: "development",
LogLevel: "debug",
BuffaloLogLevel: "debug",
GoBinary: "go",
GoGetWorkers: 30,
ProtocolWorkers: 30,
MaxConcurrency: 4,
MaxWorkerFails: 5,
CloudRuntime: "none",
FilterFile: "filter.conf",
TimeoutConf: TimeoutConf{
Timeout: 300,
},
EnableCSRFProtection: false,
Proxy: expProxy,
Olympus: expOlympus,
@@ -216,10 +281,7 @@ func TestParseExampleConfig(t *testing.T) {
t.Errorf("Unable to parse example config file: %+v", err)
}
eq := cmp.Equal(parsedConf, expConf)
if !eq {
t.Errorf("Parsed Example configuration did not match expected values. Expected: %+v. Actual: %+v", expConf, parsedConf)
}
compareConfigs(parsedConf, expConf, t)
restoreEnv(envVarBackup)
}
@@ -228,24 +290,28 @@ func TestConfigOverridesDefault(t *testing.T) {
// set values to anything but defaults
config := &Config{
Timeout: 1,
TimeoutConf: TimeoutConf{
Timeout: 1,
},
Storage: &StorageConfig{
Minio: &MinioConfig{
Bucket: "notgomods",
EnableSSL: false,
Timeout: 42,
TimeoutConf: TimeoutConf{
Timeout: 42,
},
},
},
}
// should be identical to config above
expConfig := &Config{
Timeout: config.Timeout,
TimeoutConf: config.TimeoutConf,
Storage: &StorageConfig{
Minio: &MinioConfig{
Bucket: config.Storage.Minio.Bucket,
EnableSSL: config.Storage.Minio.EnableSSL,
Timeout: config.Storage.Minio.Timeout,
Bucket: config.Storage.Minio.Bucket,
EnableSSL: config.Storage.Minio.EnableSSL,
TimeoutConf: config.Storage.Minio.TimeoutConf,
},
},
}
@@ -277,7 +343,10 @@ func getEnvMap(config *Config) map[string]string {
envVars := map[string]string{
"GO_ENV": config.GoEnv,
"GO_BINARY_PATH": config.GoBinary,
"ATHENS_GOGET_WORKERS": strconv.Itoa(config.GoGetWorkers),
"ATHENS_PROTOCOL_WORKERS": strconv.Itoa(config.ProtocolWorkers),
"ATHENS_LOG_LEVEL": config.LogLevel,
"BUFFALO_LOG_LEVEL": config.BuffaloLogLevel,
"ATHENS_CLOUD_RUNTIME": config.CloudRuntime,
"ATHENS_MAX_CONCURRENCY": strconv.Itoa(config.MaxConcurrency),
"ATHENS_MAX_WORKER_FAILS": strconv.FormatUint(uint64(config.MaxWorkerFails), 10),
@@ -291,18 +360,20 @@ func getEnvMap(config *Config) map[string]string {
envVars["ATHENS_STORAGE_TYPE"] = proxy.StorageType
envVars["OLYMPUS_GLOBAL_ENDPOINT"] = proxy.OlympusGlobalEndpoint
envVars["PORT"] = proxy.Port
envVars["ATHENS_REDIS_QUEUE_PORT"] = proxy.RedisQueueAddress
if proxy.FilterOff != nil {
envVars["PROXY_FILTER_OFF"] = strconv.FormatBool(*proxy.FilterOff)
}
envVars["ATHENS_REDIS_QUEUE_ADDRESS"] = proxy.RedisQueueAddress
envVars["PROXY_FILTER_OFF"] = strconv.FormatBool(proxy.FilterOff)
envVars["BASIC_AUTH_USER"] = proxy.BasicAuthUser
envVars["BASIC_AUTH_PASS"] = proxy.BasicAuthPass
envVars["PROXY_FORCE_SSL"] = strconv.FormatBool(proxy.ForceSSL)
envVars["ATHENS_PROXY_VALIDATOR"] = proxy.ValidatorHook
envVars["ATHENS_PATH_PREFIX"] = proxy.PathPrefix
envVars["ATHENS_NETRC_PATH"] = proxy.NETRCPath
}
olympus := config.Olympus
if olympus != nil {
envVars["OLYMPUS_BACKGROUND_WORKER_TYPE"] = olympus.WorkerType
envVars["OLYMPUS_REDIS_QUEUE_PORT"] = olympus.RedisQueueAddress
envVars["OLYMPUS_REDIS_QUEUE_ADDRESS"] = olympus.RedisQueueAddress
}
storage := config.Storage
@@ -326,7 +397,7 @@ func getEnvMap(config *Config) map[string]string {
}
if storage.Mongo != nil {
envVars["ATHENS_MONGO_STORAGE_URL"] = storage.Mongo.URL
envVars["MONGO_CONN_TIMEOUT_SEC"] = strconv.Itoa(storage.Mongo.Timeout)
envVars["ATHENS_MONGO_CERT_PATH"] = storage.Mongo.CertPath
}
}
return envVars
+6 -2
View File
@@ -5,10 +5,14 @@ type ProxyConfig struct {
StorageType string `validate:"required" envconfig:"ATHENS_STORAGE_TYPE"`
OlympusGlobalEndpoint string `validate:"required" envconfig:"OLYMPUS_GLOBAL_ENDPOINT"`
Port string `validate:"required" envconfig:"PORT"`
RedisQueueAddress string `validate:"required" envconfig:"ATHENS_REDIS_QUEUE_PORT"`
FilterOff *bool `validate:"required" envconfig:"PROXY_FILTER_OFF"`
RedisQueueAddress string `validate:"required" envconfig:"ATHENS_REDIS_QUEUE_ADDRESS"`
FilterOff bool `envconfig:"PROXY_FILTER_OFF"`
BasicAuthUser string `envconfig:"BASIC_AUTH_USER"`
BasicAuthPass string `envconfig:"BASIC_AUTH_PASS"`
ForceSSL bool `envconfig:"PROXY_FORCE_SSL"`
ValidatorHook string `envconfig:"ATHENS_PROXY_VALIDATOR"`
PathPrefix string `envconfig:"ATHENS_PATH_PREFIX"`
NETRCPath string `envconfig:"ATHENS_NETRC_PATH"`
}
// BasicAuth returns BasicAuthUser and BasicAuthPassword
+13
View File
@@ -0,0 +1,13 @@
package config
import "time"
// TimeoutConf is a common embeddable struct for any configuration that
// carries a timeout, expressed in whole seconds.
type TimeoutConf struct {
	Timeout int `validate:"required"`
}

// TimeoutDuration returns the configured timeout as a time.Duration.
func (t *TimeoutConf) TimeoutDuration() time.Duration {
	return time.Duration(t.Timeout) * time.Second
}
+7 -2
View File
@@ -11,7 +11,7 @@ import (
"testing"
"time"
"github.com/gomods/athens/pkg/config/env"
"github.com/gomods/athens/pkg/config"
"github.com/gomods/athens/pkg/module"
"github.com/gomods/athens/pkg/stash"
"github.com/gomods/athens/pkg/storage"
@@ -21,9 +21,14 @@ import (
"golang.org/x/sync/errgroup"
)
var (
testConfigPath = filepath.Join("..", "..", "config.test.toml")
)
func getDP(t *testing.T) Protocol {
t.Helper()
goBin := env.GoBinPath()
conf := config.GetConfLogErr(testConfigPath, t)
goBin := conf.GoBinary
fs := afero.NewOsFs()
mf, err := module.NewGoGetFetcher(goBin, fs)
if err != nil {
+7 -5
View File
@@ -6,10 +6,10 @@ import (
"fmt"
"io/ioutil"
"net"
"time"
"github.com/globalsign/mgo"
"github.com/globalsign/mgo/bson"
"github.com/gomods/athens/pkg/config/env"
"github.com/gomods/athens/pkg/eventlog"
)
@@ -20,20 +20,22 @@ type Log struct {
col string // collection
url string
certPath string
timeout time.Duration
}
// NewLog creates event log from backing mongo database
func NewLog(url, certPath string) (*Log, error) {
return NewLogWithCollection(url, certPath, "eventlog")
// NewLog creates an event log backed by the mongo database at url, using
// the default "eventlog" collection. certPath, when non-empty, points at a
// CA certificate used for the TLS dial; timeout bounds the mongo dial.
func NewLog(url, certPath string, timeout time.Duration) (*Log, error) {
return NewLogWithCollection(url, certPath, "eventlog", timeout)
}
// NewLogWithCollection creates event log from backing mongo database
func NewLogWithCollection(url, certPath, collection string) (*Log, error) {
// NewLogWithCollection creates an event log backed by the mongo database
// at url, storing events in the named collection, and immediately connects.
func NewLogWithCollection(url, certPath, collection string, timeout time.Duration) (*Log, error) {
	l := &Log{
		db:       "athens",
		col:      collection,
		url:      url,
		certPath: certPath,
		timeout:  timeout,
	}
	return l, l.Connect()
}
@@ -124,7 +126,7 @@ func (m *Log) newSession() (*mgo.Session, error) {
return nil, err
}
dialInfo.Timeout = env.MongoConnectionTimeoutSecWithDefault(1)
dialInfo.Timeout = m.timeout
if m.certPath != "" {
roots := x509.NewCertPool()
+1 -1
View File
@@ -18,7 +18,7 @@ func TestMongo(t *testing.T) {
}
func (m *MongoTests) SetupTest() {
store, err := NewLog("mongodb://127.0.0.1:27017", "")
store, err := NewLog("mongodb://127.0.0.1:27017", "", time.Second)
if err != nil {
panic(err)
}
+4 -5
View File
@@ -6,7 +6,6 @@ import (
"strings"
"github.com/gobuffalo/buffalo"
"github.com/gomods/athens/pkg/config/env"
"github.com/gomods/athens/pkg/errors"
"github.com/gomods/athens/pkg/module"
"github.com/gomods/athens/pkg/paths"
@@ -14,7 +13,7 @@ import (
// NewFilterMiddleware builds a middleware function that implements the filters configured in
// the filter file.
func NewFilterMiddleware(mf *module.Filter) buffalo.MiddlewareFunc {
func NewFilterMiddleware(mf *module.Filter, olympusEndpoint string) buffalo.MiddlewareFunc {
const op errors.Op = "actions.FilterMiddleware"
return func(next buffalo.Handler) buffalo.Handler {
@@ -42,7 +41,7 @@ func NewFilterMiddleware(mf *module.Filter) buffalo.MiddlewareFunc {
return next(c)
case module.Include:
// TODO : spin up cache filling worker and serve the request using the cache
newURL := redirectToOlympusURL(c.Request().URL)
newURL := redirectToOlympusURL(olympusEndpoint, c.Request().URL)
return c.Redirect(http.StatusSeeOther, newURL)
}
@@ -55,6 +54,6 @@ func isPseudoVersion(version string) bool {
return strings.HasPrefix(version, "v0.0.0-")
}
func redirectToOlympusURL(u *url.URL) string {
return strings.TrimSuffix(env.GetOlympusEndpoint(), "/") + u.Path
func redirectToOlympusURL(olympusEndpoint string, u *url.URL) string {
return strings.TrimSuffix(olympusEndpoint, "/") + u.Path
}
+33 -10
View File
@@ -5,38 +5,48 @@ import (
"fmt"
"net/http"
"net/http/httptest"
"path/filepath"
"testing"
"github.com/bketelsen/buffet"
"github.com/gobuffalo/buffalo"
"github.com/gomods/athens/pkg/config/env"
"github.com/gomods/athens/pkg/config"
"github.com/gomods/athens/pkg/log"
"github.com/gomods/athens/pkg/module"
"github.com/markbates/willie"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
jConfig "github.com/uber/jaeger-client-go/config"
)
// Avoid import cycle.
const pathList = "/{module:.+}/@v/list"
const pathVersionInfo = "/{module:.+}/@v/{version}.info"
const (
pathList = "/{module:.+}/@v/list"
pathVersionInfo = "/{module:.+}/@v/{version}.info"
)
func middlewareFilterApp() *buffalo.App {
var (
testConfigFile = filepath.Join("..", "..", "config.test.toml")
)
// middlewareFilterApp builds a minimal buffalo app wired with the filter
// middleware under test: it loads filter rules from filterFile, redirects
// included modules to olympusEndpoint, and registers the two @v routes with
// a trivial 200 handler.
func middlewareFilterApp(filterFile, olympusEndpoint string) *buffalo.App {
	app := buffalo.New(buffalo.Options{})
	filter := newTestFilter(filterFile)
	app.Use(NewFilterMiddleware(filter, olympusEndpoint))
	initializeTracing(app)
	ok := func(c buffalo.Context) error {
		return c.Render(200, nil)
	}
	app.GET(pathList, ok)
	app.GET(pathVersionInfo, ok)
	return app
}
func newTestFilter() *module.Filter {
f := module.NewFilter()
func newTestFilter(filterFile string) *module.Filter {
f := module.NewFilter(filterFile)
f.AddRule("github.com/gomods/athens/", module.Include)
f.AddRule("github.com/athens-artifacts/no-tags", module.Exclude)
f.AddRule("github.com/athens-artifacts", module.Direct)
@@ -46,12 +56,17 @@ func newTestFilter() *module.Filter {
func Test_FilterMiddleware(t *testing.T) {
r := require.New(t)
w := willie.New(middlewareFilterApp())
conf := config.GetConfLogErr(testConfigFile, t)
if conf.Proxy == nil {
t.Fatalf("No Proxy configuration in test config")
}
app := middlewareFilterApp(conf.FilterFile, conf.Proxy.OlympusGlobalEndpoint)
w := willie.New(app)
// Public, expects to be redirected to olympus
res := w.Request("/github.com/gomods/athens/@v/list").Get()
r.Equal(303, res.Code)
r.Equal(env.GetOlympusEndpoint()+"/github.com/gomods/athens/@v/list", res.HeaderMap.Get("Location"))
r.Equal(conf.Proxy.OlympusGlobalEndpoint+"/github.com/gomods/athens/@v/list", res.HeaderMap.Get("Location"))
// Excluded, expects a 403
res = w.Request("/github.com/athens-artifacts/no-tags/@v/list").Get()
@@ -151,3 +166,11 @@ func (suite *HookTestsSuite) TestHookUnexpectedError() {
r.True(suite.mock.invoked)
r.Equal(http.StatusInternalServerError, res.Code)
}
// initializeTracing wires an opentracing middleware into the app using a
// zero-value jaeger client configuration under the service name
// "athens.proxy".
// NOTE(review): the closer and error returned by cfg.New are discarded, so
// tracer setup failures are silent and the tracer is never closed — fine
// for tests, but worth confirming that is intentional.
func initializeTracing(app *buffalo.App) {
	cfg := jConfig.Configuration{}
	tracer, _, _ := cfg.New("athens.proxy")
	app.Use(buffet.OpenTracing(tracer))
}
+9 -8
View File
@@ -5,7 +5,6 @@ import (
"os"
"strings"
"github.com/gomods/athens/pkg/config/env"
"github.com/gomods/athens/pkg/errors"
)
@@ -15,7 +14,8 @@ var (
// Filter is a filter of modules
type Filter struct {
root ruleNode
root ruleNode
filePath string
}
// NewFilter creates new filter based on rules defined in a configuration file
@@ -29,9 +29,11 @@ type Filter struct {
// -
// + github.com/a
// will exclude all items from communication except github.com/a
func NewFilter() *Filter {
func NewFilter(filterFilePath string) *Filter {
rn := newRule(Default)
modFilter := Filter{}
modFilter := Filter{
filePath: filterFilePath,
}
modFilter.root = rn
modFilter.initFromConfig()
@@ -115,7 +117,7 @@ func (f *Filter) getAssociatedRule(path ...string) FilterRule {
}
func (f *Filter) initFromConfig() {
lines, err := getConfigLines()
lines, err := getConfigLines(f.filePath)
if err != nil || len(lines) == 0 {
return
@@ -170,11 +172,10 @@ func newRule(r FilterRule) ruleNode {
return rn
}
func getConfigLines() ([]string, error) {
func getConfigLines(filterFile string) ([]string, error) {
const op errors.Op = "module.getConfigLines"
configName := env.FilterConfigurationFileName()
f, err := os.Open(configName)
f, err := os.Open(filterFile)
if err != nil {
return nil, errors.E(op, err)
}
+14 -6
View File
@@ -1,11 +1,17 @@
package module
import (
"path/filepath"
"testing"
"github.com/gomods/athens/pkg/config"
"github.com/stretchr/testify/suite"
)
var (
testConfigFile = filepath.Join("..", "..", "config.test.toml")
)
type FilterTests struct {
suite.Suite
}
@@ -16,8 +22,8 @@ func Test_Filter(t *testing.T) {
func (t *FilterTests) Test_IgnoreSimple() {
r := t.Require()
f := NewFilter()
conf := config.GetConfLogErr(testConfigFile, t.T())
f := NewFilter(conf.FilterFile)
f.AddRule("github.com/a/b", Exclude)
r.Equal(Include, f.Rule("github.com/a"))
@@ -29,8 +35,8 @@ func (t *FilterTests) Test_IgnoreSimple() {
func (t *FilterTests) Test_IgnoreParentAllowChildren() {
r := t.Require()
f := NewFilter()
conf := config.GetConfLogErr(testConfigFile, t.T())
f := NewFilter(conf.FilterFile)
f.AddRule("github.com/a/b", Exclude)
f.AddRule("github.com/a/b/c", Include)
@@ -44,7 +50,8 @@ func (t *FilterTests) Test_IgnoreParentAllowChildren() {
func (t *FilterTests) Test_OnlyAllowed() {
r := t.Require()
f := NewFilter()
conf := config.GetConfLogErr(testConfigFile, t.T())
f := NewFilter(conf.FilterFile)
f.AddRule("github.com/a/b", Include)
f.AddRule("", Exclude)
@@ -58,7 +65,8 @@ func (t *FilterTests) Test_OnlyAllowed() {
func (t *FilterTests) Test_Direct() {
r := t.Require()
f := NewFilter()
conf := config.GetConfLogErr(testConfigFile, t.T())
f := NewFilter(conf.FilterFile)
f.AddRule("github.com/a/b/c", Exclude)
f.AddRule("github.com/a/b", Direct)
f.AddRule("github.com/a", Include)
+2 -2
View File
@@ -6,7 +6,7 @@ import (
"io/ioutil"
"log"
"github.com/gomods/athens/pkg/config/env"
"github.com/gobuffalo/envy"
"github.com/spf13/afero"
"github.com/stretchr/testify/assert"
)
@@ -53,7 +53,7 @@ func (s *ModuleSuite) TestGoGetFetcherFetch() {
func ExampleFetcher() {
repoURI := "github.com/arschles/assert"
version := "v1.0.0"
goBinaryName := env.GoBinPath()
goBinaryName := envy.Get("GO_BINARY_PATH", "go")
fetcher, err := NewGoGetFetcher(goBinaryName, afero.NewOsFs())
if err != nil {
log.Fatal(err)
+1 -4
View File
@@ -11,9 +11,6 @@ import (
"go.opencensus.io/trace"
)
// ENV is used to define the sampling rate
var ENV = env.GoEnvironmentWithDefault("development")
// observabilityContext is a private context that is used by the packages to start the span
type observabilityContext struct {
buffalo.Context
@@ -23,7 +20,7 @@ type observabilityContext struct {
// RegisterTraceExporter returns a jaeger exporter for exporting traces to opencensus.
// It should in the future have a nice sampling rate defined
// TODO: Extend beyond jaeger
func RegisterTraceExporter(service string) (*(jaeger.Exporter), error) {
func RegisterTraceExporter(service, ENV string) (*(jaeger.Exporter), error) {
const op errors.Op = "RegisterTracer"
collectorEndpointURI := env.TraceExporterURL()
if collectorEndpointURI == "" {
+11 -10
View File
@@ -7,10 +7,10 @@ import (
"io"
"net/url"
"github.com/gomods/athens/pkg/config/env"
"github.com/gomods/athens/pkg/config"
"github.com/gomods/athens/pkg/errors"
"github.com/gomods/athens/pkg/observ"
moduploader "github.com/gomods/athens/pkg/storage/module"
"github.com/opentracing/opentracing-go"
)
type client interface {
@@ -22,27 +22,28 @@ type client interface {
type Storage struct {
cl client
baseURI *url.URL
cdnConf *config.CDNConfig
}
// New creates a new azure CDN saver
func New(accountName, accountKey, containerName string) (*Storage, error) {
func New(accountName, accountKey, containerName string, cdnConf *config.CDNConfig) (*Storage, error) {
const op errors.Op = "azurecdn.New"
u, err := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net", accountName))
if err != nil {
return nil, errors.E(op, err)
}
cl := newBlobStoreClient(u, accountName, accountKey, containerName)
return &Storage{cl: cl, baseURI: u}, nil
return &Storage{cl: cl, baseURI: u, cdnConf: cdnConf}, nil
}
// newWithClient creates a new azure CDN saver
func newWithClient(accountName, cl client) (*Storage, error) {
func newWithClient(accountName, cl client, cdnConf *config.CDNConfig) (*Storage, error) {
const op errors.Op = "azurecdn.newWithClient"
u, err := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net", accountName))
if err != nil {
return nil, errors.E(op, err)
}
return &Storage{cl: cl, baseURI: u}, nil
return &Storage{cl: cl, baseURI: u, cdnConf: cdnConf}, nil
}
// BaseURL returns the base URL that stores all modules. It can be used
@@ -52,15 +53,15 @@ func newWithClient(accountName, cl client) (*Storage, error) {
//
// <meta name="go-import" content="gomods.com/athens mod BaseURL()">
func (s Storage) BaseURL() *url.URL {
return env.CDNEndpointWithDefault(s.baseURI)
return s.cdnConf.CDNEndpointWithDefault(s.baseURI)
}
// Save implements the (github.com/gomods/athens/pkg/storage).Saver interface.
func (s *Storage) Save(ctx context.Context, module, version string, mod []byte, zip io.Reader, info []byte) error {
const op errors.Op = "azurecdn.Save"
ctx, span := observ.StartSpan(ctx, op.String())
span.End()
err := moduploader.Upload(ctx, module, version, bytes.NewReader(info), bytes.NewReader(mod), zip, s.cl.UploadWithContext)
sp, ctx := opentracing.StartSpanFromContext(ctx, "storage.azurecdn.Save")
sp.Finish()
err := moduploader.Upload(ctx, module, version, bytes.NewReader(info), bytes.NewReader(mod), zip, s.cl.UploadWithContext, s.cdnConf.TimeoutDuration())
// TODO: take out lease on the /list file and add the version to it
//
// Do that only after module source+metadata is uploaded
+3 -1
View File
@@ -4,7 +4,9 @@ import (
"context"
"net/url"
"testing"
"time"
"github.com/gomods/athens/pkg/config"
"github.com/stretchr/testify/suite"
)
@@ -30,7 +32,7 @@ func (g *GcpTests) SetupSuite() {
g.version = "v1.2.3"
g.url, _ = url.Parse("https://storage.googleapis.com/testbucket")
g.bucket = newBucketMock()
g.store = newWithBucket(g.bucket, g.url)
g.store = newWithBucket(g.bucket, g.url, time.Second, &config.CDNConfig{})
}
func TestGcpStorage(t *testing.T) {
+1 -1
View File
@@ -24,5 +24,5 @@ func (s *Storage) Delete(ctx context.Context, module, version string) error {
return errors.E(op, errors.M(module), errors.V(version), errors.KindNotFound)
}
return modupl.Delete(ctx, module, version, s.bucket.Delete)
return modupl.Delete(ctx, module, version, s.bucket.Delete, s.timeout)
}
+13 -9
View File
@@ -5,9 +5,10 @@ import (
"fmt"
"net/http"
"net/url"
"time"
"cloud.google.com/go/storage"
"github.com/gomods/athens/pkg/config/env"
"github.com/gomods/athens/pkg/config"
"github.com/gomods/athens/pkg/errors"
"google.golang.org/api/googleapi"
)
@@ -18,6 +19,8 @@ type Storage struct {
baseURI *url.URL
closeStorage func() error
projectID string
cdnConf *config.CDNConfig
timeout time.Duration
}
// New returns a new Storage instance backed by a Google Cloud Storage bucket.
@@ -28,22 +31,19 @@ type Storage struct {
// to the path of your service account file. If you're running on GCP (e.g. AppEngine),
// credentials will be automatically provided.
// See https://cloud.google.com/docs/authentication/getting-started.
func New(ctx context.Context) (*Storage, error) {
func New(ctx context.Context, gcpConf *config.GCPConfig, cdnConf *config.CDNConfig) (*Storage, error) {
const op errors.Op = "gcp.New"
storage, err := storage.NewClient(ctx)
if err != nil {
return nil, errors.E(op, fmt.Errorf("could not create new storage client: %s", err))
}
bucketname, err := env.GCPBucketName()
if err != nil {
return nil, errors.E(op, err)
}
bucketname := gcpConf.Bucket
u, err := url.Parse(fmt.Sprintf("https://storage.googleapis.com/%s", bucketname))
if err != nil {
return nil, errors.E(op, err)
}
bkt := gcpBucket{storage.Bucket(bucketname)}
err = bkt.Create(ctx, env.GCPProjectID(), nil)
err = bkt.Create(ctx, gcpConf.ProjectID, nil)
if err != nil && !bucketExistsErr(err) {
return nil, errors.E(op, err)
}
@@ -52,6 +52,8 @@ func New(ctx context.Context) (*Storage, error) {
bucket: &bkt,
baseURI: u,
closeStorage: storage.Close,
cdnConf: cdnConf,
timeout: gcpConf.TimeoutDuration(),
}, nil
}
@@ -64,11 +66,13 @@ func bucketExistsErr(err error) bool {
return apiErr.Code == http.StatusConflict
}
func newWithBucket(bkt Bucket, uri *url.URL) *Storage {
func newWithBucket(bkt Bucket, uri *url.URL, timeout time.Duration, cdnConf *config.CDNConfig) *Storage {
return &Storage{
bucket: bkt,
baseURI: uri,
closeStorage: func() error { return nil },
timeout: timeout,
cdnConf: cdnConf,
}
}
@@ -79,7 +83,7 @@ func newWithBucket(bkt Bucket, uri *url.URL) *Storage {
//
// <meta name="go-import" content="gomods.com/athens mod BaseURL()">
func (s *Storage) BaseURL() *url.URL {
return env.CDNEndpointWithDefault(s.baseURI)
return s.cdnConf.CDNEndpointWithDefault(s.baseURI)
}
// Close calls the underlying storage client's close method
+1 -1
View File
@@ -31,7 +31,7 @@ func (s *Storage) Save(ctx context.Context, module, version string, mod []byte,
return errors.E(op, "already exists", errors.M(module), errors.V(version), errors.KindAlreadyExists)
}
err = moduploader.Upload(ctx, module, version, bytes.NewReader(info), bytes.NewReader(mod), zip, s.upload)
err = moduploader.Upload(ctx, module, version, bytes.NewReader(info), bytes.NewReader(mod), zip, s.upload, s.timeout)
if err != nil {
return errors.E(op, err, errors.M(module), errors.V(version))
}
+9 -1
View File
@@ -3,6 +3,7 @@ package minio
import (
"testing"
"github.com/gomods/athens/pkg/config"
"github.com/gomods/athens/pkg/storage"
minio "github.com/minio/minio-go"
"github.com/stretchr/testify/suite"
@@ -19,7 +20,14 @@ func (d *MinioTests) SetupTest() {
d.bucketName = "gomods"
d.accessKeyID = "minio"
d.secretAccessKey = "minio123"
storage, err := NewStorage(d.endpoint, d.accessKeyID, d.secretAccessKey, d.bucketName, false)
conf := &config.MinioConfig{
Endpoint: d.endpoint,
Bucket: d.bucketName,
Key: d.accessKeyID,
Secret: d.secretAccessKey,
EnableSSL: false,
}
storage, err := NewStorage(conf)
d.Require().NoError(err)
d.storage = storage
}
+7 -1
View File
@@ -3,6 +3,7 @@ package minio
import (
"fmt"
"github.com/gomods/athens/pkg/config"
"github.com/gomods/athens/pkg/errors"
"github.com/gomods/athens/pkg/storage"
minio "github.com/minio/minio-go"
@@ -19,8 +20,13 @@ func (s *storageImpl) versionLocation(module, version string) string {
// NewStorage returns a new ListerSaver implementation that stores
// everything under rootDir
func NewStorage(endpoint, accessKeyID, secretAccessKey, bucketName string, useSSL bool) (storage.Backend, error) {
func NewStorage(conf *config.MinioConfig) (storage.Backend, error) {
const op errors.Op = "minio.NewStorage"
endpoint := conf.Endpoint
accessKeyID := conf.Key
secretAccessKey := conf.Secret
bucketName := conf.Bucket
useSSL := conf.EnableSSL
minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)
if err != nil {
return nil, errors.E(op, err)
+9 -1
View File
@@ -2,6 +2,7 @@ package minio
import (
"github.com/gobuffalo/suite"
"github.com/gomods/athens/pkg/config"
"github.com/gomods/athens/pkg/storage"
minio "github.com/minio/minio-go"
)
@@ -19,7 +20,14 @@ func NewTestSuite(model *suite.Model) (storage.TestSuite, error) {
bucketName := "gomods"
accessKeyID := "minio"
secretAccessKey := "minio123"
minioStorage, err := NewStorage(endpoint, accessKeyID, secretAccessKey, bucketName, false)
conf := &config.MinioConfig{
Endpoint: endpoint,
Bucket: bucketName,
Key: accessKeyID,
Secret: secretAccessKey,
EnableSSL: false,
}
minioStorage, err := NewStorage(conf)
return &TestSuite{
storage: minioStorage,
+3 -3
View File
@@ -3,9 +3,9 @@ package module
import (
"context"
"fmt"
"time"
"github.com/gomods/athens/pkg/config"
"github.com/gomods/athens/pkg/config/env"
"github.com/gomods/athens/pkg/errors"
multierror "github.com/hashicorp/go-multierror"
)
@@ -15,9 +15,9 @@ type Deleter func(ctx context.Context, path string) error
// Delete deletes .info, .mod and .zip files from the blob store in parallel.
// Returns multierror containing errors from all deletes and timeouts
func Delete(ctx context.Context, module, version string, delete Deleter) error {
func Delete(ctx context.Context, module, version string, delete Deleter, timeout time.Duration) error {
const op errors.Op = "module.Delete"
tctx, cancel := context.WithTimeout(ctx, env.Timeout())
tctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
del := func(ext string) <-chan error {
+2 -2
View File
@@ -29,7 +29,7 @@ func (d *DeleteTests) TearDownTest() {
func (d *DeleteTests) TestDeleteTimeout() {
r := d.Require()
err := Delete(context.Background(), "mx", "1.1.1", delWithTimeout)
err := Delete(context.Background(), "mx", "1.1.1", delWithTimeout, time.Second)
r.Error(err, "deleter returned at least one error")
r.Contains(err.Error(), "deleting mx.1.1.1.info failed: context deadline exceeded")
@@ -40,7 +40,7 @@ func (d *DeleteTests) TestDeleteTimeout() {
func (d *DeleteTests) TestDeleteError() {
r := d.Require()
err := Delete(context.Background(), "mx", "1.1.1", delWithErr)
err := Delete(context.Background(), "mx", "1.1.1", delWithErr, time.Second)
r.Error(err, "deleter returned at least one error")
r.Contains(err.Error(), "some err")
+3 -3
View File
@@ -4,9 +4,9 @@ import (
"context"
"fmt"
"io"
"time"
"github.com/gomods/athens/pkg/config"
"github.com/gomods/athens/pkg/config/env"
"github.com/gomods/athens/pkg/errors"
multierror "github.com/hashicorp/go-multierror"
)
@@ -18,9 +18,9 @@ type Uploader func(ctx context.Context, path, contentType string, stream io.Read
// Upload saves .info, .mod and .zip files to the blob store in parallel.
// Returns multierror containing errors from all uploads and timeouts
func Upload(ctx context.Context, module, version string, info, mod, zip io.Reader, uploader Uploader) error {
func Upload(ctx context.Context, module, version string, info, mod, zip io.Reader, uploader Uploader, timeout time.Duration) error {
const op errors.Op = "module.Upload"
tctx, cancel := context.WithTimeout(ctx, env.Timeout())
tctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
save := func(ext, contentType string, stream io.Reader) <-chan error {
+2 -2
View File
@@ -31,7 +31,7 @@ func (u *UploadTests) TearDownTest() {
func (u *UploadTests) TestUploadTimeout() {
r := u.Require()
rd := bytes.NewReader([]byte("123"))
err := Upload(context.Background(), "mx", "1.1.1", rd, rd, rd, uplWithTimeout)
err := Upload(context.Background(), "mx", "1.1.1", rd, rd, rd, uplWithTimeout, time.Second)
r.Error(err, "deleter returned at least one error")
r.Contains(err.Error(), "uploading mx.1.1.1.info failed: context deadline exceeded")
r.Contains(err.Error(), "uploading mx.1.1.1.zip failed: context deadline exceeded")
@@ -41,7 +41,7 @@ func (u *UploadTests) TestUploadTimeout() {
func (u *UploadTests) TestUploadError() {
r := u.Require()
rd := bytes.NewReader([]byte("123"))
err := Upload(context.Background(), "mx", "1.1.1", rd, rd, rd, uplWithErr)
err := Upload(context.Background(), "mx", "1.1.1", rd, rd, rd, uplWithErr, time.Second)
r.Error(err, "deleter returned at least one error")
r.Contains(err.Error(), "some err")
}
+1 -1
View File
@@ -11,7 +11,7 @@ type MongoTests struct {
}
func (d *MongoTests) SetupTest() {
ms, err := newTestStore()
ms, err := newTestStore(testConfigFile)
d.Require().NoError(err)
+11 -12
View File
@@ -7,9 +7,10 @@ import (
"io/ioutil"
"net"
"strings"
"time"
"github.com/globalsign/mgo"
"github.com/gomods/athens/pkg/config/env"
"github.com/gomods/athens/pkg/config"
"github.com/gomods/athens/pkg/errors"
)
@@ -20,19 +21,17 @@ type ModuleStore struct {
c string // collection
url string
certPath string
timeout time.Duration
}
// NewStorage returns a connected Mongo backed storage
// that satisfies the Backend interface.
func NewStorage(connectionString string) (*ModuleStore, error) {
return NewStorageWithCert(connectionString, "")
}
// NewStorageWithCert returns a connected Mongo backed storage
// that satisfies the Backend interface.
func NewStorageWithCert(connectionString, certPath string) (*ModuleStore, error) {
func NewStorage(conf *config.MongoConfig) (*ModuleStore, error) {
const op errors.Op = "fs.NewStorage"
ms := &ModuleStore{url: connectionString, certPath: certPath}
if conf == nil {
return nil, errors.E(op, "No Mongo Configuration provided")
}
ms := &ModuleStore{url: conf.URL, certPath: conf.CertPath, timeout: conf.TimeoutDuration()}
err := ms.connect()
if err != nil {
@@ -46,7 +45,7 @@ func (m *ModuleStore) connect() error {
const op errors.Op = "mongo.connect"
var err error
m.s, err = m.newSession()
m.s, err = m.newSession(m.timeout)
if err != nil {
return errors.E(op, err)
}
@@ -66,7 +65,7 @@ func (m *ModuleStore) connect() error {
return c.EnsureIndex(index)
}
func (m *ModuleStore) newSession() (*mgo.Session, error) {
func (m *ModuleStore) newSession(timeout time.Duration) (*mgo.Session, error) {
tlsConfig := &tls.Config{}
dialInfo, err := mgo.ParseURL(m.url)
@@ -74,7 +73,7 @@ func (m *ModuleStore) newSession() (*mgo.Session, error) {
return nil, err
}
dialInfo.Timeout = env.MongoConnectionTimeoutSecWithDefault(1)
dialInfo.Timeout = timeout
if m.certPath != "" {
roots := x509.NewCertPool()
+6 -5
View File
@@ -1,16 +1,17 @@
package mongo
import "github.com/gomods/athens/pkg/config/env"
import (
"github.com/gomods/athens/pkg/config"
)
func (m *MongoTests) TestNewMongoStorage() {
r := m.Require()
muri := env.MongoConnectionString()
certPath := env.MongoCertPath()
getterSaver, err := NewStorageWithCert(muri, certPath)
conf := config.GetConfLogErr(testConfigFile, m.T())
getterSaver, err := NewStorage(conf.Storage.Mongo)
r.NoError(err)
r.NotNil(getterSaver.c)
r.NotNil(getterSaver.d)
r.NotNil(getterSaver.s)
r.Equal(getterSaver.url, muri)
r.Equal(getterSaver.url, conf.Storage.Mongo.URL)
}
+22 -10
View File
@@ -2,13 +2,18 @@ package mongo
import (
"fmt"
"path/filepath"
"github.com/globalsign/mgo"
"github.com/gobuffalo/suite"
"github.com/gomods/athens/pkg/config/env"
"github.com/gomods/athens/pkg/config"
"github.com/gomods/athens/pkg/storage"
)
var (
testConfigFile = filepath.Join("..", "..", "..", "config.test.toml")
)
// TestSuite implements storage.TestSuite interface
type TestSuite struct {
*suite.Model
@@ -16,8 +21,8 @@ type TestSuite struct {
}
// NewTestSuite creates a common test suite
func NewTestSuite(model *suite.Model) (storage.TestSuite, error) {
ms, err := newTestStore()
func NewTestSuite(model *suite.Model, configFile string) (storage.TestSuite, error) {
ms, err := newTestStore(configFile)
if err != nil {
return nil, err
}
@@ -27,10 +32,12 @@ func NewTestSuite(model *suite.Model) (storage.TestSuite, error) {
}, err
}
func newTestStore() (*ModuleStore, error) {
muri := env.MongoConnectionString()
certPath := env.MongoCertPath()
mongoStore, err := NewStorageWithCert(muri, certPath)
func newTestStore(configFile string) (*ModuleStore, error) {
conf, err := config.GetConf(configFile)
if err != nil {
return nil, err
}
mongoStore, err := NewStorage(conf.Storage.Mongo)
if err != nil {
return nil, fmt.Errorf("Not able to connect to mongo storage: %s", err.Error())
}
@@ -50,9 +57,14 @@ func (ts *TestSuite) StorageHumanReadableName() string {
// Cleanup tears down test
func (ts *TestSuite) Cleanup() error {
muri := env.MongoConnectionString()
timeout := env.MongoConnectionTimeoutSecWithDefault(1)
s, err := mgo.DialWithTimeout(muri, timeout)
conf, err := config.GetConf(testConfigFile)
if err != nil {
return err
}
if conf.Storage == nil || conf.Storage.Mongo == nil {
return fmt.Errorf("Invalid Mongo Storage Provided")
}
s, err := mgo.DialWithTimeout(conf.Storage.Mongo.URL, conf.Storage.Mongo.TimeoutDuration())
defer s.Close()
if err != nil {
return err
+11 -1
View File
@@ -2,11 +2,17 @@ package s3
import (
"fmt"
"path/filepath"
"testing"
"github.com/gomods/athens/pkg/config"
"github.com/stretchr/testify/suite"
)
var (
testConfigFile = filepath.Join("..", "..", "..", "config.test.toml")
)
type S3Tests struct {
suite.Suite
uploader *s3UploaderMock
@@ -15,7 +21,11 @@ type S3Tests struct {
func Test_ActionSuite(t *testing.T) {
uploaderMock := newUploaderMock()
storage, err := NewWithUploader("test", uploaderMock)
conf := config.GetConfLogErr(testConfigFile, t)
if conf.Storage == nil || conf.Storage.CDN == nil {
t.Fatalf("Invalid CDN Config provided")
}
storage, err := NewWithUploader("test", uploaderMock, conf.Storage.CDN)
if err != nil {
t.Error(err)
}
+12 -8
View File
@@ -10,10 +10,11 @@ import (
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface"
"github.com/gomods/athens/pkg/config/env"
"github.com/gomods/athens/pkg/config"
"github.com/gomods/athens/pkg/errors"
"github.com/gomods/athens/pkg/observ"
moduploader "github.com/gomods/athens/pkg/storage/module"
"github.com/opentracing/opentracing-go"
)
// Storage implements (github.com/gomods/athens/pkg/storage).Saver and
@@ -28,10 +29,11 @@ type Storage struct {
bucket string
baseURI *url.URL
uploader s3manageriface.UploaderAPI
cdnConf *config.CDNConfig
}
// New creates a new AWS S3 CDN saver
func New(bucketName string) (*Storage, error) {
func New(bucketName string, cdnConf *config.CDNConfig) (*Storage, error) {
const op errors.Op = "s3.New"
u, err := url.Parse(fmt.Sprintf("http://%s.s3.amazonaws.com", bucketName))
if err != nil {
@@ -49,11 +51,12 @@ func New(bucketName string) (*Storage, error) {
bucket: bucketName,
uploader: uploader,
baseURI: u,
cdnConf: cdnConf,
}, nil
}
// NewWithUploader creates a new AWS S3 CDN saver with provided uploader
func NewWithUploader(bucketName string, uploader s3manageriface.UploaderAPI) (*Storage, error) {
func NewWithUploader(bucketName string, uploader s3manageriface.UploaderAPI, cdnConf *config.CDNConfig) (*Storage, error) {
const op errors.Op = "s3.NewWithUploader"
u, err := url.Parse(fmt.Sprintf("http://%s.s3.amazonaws.com", bucketName))
if err != nil {
@@ -64,6 +67,7 @@ func NewWithUploader(bucketName string, uploader s3manageriface.UploaderAPI) (*S
bucket: bucketName,
uploader: uploader,
baseURI: u,
cdnConf: cdnConf,
}, nil
}
@@ -74,15 +78,15 @@ func NewWithUploader(bucketName string, uploader s3manageriface.UploaderAPI) (*S
//
// <meta name="go-import" content="gomods.com/athens mod BaseURL()">
func (s Storage) BaseURL() *url.URL {
return env.CDNEndpointWithDefault(s.baseURI)
return s.cdnConf.CDNEndpointWithDefault(s.baseURI)
}
// Save implements the (github.com/gomods/athens/pkg/storage).Saver interface.
func (s *Storage) Save(ctx context.Context, module, version string, mod []byte, zip io.Reader, info []byte) error {
const op errors.Op = "storage.s3.Save"
ctx, span := observ.StartSpan(ctx, op.String())
defer span.End()
err := moduploader.Upload(ctx, module, version, bytes.NewReader(info), bytes.NewReader(mod), zip, s.upload)
const op errors.Op = "s3.Save"
sp, ctx := opentracing.StartSpanFromContext(ctx, "storage.s3.Save")
defer sp.Finish()
err := moduploader.Upload(ctx, module, version, bytes.NewReader(info), bytes.NewReader(mod), zip, s.upload, s.cdnConf.TimeoutDuration())
// TODO: take out lease on the /list file and add the version to it
//
// Do that only after module source+metadata is uploaded
@@ -130,7 +130,7 @@ func getStores(b *testing.B) []storage.TestSuite {
require.NoError(b, err, "couldn't create filesystem store")
stores = append(stores, fsStore)
mongoStore, err := mongo.NewTestSuite(model)
mongoStore, err := mongo.NewTestSuite(model, testConfigFile)
require.NoError(b, err, "couldn't create mongo store")
stores = append(stores, mongoStore)
@@ -11,6 +11,7 @@ import (
"bytes"
"context"
"io/ioutil"
"path/filepath"
"testing"
"time"
@@ -23,6 +24,10 @@ import (
"github.com/gomods/athens/pkg/storage/mongo"
)
var (
testConfigFile = filepath.Join("..", "..", "..", "..", "config.test.toml")
)
type TestSuites struct {
*suite.Model
storages []storage.TestSuite
@@ -52,7 +57,7 @@ func (d *TestSuites) SetupTest() {
d.storages = append(d.storages, minioStorage)
// mongo
mongoStore, err := mongo.NewTestSuite(d.Model)
mongoStore, err := mongo.NewTestSuite(d.Model, testConfigFile)
ra.NoError(err)
d.storages = append(d.storages, mongoStore)
+7
View File
@@ -0,0 +1,7 @@
#!/bin/bash
SCRIPTS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
if [ ! -e "${SCRIPTS_DIR}/../config.toml" ] ; then
cp "${SCRIPTS_DIR}/../config.example.toml" "${SCRIPTS_DIR}/../config.toml"
fi
-16
View File
@@ -1,16 +0,0 @@
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-129
View File
@@ -1,129 +0,0 @@
This package was debianized by Thrift Developer's <dev@thrift.apache.org>.
This package and the Debian packaging is licensed under the Apache License,
see `/usr/share/common-licenses/Apache-2.0'.
The following information was copied from Apache Thrift LICENSE file.
--------------------------------------------------
SOFTWARE DISTRIBUTED WITH THRIFT:
The Apache Thrift software includes a number of subcomponents with
separate copyright notices and license terms. Your use of the source
code for the these subcomponents is subject to the terms and
conditions of the following licenses.
--------------------------------------------------
Portions of the following files are licensed under the MIT License:
lib/erl/src/Makefile.am
Please see doc/otp-base-license.txt for the full terms of this license.
--------------------------------------------------
The following files contain some portions of code contributed under
the Thrift Software License (see doc/old-thrift-license.txt), and relicensed
under the Apache 2.0 License:
compiler/cpp/Makefile.am
compiler/cpp/src/generate/t_cocoa_generator.cc
compiler/cpp/src/generate/t_cpp_generator.cc
compiler/cpp/src/generate/t_csharp_generator.cc
compiler/cpp/src/generate/t_erl_generator.cc
compiler/cpp/src/generate/t_hs_generator.cc
compiler/cpp/src/generate/t_java_generator.cc
compiler/cpp/src/generate/t_ocaml_generator.cc
compiler/cpp/src/generate/t_perl_generator.cc
compiler/cpp/src/generate/t_php_generator.cc
compiler/cpp/src/generate/t_py_generator.cc
compiler/cpp/src/generate/t_rb_generator.cc
compiler/cpp/src/generate/t_st_generator.cc
compiler/cpp/src/generate/t_xsd_generator.cc
compiler/cpp/src/main.cc
compiler/cpp/src/parse/t_field.h
compiler/cpp/src/parse/t_program.h
compiler/cpp/src/platform.h
compiler/cpp/src/thriftl.ll
compiler/cpp/src/thrifty.yy
lib/csharp/src/Protocol/TBinaryProtocol.cs
lib/csharp/src/Protocol/TField.cs
lib/csharp/src/Protocol/TList.cs
lib/csharp/src/Protocol/TMap.cs
lib/csharp/src/Protocol/TMessage.cs
lib/csharp/src/Protocol/TMessageType.cs
lib/csharp/src/Protocol/TProtocol.cs
lib/csharp/src/Protocol/TProtocolException.cs
lib/csharp/src/Protocol/TProtocolFactory.cs
lib/csharp/src/Protocol/TProtocolUtil.cs
lib/csharp/src/Protocol/TSet.cs
lib/csharp/src/Protocol/TStruct.cs
lib/csharp/src/Protocol/TType.cs
lib/csharp/src/Server/TServer.cs
lib/csharp/src/Server/TSimpleServer.cs
lib/csharp/src/Server/TThreadPoolServer.cs
lib/csharp/src/TApplicationException.cs
lib/csharp/src/Thrift.csproj
lib/csharp/src/Thrift.sln
lib/csharp/src/TProcessor.cs
lib/csharp/src/Transport/TServerSocket.cs
lib/csharp/src/Transport/TServerTransport.cs
lib/csharp/src/Transport/TSocket.cs
lib/csharp/src/Transport/TStreamTransport.cs
lib/csharp/src/Transport/TTransport.cs
lib/csharp/src/Transport/TTransportException.cs
lib/csharp/src/Transport/TTransportFactory.cs
lib/csharp/ThriftMSBuildTask/Properties/AssemblyInfo.cs
lib/csharp/ThriftMSBuildTask/ThriftBuild.cs
lib/csharp/ThriftMSBuildTask/ThriftMSBuildTask.csproj
lib/rb/lib/thrift.rb
lib/st/README
lib/st/thrift.st
test/OptionalRequiredTest.cpp
test/OptionalRequiredTest.thrift
test/ThriftTest.thrift
--------------------------------------------------
For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components:
# Copyright (c) 2007 Thomas Porschberg <thomas@randspringer.de>
#
# Copying and distribution of this file, with or without
# modification, are permitted in any medium without royalty provided
# the copyright notice and this notice are preserved.
--------------------------------------------------
For the compiler/cpp/src/md5.[ch] components:
/*
Copyright (C) 1999, 2000, 2002 Aladdin Enterprises. All rights reserved.
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
L. Peter Deutsch
ghost@aladdin.com
*/
---------------------------------------------------
For the lib/rb/setup.rb: Copyright (c) 2000-2005 Minero Aoki,
lib/ocaml/OCamlMakefile and lib/ocaml/README-OCamlMakefile components:
Copyright (C) 1999 - 2007 Markus Mottl
Licensed under the terms of the GNU Lesser General Public License 2.1
(see doc/lgpl-2.1.txt for the full terms of this license)
-16
View File
@@ -1,16 +0,0 @@
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-6
View File
@@ -448,9 +448,6 @@ func (p *TBinaryProtocol) ReadBinary() ([]byte, error) {
if size < 0 {
return nil, invalidDataLength
}
if uint64(size) > p.trans.RemainingBytes() {
return nil, invalidDataLength
}
isize := int(size)
buf := make([]byte, isize)
@@ -481,9 +478,6 @@ func (p *TBinaryProtocol) readStringBody(size int32) (value string, err error) {
if size < 0 {
return "", nil
}
if uint64(size) > p.trans.RemainingBytes() {
return "", invalidDataLength
}
var (
buf bytes.Buffer
-6
View File
@@ -562,9 +562,6 @@ func (p *TCompactProtocol) ReadString() (value string, err error) {
if length < 0 {
return "", invalidDataLength
}
if uint64(length) > p.trans.RemainingBytes() {
return "", invalidDataLength
}
if length == 0 {
return "", nil
@@ -591,9 +588,6 @@ func (p *TCompactProtocol) ReadBinary() (value []byte, err error) {
if length < 0 {
return nil, invalidDataLength
}
if uint64(length) > p.trans.RemainingBytes() {
return nil, invalidDataLength
}
buf := make([]byte, length)
_, e = io.ReadFull(p.trans, buf)
-202
View File
@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-1
View File
@@ -1 +0,0 @@
client.sh
-239
View File
@@ -1,239 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------
SOFTWARE DISTRIBUTED WITH THRIFT:
The Apache Thrift software includes a number of subcomponents with
separate copyright notices and license terms. Your use of the source
code for the these subcomponents is subject to the terms and
conditions of the following licenses.
--------------------------------------------------
Portions of the following files are licensed under the MIT License:
lib/erl/src/Makefile.am
Please see doc/otp-base-license.txt for the full terms of this license.
--------------------------------------------------
For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components:
# Copyright (c) 2007 Thomas Porschberg <thomas@randspringer.de>
#
# Copying and distribution of this file, with or without
# modification, are permitted in any medium without royalty provided
# the copyright notice and this notice are preserved.
--------------------------------------------------
For the lib/nodejs/lib/thrift/json_parse.js:
/*
json_parse.js
2015-05-02
Public Domain.
NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
*/
(By Douglas Crockford <douglas@crockford.com>)
--------------------------------------------------
-20
View File
@@ -1,20 +0,0 @@
Copyright (C) 2013 Blake Mizerany
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
File diff suppressed because it is too large Load Diff
-316
View File
@@ -1,316 +0,0 @@
// Package quantile computes approximate quantiles over an unbounded data
// stream within low memory and CPU bounds.
//
// A small amount of accuracy is traded to achieve the above properties.
//
// Multiple streams can be merged before calling Query to generate a single set
// of results. This is meaningful when the streams represent the same type of
// data. See Merge and Samples.
//
// For more detailed information about the algorithm used, see:
//
// Effective Computation of Biased Quantiles over Data Streams
//
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
package quantile
import (
"math"
"sort"
)
// Sample holds an observed value and meta information for compression. JSON
// tags have been added for convenience.
type Sample struct {
	Value float64 `json:",string"` // the observed value
	Width float64 `json:",string"` // number of observations this sample stands for
	Delta float64 `json:",string"` // rank uncertainty allowed by the invariant at insert time
}

// Samples represents a slice of samples. It implements sort.Interface.
type Samples []Sample

func (a Samples) Len() int { return len(a) }

// Less orders samples by observed value, ascending.
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }

func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// invariant is the error function ƒ(s, r) from the paper referenced in the
// package comment: given the stream and a rank r it returns the maximum
// span (width + delta) a compressed sample may cover at that rank. The
// choice of invariant yields low-biased, high-biased, or targeted error
// guarantees.
type invariant func(s *stream, r float64) float64
// NewLowBiased returns an initialized Stream for low-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the lower ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewLowBiased(epsilon float64) *Stream {
	// Allowed error grows with rank r, so the low end of the
	// distribution stays precise.
	return newStream(func(_ *stream, r float64) float64 {
		return 2 * epsilon * r
	})
}
// NewHighBiased returns an initialized Stream for high-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the higher ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewHighBiased(epsilon float64) *Stream {
	// Allowed error shrinks as rank r approaches n, keeping the high
	// end of the distribution precise.
	return newStream(func(s *stream, r float64) float64 {
		return 2 * epsilon * (s.n - r)
	})
}
// NewTargeted returns an initialized Stream concerned with a particular set of
// quantile values that are supplied a priori. Knowing these a priori reduces
// space and computation time. The targets map maps the desired quantiles to
// their absolute errors, i.e. the true quantile of a value returned by a query
// is guaranteed to be within (Quantile±Epsilon).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
func NewTargeted(targetMap map[float64]float64) *Stream {
	// The invariant runs on the hot path, so flatten the map into a
	// slice once up front; iterating a slice is significantly cheaper
	// than iterating a map.
	targets := targetMapToSlice(targetMap)
	return newStream(func(s *stream, r float64) float64 {
		best := math.MaxFloat64
		for _, t := range targets {
			// Pick the error bound formula by which side of the
			// target quantile the rank r falls on.
			var allowed float64
			if t.quantile*s.n <= r {
				allowed = (2 * t.epsilon * r) / t.quantile
			} else {
				allowed = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
			}
			if allowed < best {
				best = allowed
			}
		}
		return best
	})
}
// target pairs a desired quantile with its allowed absolute error.
type target struct {
	quantile float64
	epsilon  float64
}

// targetMapToSlice flattens a quantile→epsilon map into a slice so the
// invariant function can iterate it cheaply on the hot path.
func targetMapToSlice(targetMap map[float64]float64) []target {
	out := make([]target, 0, len(targetMap))
	for q, e := range targetMap {
		out = append(out, target{quantile: q, epsilon: e})
	}
	return out
}
// Stream computes quantiles for a stream of float64s. It is not thread-safe by
// design. Take care when using across multiple goroutines.
type Stream struct {
	*stream        // compressed summary; the buffer below is merged into it on flush
	b      Samples // raw, not-yet-merged observations
	sorted bool    // whether b is currently sorted
}

// newStream builds a Stream around the given invariant with a 500-entry
// insert buffer; insert flushes the buffer into the summary when it fills.
func newStream(ƒ invariant) *Stream {
	x := &stream{ƒ: ƒ}
	return &Stream{x, make(Samples, 0, 500), true}
}
// Insert inserts v into the stream.
func (s *Stream) Insert(v float64) {
	s.insert(Sample{Value: v, Width: 1})
}

// insert buffers the sample and, once the buffer reaches its capacity
// (fixed in newStream), flushes it into the compressed summary.
func (s *Stream) insert(sample Sample) {
	s.b = append(s.b, sample)
	s.sorted = false
	if len(s.b) == cap(s.b) {
		s.flush()
	}
}
// Query returns the computed qth percentiles value. If s was created with
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
// will return an unspecified result.
func (s *Stream) Query(q float64) float64 {
	if !s.flushed() {
		// Fast path when there hasn't been enough data for a flush;
		// this also yields better accuracy for small sets of data.
		l := len(s.b)
		if l == 0 {
			return 0
		}
		// Rank of the qth quantile in the raw buffer, converted to a
		// zero-based index (clamped at 0 for q == 0).
		i := int(math.Ceil(float64(l) * q))
		if i > 0 {
			i -= 1
		}
		s.maybeSort()
		return s.b[i].Value
	}
	// Data has reached the compressed summary; fold in any buffered
	// samples, then answer from the summary.
	s.flush()
	return s.stream.query(q)
}
// Merge merges samples into the underlying streams samples. This is handy when
// merging multiple streams from separate threads, database shards, etc.
//
// ATTENTION: This method is broken and does not yield correct results. The
// underlying algorithm is not capable of merging streams correctly.
func (s *Stream) Merge(samples Samples) {
	// stream.merge scans its input in value order, so sort first.
	sort.Sort(samples)
	s.stream.merge(samples)
}

// Reset reinitializes and clears the list reusing the samples buffer memory.
func (s *Stream) Reset() {
	s.stream.reset()
	s.b = s.b[:0]
}

// Samples returns stream samples held by s.
func (s *Stream) Samples() Samples {
	if !s.flushed() {
		// Nothing compressed yet; the raw buffer is the entire state.
		// NOTE(review): this returns the internal buffer without
		// copying, unlike the flushed path below — callers mutating
		// the result would affect the stream.
		return s.b
	}
	s.flush()
	return s.stream.samples()
}
// Count returns the total number of samples observed in the stream
// since initialization.
func (s *Stream) Count() int {
	return s.stream.count() + len(s.b)
}

// flush merges the buffered samples into the compressed summary and
// empties the buffer, keeping its backing storage for reuse.
func (s *Stream) flush() {
	s.maybeSort()
	s.stream.merge(s.b)
	s.b = s.b[:0]
}

// maybeSort sorts the insert buffer unless it is already known sorted.
func (s *Stream) maybeSort() {
	if s.sorted {
		return
	}
	sort.Sort(s.b)
	s.sorted = true
}

// flushed reports whether any samples have reached the compressed summary.
func (s *Stream) flushed() bool {
	return len(s.stream.l) > 0
}
// stream is the compressed summary behind Stream: a list of samples kept
// sorted by Value and maintained within the error allowance of ƒ.
type stream struct {
	n float64   // total width (observation count) merged so far
	l []Sample  // compressed summary, ordered by Value
	ƒ invariant // error function bounding each sample's span
}

// reset empties the summary, reusing the backing array.
func (s *stream) reset() {
	s.l = s.l[:0]
	s.n = 0
}

// insert adds a single observation of width 1 and delta 0.
func (s *stream) insert(v float64) {
	s.merge(Samples{{v, 1, 0}})
}
// merge folds a sorted batch of samples into the summary, keeping s.l
// ordered by Value and s.n equal to the total merged width, then compresses.
// Callers must pass samples sorted ascending by Value (see Stream.Merge).
func (s *stream) merge(samples Samples) {
	// TODO(beorn7): This tries to merge not only individual samples, but
	// whole summaries. The paper doesn't mention merging summaries at
	// all. Unittests show that the merging is inaccurate. Find out how to
	// do merges properly.
	var r float64 // running rank: total width of summary entries before index i
	i := 0
	// Because both s.l and samples are sorted, i never rewinds; the whole
	// merge is a single forward pass over s.l.
	for _, sample := range samples {
		for ; i < len(s.l); i++ {
			c := s.l[i]
			if c.Value > sample.Value {
				// Insert at position i.
				s.l = append(s.l, Sample{})
				copy(s.l[i+1:], s.l[i:])
				s.l[i] = Sample{
					sample.Value,
					sample.Width,
					math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
					// TODO(beorn7): How to calculate delta correctly?
				}
				i++
				goto inserted
			}
			r += c.Width
		}
		// sample is larger than everything currently in s.l: append.
		s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
		i++
	inserted:
		s.n += sample.Width
		r += sample.Width
	}
	s.compress()
}
// count returns the number of observations merged into the summary.
func (s *stream) count() int {
	return int(s.n)
}

// query returns the value covering the target rank t = ceil(q*n) plus half
// the invariant's allowance at t, walking the summary until the cumulative
// rank (including the next sample's width and delta) would exceed t.
// NOTE(review): panics on an empty summary — Stream.Query only calls this
// after a flush, so s.l is presumed non-empty here.
func (s *stream) query(q float64) float64 {
	t := math.Ceil(q * s.n)
	t += math.Ceil(s.ƒ(s, t) / 2)
	p := s.l[0]
	var r float64 // cumulative width of samples strictly before c
	for _, c := range s.l[1:] {
		r += p.Width
		if r+c.Width+c.Delta > t {
			return p.Value
		}
		p = c
	}
	return p.Value
}
// compress walks the summary from back to front, absorbing each sample into
// its right-hand neighbor whenever the combined span stays within the
// invariant's allowance ƒ(s, r), shrinking s.l in place.
func (s *stream) compress() {
	if len(s.l) < 2 {
		return
	}
	x := s.l[len(s.l)-1]   // current merge candidate (right neighbor)
	xi := len(s.l) - 1     // index of x within s.l
	r := s.n - 1 - x.Width // rank just before the element under inspection
	for i := len(s.l) - 2; i >= 0; i-- {
		c := s.l[i]
		if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
			// Merging c into x keeps the error bound: absorb it.
			x.Width += c.Width
			s.l[xi] = x
			// Remove element at i.
			copy(s.l[i:], s.l[i+1:])
			s.l = s.l[:len(s.l)-1]
			xi -= 1
		} else {
			// c must stay; it becomes the next merge candidate.
			x = c
			xi = i
		}
		r -= c.Width
	}
}
// samples returns a copy of the compressed summary, detached from the
// stream's internal storage.
func (s *stream) samples() Samples {
	return append(Samples(nil), s.l...)
}
+9
View File
@@ -0,0 +1,9 @@
# Drone CI configuration: check the repository out at its canonical
# GOPATH location and build it with make inside a Go 1.10 container.
workspace:
  base: /go
  path: src/github.com/bketelsen/buffet
pipeline:
  build:
    image: golang:1.10
    commands:
      - make
+2
View File
@@ -0,0 +1,2 @@
dist
.envrc
+21
View File
@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2017 Brian Ketelsen
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
+97
View File
@@ -0,0 +1,97 @@
# Build automation for buffet. Assumes a GOPATH-style workspace and that
# git, docker, and goreleaser are available where the targets use them.
IMPORT_PATH := github.com/bketelsen/buffet
# NOTE(review): DOCKER_IMAGE is referenced here and in the docker target but
# never defined in this file — presumably supplied by the environment; verify.
exec := $(DOCKER_IMAGE)
github_repo := bketelsen/buffet
GITVERSION ?= dev
# V := 1 # When V is set, print commands and build progress.

.PHONY: all
all: setup test build

.PHONY: build
build: setup
	@echo "Building..."
	$Q go get ./... && go build $(if $V,-v) $(VERSION_FLAGS)

.PHONY: tags
# tags: list existing git tags.
tags:
	@echo "Listing tags..."
	$Q @git tag
# tag: create and push an annotated tag v$(GITVERSION).
tag:
	@echo "Creating tag" $(GITVERSION)
	$Q @git tag -a v$(GITVERSION) -m $(GITVERSION)
	@echo "pushing tag" $(GITVERSION)
	$Q @git push --tags

.PHONY: release
release: setup build tag
	$Q goreleaser

##### ^^^^^^ EDIT ABOVE ^^^^^^ #####
##### =====> Utility targets <===== #####

.PHONY: clean test list format docker
docker:
	@echo "Docker Build..."
	$Q docker build -t $(DOCKER_IMAGE):$(VERSION) .
clean:
	@echo "Clean..."
# test: vet and race-test; CI mode pipes output so the exit status of the
# go command (not tee) is what the target returns.
test:
	@echo "Testing..."
	$Q go get ./... && go test $(if $V,-v) ./...
ifndef CI
	@echo "Testing Outside CI..."
	@echo "VGO Vet"
	$Q go vet ./...
	@echo "VGO test -race"
	$Q GODEBUG=cgocheck=2 go test -race
else
	@echo "Testing in CI..."
	$Q ( go vet ./...; echo $$? ) | \
		tee test/vet.txt | sed '$$ d'; exit $$(tail -1 test/vet.txt)
	$Q ( GODEBUG=cgocheck=2 go test -v -race ./...; echo $$? ) | \
		tee test/output.txt | sed '$$ d'; exit $$(tail -1 test/output.txt)
endif
format: $(GOIMPORTS)
	@echo "Formatting..."
	$Q find . -iname \*.go | grep -v \
		-e "^$$" $(addprefix -e ,$(IGNORED_PACKAGES)) | xargs $(GOPATH)/bin/goimports -w

##### =====> Internals <===== #####

.PHONY: setup
setup: clean
	@echo "Setup..."
	if ! grep "dist" .gitignore > /dev/null 2>&1; then \
		echo "dist" >> .gitignore; \
	fi
	go get -u golang.org/x/vgo
	go get -u rsc.io/goversion
	go get -u golang.org/x/tools/cmd/goimports

# Version metadata baked into the binary via -ldflags.
VERSION := $(shell git describe --tags --always --dirty="-dev")
DATE := $(shell date -u '+%Y-%m-%d-%H:%M UTC')
VERSION_FLAGS := -ldflags='-X "main.Version=$(VERSION)" -X "main.BuildTime=$(DATE)"'
unexport GOBIN
# Q suppresses command echo unless V is set.
Q := $(if $V,,@)
GOIMPORTS := $(GOPATH)/bin/goimports
$(GOIMPORTS):
	@echo "Checking Import Tool Installation..."
	@test -d $(GOPATH)/bin/goimports || \
		$Q go install golang.org/x/tools/cmd/goimports
+74
View File
@@ -0,0 +1,74 @@
## buffet
[![Build Status](https://ci.ketelsen.house/api/badges/bketelsen/buffet/status.svg)](https://ci.ketelsen.house/api/badges/bketelsen/buffet)
Buffet is an OpenTracing middleware for [buffalo](https://gobuffalo.io)
## Usage
In your main:
```
tracer, closer := initTracer()
defer closer.Close()
opentracing.SetGlobalTracer(tracer)
fmt.Println(tracer)
app := actions.App(tracer)
```
initTracer looks like this:
```
func initTracer() (opentracing.Tracer, io.Closer) {
sampler := jaeger.NewConstSampler(true)
transport, err := udp.NewUDPTransport("", 0)
if err != nil {
log.Fatal(err)
}
reporter := jaeger.NewRemoteReporter(transport)
tracer, closer := jaeger.NewTracer(ServiceName, sampler, reporter)
return tracer, closer
}
```
Change your App() function to accept your tracer for initialization, and add
the middleware:
```
func App(tracer opentracing.Tracer) *buffalo.App {
if app == nil {
...
app.Use(buffet.OpenTracing(tracer))
...
```
Then instrument your handlers:
```
// HomeHandler is a default handler to serve up
// a home page.
func HomeHandler(c buffalo.Context) error {
slow(c)
return c.Render(200, r.HTML("index.html"))
}
//BadHandler returns an error
func BadHandler(c buffalo.Context) error {
return c.Error(401, errors.New("Unauthorized!"))
}
func slow(c buffalo.Context) {
sp := buffet.ChildSpan("slow", c)
defer sp.Finish()
time.Sleep(1 * time.Millisecond)
}
```
HomeHandler and BadHandler are automatically instrumented because
of the middleware. The slow() function isn't, so we pass in the buffalo
context and derive a child span from the one in the buffalo context. This
creates a child span that belongs to the parent (which was created automatically in
the middleware).
+106
View File
@@ -0,0 +1,106 @@
package buffet
import (
"strings"
"github.com/gobuffalo/buffalo"
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
olog "github.com/opentracing/opentracing-go/log"
)
// tracer is the package-level tracer installed by OpenTracing; SpanFromContext
// falls back to it when no span is stored on the buffalo context.
var tracer opentracing.Tracer

// OpenTracing is a buffalo middleware that adds the necessary
// components to the request to make it traced through OpenTracing.
// Initialize it by passing in an opentracing.Tracer.
func OpenTracing(tr opentracing.Tracer) buffalo.MiddlewareFunc {
	tracer = tr
	return func(next buffalo.Handler) buffalo.Handler {
		return func(c buffalo.Context) error {
			// Default operation name; replaced by the bare handler
			// name once buffalo has resolved the current route.
			opName := "HTTP " + c.Request().Method + c.Request().URL.Path
			rt := c.Value("current_route")
			if rt != nil {
				route, ok := rt.(buffalo.RouteInfo)
				if ok {
					opName = operation(route.HandlerName)
				}
			}
			// Pick up a propagated trace from the incoming HTTP
			// headers; the error is deliberately ignored so untraced
			// requests still get a fresh root span below.
			wireContext, _ := tr.Extract(
				opentracing.HTTPHeaders,
				opentracing.HTTPHeadersCarrier(c.Request().Header))
			// Create the span referring to the RPC client if available.
			// If wireContext == nil, a root span will be created.
			sp := tr.StartSpan(
				opName,
				ext.RPCServerOption(wireContext))
			ext.HTTPMethod.Set(sp, c.Request().Method)
			ext.HTTPUrl.Set(sp, c.Request().URL.String())
			ext.Component.Set(sp, "buffalo")
			// Expose the span to downstream handlers (see
			// SpanFromContext / ChildSpan).
			c.Set("otspan", sp)
			err := next(c)
			if err != nil {
				// Mark the span failed and attach the error detail.
				ext.Error.Set(sp, true)
				sp.LogFields(olog.Error(err))
			}
			br, ok := c.Response().(*buffalo.Response)
			if ok {
				ext.HTTPStatusCode.Set(sp, uint16(br.Status))
			}
			sp.Finish()
			return err
		}
	}
}
// SpanFromContext attempts to retrieve a span from the Buffalo context,
// returning it if found. If none is found a new one is created.
// NOTE(review): spans created on this fallback path are never Finish()ed
// here — the caller is presumably responsible; verify against callers.
func SpanFromContext(c buffalo.Context) opentracing.Span {
	// fast path - find span in the buffalo context and return it
	sp := c.Value("otspan")
	if sp != nil {
		span, ok := sp.(opentracing.Span)
		if ok {
			c.LogField("span found", true)
			return span
		}
	}
	c.LogField("span found", false)
	// none exists, make a new one (sadface)
	// Mirrors the naming logic in the OpenTracing middleware: handler
	// name when the route is known, method+path otherwise.
	opName := "HTTP " + c.Request().Method + c.Request().URL.Path
	rt := c.Value("current_route")
	if rt != nil {
		route, ok := rt.(buffalo.RouteInfo)
		if ok {
			opName = operation(route.HandlerName)
		}
	}
	span := tracer.StartSpan(opName)
	ext.HTTPMethod.Set(span, c.Request().Method)
	ext.HTTPUrl.Set(span, c.Request().URL.String())
	ext.Component.Set(span, "buffalo")
	return span
}
// ChildSpan returns a child span derived from the buffalo context "c".
// The caller owns the returned span and must call Finish on it.
func ChildSpan(opname string, c buffalo.Context) opentracing.Span {
	psp := SpanFromContext(c)
	sp := tracer.StartSpan(
		opname,
		opentracing.ChildOf(psp.Context()))
	return sp
}
// operation extracts the bare handler name from a fully qualified buffalo
// handler name (the portion after the final dot); a string without dots is
// returned unchanged.
func operation(s string) string {
	if i := strings.LastIndex(s, "."); i >= 0 {
		return s[i+1:]
	}
	return s
}
+89
View File
@@ -0,0 +1,89 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
// Package cmpopts provides common options for the cmp package.
package cmpopts
import (
"math"
"reflect"
"github.com/google/go-cmp/cmp"
)
// equateAlways reports every pair as equal; used as the comparer behind
// filters whose predicate has already established equality.
func equateAlways(_, _ interface{}) bool { return true }

// EquateEmpty returns a Comparer option that determines all maps and slices
// with a length of zero to be equal, regardless of whether they are nil.
//
// EquateEmpty can be used in conjunction with SortSlices and SortMaps.
func EquateEmpty() cmp.Option {
	return cmp.FilterValues(isEmpty, cmp.Comparer(equateAlways))
}
// isEmpty reports whether x and y are non-nil, identically typed slices or
// maps that both have length zero.
func isEmpty(x, y interface{}) bool {
	if x == nil || y == nil {
		return false
	}
	vx, vy := reflect.ValueOf(x), reflect.ValueOf(y)
	if vx.Type() != vy.Type() {
		return false
	}
	if k := vx.Kind(); k != reflect.Slice && k != reflect.Map {
		return false
	}
	return vx.Len() == 0 && vy.Len() == 0
}
// EquateApprox returns a Comparer option that determines float32 or float64
// values to be equal if they are within a relative fraction or absolute margin.
// This option is not used when either x or y is NaN or infinite.
//
// The fraction determines that the difference of two values must be within the
// smaller fraction of the two values, while the margin determines that the two
// values must be within some absolute margin.
// To express only a fraction or only a margin, use 0 for the other parameter.
// The fraction and margin must be non-negative.
//
// The mathematical expression used is equivalent to:
//	|x-y| ≤ max(fraction*min(|x|, |y|), margin)
//
// EquateApprox can be used in conjunction with EquateNaNs.
func EquateApprox(fraction, margin float64) cmp.Option {
	// Reject invalid tolerances up front: negative or NaN values would
	// make the comparison below meaningless.
	if margin < 0 || fraction < 0 || math.IsNaN(margin) || math.IsNaN(fraction) {
		panic("margin or fraction must be a non-negative number")
	}

	a := approximator{fraction, margin}
	// The areReal* filters keep NaN/Inf values out of the approximate
	// comparison, as documented above.
	return cmp.Options{
		cmp.FilterValues(areRealF64s, cmp.Comparer(a.compareF64)),
		cmp.FilterValues(areRealF32s, cmp.Comparer(a.compareF32)),
	}
}
type approximator struct{ frac, marg float64 }
func areRealF64s(x, y float64) bool {
return !math.IsNaN(x) && !math.IsNaN(y) && !math.IsInf(x, 0) && !math.IsInf(y, 0)
}
func areRealF32s(x, y float32) bool {
return areRealF64s(float64(x), float64(y))
}
func (a approximator) compareF64(x, y float64) bool {
relMarg := a.frac * math.Min(math.Abs(x), math.Abs(y))
return math.Abs(x-y) <= math.Max(a.marg, relMarg)
}
func (a approximator) compareF32(x, y float32) bool {
return a.compareF64(float64(x), float64(y))
}
// EquateNaNs returns a Comparer option that determines float32 and float64
// NaN values to be equal.
//
// EquateNaNs can be used in conjunction with EquateApprox.
func EquateNaNs() cmp.Option {
	// The filters match only pairs where both sides are NaN, so
	// equateAlways is safe as the comparer.
	return cmp.Options{
		cmp.FilterValues(areNaNsF64s, cmp.Comparer(equateAlways)),
		cmp.FilterValues(areNaNsF32s, cmp.Comparer(equateAlways)),
	}
}
func areNaNsF64s(x, y float64) bool {
return math.IsNaN(x) && math.IsNaN(y)
}
func areNaNsF32s(x, y float32) bool {
return areNaNsF64s(float64(x), float64(y))
}

Some files were not shown because too many files have changed in this diff Show More