diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..2acb044 --- /dev/null +++ b/.gitignore @@ -0,0 +1,4 @@ +config.yml +nyx-testing.db +nyx +run.sh \ No newline at end of file diff --git a/config/config.go b/config/config.go new file mode 100644 index 0000000..4a269fc --- /dev/null +++ b/config/config.go @@ -0,0 +1,93 @@ +package config + +import ( + "flag" + "gopkg.in/yaml.v2" + "io/ioutil" + "os" +) + +var configFileName = "./config.yml" + +func init() { + flag.StringVar(&configFileName, "config", "./config.yml", "Config File Location") +} + +type Config struct { + Site SiteConfig `yaml:"site"` // Site/HTML Configuration + DB DBConfig `yaml:"db"` // Database Configuration + HostnameWhiteList []string `yaml:"hosts"` // List of valid hostnames, ignored if empty + ListenOn string `yaml:"listen_on"` // Address & Port to use + MasterSecret string `yaml:"secret"` // Master Secret for keychain + DisableSecurity bool `yaml:"disable_security"` // Disables various flags to ensure non-HTTPS requests work + Captcha CaptchaConfig `yaml:"captcha"` +} + +const ( + CaptchaRecaptcha = "recaptcha" + CaptchaDisabled = "disabled" +) + +type CaptchaConfig struct { + Mode string `yaml:"mode"` // Captcha Mode + Settings map[string]string `yaml:"settings,inline"` +} + +type SiteConfig struct { + Title string `yaml:"title"` // Site Title + Description string `yaml:"description"` // Site Description + PrimaryColor string `yaml:"color"` // Primary Color for Size +} + +type DBConfig struct { + File string `yaml:"file"` + ReadOnly bool `yaml:"read_only"` +} + +func Load() (*Config, error) { + var config = &Config{ + Site: SiteConfig{ + Title: "NyxChan", + PrimaryColor: "#78909c", + Description: "NyxChan Default Configuration", + }, + DB: DBConfig{ + File: ":memory:", + ReadOnly: false, + }, + HostnameWhiteList: []string{}, + ListenOn: ":8080", + MasterSecret: "changeme", + DisableSecurity: false, + Captcha: CaptchaConfig{ + Mode: CaptchaDisabled, + }, + } + if _, err := os.Stat(configFileName); os.IsNotExist(err) { + return config, nil + } + dat, err := ioutil.ReadFile(configFileName) + if err != nil { + return nil, err + } + err = yaml.Unmarshal(dat, config) + if err != nil { + return nil, err + } + return config, nil +} + +func (c Config) IsHostNameValid(hostname string) bool { + if c.HostnameWhiteList == nil { + return true + } + if len(c.HostnameWhiteList) == 0 { + return true + } + for _, v := range c.HostnameWhiteList { + if v == hostname { + return true + } + } + return false +} diff --git a/http/admin/handler.go b/http/admin/handler.go new file mode 100644 index 0000000..f7ddb3b --- /dev/null +++ b/http/admin/handler.go @@ -0,0 +1,119 @@ +package admin + +import ( + "bytes" + "github.com/GeertJohan/go.rice" + "github.com/icza/session" + "github.com/pressly/chi" + "github.com/tidwall/buntdb" + "go.rls.moe/nyx/http/errw" + "go.rls.moe/nyx/http/middle" + "go.rls.moe/nyx/resources" + "html/template" + "net/http" + "time" +) + +var riceConf = rice.Config{ + LocateOrder: []rice.LocateMethod{ + rice.LocateWorkingDirectory, + rice.LocateEmbedded, + rice.LocateAppended, + }, +} + +var box = riceConf.MustFindBox("http/admin/res/") + +var ( + panelTmpl = template.New("admin/panel") + loginTmpl = template.New("admin/login") +) + +func init() { + var err error + panelTmpl, err = panelTmpl.Parse(box.MustString("panel.html")) + if err != nil { + panic(err) + } + loginTmpl, err = loginTmpl.Parse(box.MustString("index.html")) + if err != nil { + panic(err) + } +} + +// Router sets up the Administration 
Panel
+// It **must** be set up on the /admin/ basepath
+func Router(r chi.Router) {
+	r.Get("/", serveLogin)
+	r.Get("/index.html", serveLogin)
+	r.Get("/panel.html", servePanel)
+	r.Post("/new_board.sh", handleNewBoard)
+	r.Post("/login.sh", handleLogin)
+	r.Post("/logout.sh", handleLogout)
+}
+
+func serveLogin(w http.ResponseWriter, r *http.Request) {
+	dat := bytes.NewBuffer([]byte{})
+	err := loginTmpl.Execute(dat, middle.GetBaseCtx(r))
+	if err != nil {
+		errw.ErrorWriter(err, w, r)
+		return
+	}
+	http.ServeContent(w, r, "index.html", time.Now(),
+		bytes.NewReader(dat.Bytes()))
+}
+
+func servePanel(w http.ResponseWriter, r *http.Request) {
+	sess := middle.GetSession(r)
+	if sess == nil {
+		w.WriteHeader(http.StatusUnauthorized)
+		w.Write([]byte("Unauthorized"))
+		return
+	}
+	dat := bytes.NewBuffer([]byte{})
+	err := panelTmpl.Execute(dat, middle.GetBaseCtx(r))
+	if err != nil {
+		errw.ErrorWriter(err, w, r)
+		return
+	}
+	http.ServeContent(w, r, "panel.html", time.Now(),
+		bytes.NewReader(dat.Bytes()))
+}
+
+func handleLogout(w http.ResponseWriter, r *http.Request) {
+	sess := middle.GetSession(r)
+	if sess == nil {
+		// No active session, nothing to remove.
+		http.Redirect(w, r, "/admin/index.html", http.StatusSeeOther)
+		return
+	}
+	session.Remove(sess, w)
+	http.Redirect(w, r, "/admin/index.html", http.StatusSeeOther)
+}
+func handleLogin(w http.ResponseWriter, r *http.Request) {
+	err := r.ParseForm()
+	if err != nil {
+		errw.ErrorWriter(err, w, r)
+		return
+	}
+	db := middle.GetDB(r)
+
+	var admin = &resources.AdminPass{}
+	err = db.View(func(tx *buntdb.Tx) error {
+		var err error
+		admin, err = resources.GetAdmin(tx, r.FormValue("id"))
+		return err
+	})
+	if err != nil {
+		err = errw.MakeErrorWithTitle("Access Denied", "User or Password Invalid")
+		errw.ErrorWriter(err, w, r)
+		return
+	}
+	err = admin.VerifyLogin(r.FormValue("pass"))
+	if err != nil {
+		err = errw.MakeErrorWithTitle("Access Denied", "User or Password Invalid")
+		errw.ErrorWriter(err, w, r)
+		return
+	}
+	sess := session.NewSessionOptions(&session.SessOptions{
+		CAttrs: map[string]interface{}{"mode": "admin"},
+	})
+	session.Add(sess, w)
+
+	http.Redirect(w, r, "/admin/panel.html", http.StatusSeeOther)
+}
diff --git a/http/admin/newboard.go b/http/admin/newboard.go
new file mode 100644
index 0000000..ef981bc
--- /dev/null
+++ b/http/admin/newboard.go
@@ -0,0 +1,58 @@
+package admin
+
+import (
+	"errors"
+	"github.com/tidwall/buntdb"
+	"go.rls.moe/nyx/http/errw"
+	"go.rls.moe/nyx/http/middle"
+	"go.rls.moe/nyx/resources"
+	"net/http"
+)
+
+func handleNewBoard(w http.ResponseWriter, r *http.Request) {
+	sess := middle.GetSession(r)
+	if sess == nil {
+		w.WriteHeader(http.StatusUnauthorized)
+		w.Write([]byte("Unauthorized"))
+		return
+	}
+	if sess.CAttr("mode") != "admin" {
+		w.WriteHeader(http.StatusUnauthorized)
+		w.Write([]byte("Unauthorized"))
+		return
+	}
+
+	err := r.ParseForm()
+	if err != nil {
+		errw.ErrorWriter(err, w, r)
+		return
+	}
+	db := middle.GetDB(r)
+
+	var board = &resources.Board{}
+
+	board.ShortName = r.FormValue("shortname")
+	board.LongName = r.FormValue("longname")
+
+	if board.ShortName == "" {
+		errw.ErrorWriter(errors.New("Need shortname"), w, r)
+		return
+	}
+
+	// Reserved path segments cannot be used as board names.
+	if board.ShortName == "admin" || board.ShortName == "@" {
+		errw.ErrorWriter(errors.New("No"), w, r)
+		return
+	}
+
+	if len(board.LongName) < 5 {
+		errw.ErrorWriter(errors.New("Need 5 characters for long name"), w, r)
+		return
+	}
+
+	if err = db.Update(func(tx *buntdb.Tx) error {
+		return resources.NewBoard(tx, r.Host, board)
+	}); err != nil {
+		errw.ErrorWriter(err, w, r)
+		return
+	}
+
+	http.Redirect(w, r, "/admin/panel.html", http.StatusSeeOther)
+}
diff --git a/http/admin/res/index.html b/http/admin/res/index.html
new file mode 100644
index 0000000..ad7e0d2
--- /dev/null
+++ b/http/admin/res/index.html
@@ -0,0 +1,41 @@
+{{.Config.Site.Title}} Admin Login
+ + \ No newline at end of file diff --git a/http/admin/res/panel.html b/http/admin/res/panel.html new file mode 100644 index 0000000..048bdfe --- /dev/null +++ b/http/admin/res/panel.html @@ -0,0 +1,57 @@ + + + + + + {{.Config.Site.Title}} Admin Panel + + + + + +
+ Welcome {{.Admin.Id}}
+ + \ No newline at end of file diff --git a/http/board/board.go b/http/board/board.go new file mode 100644 index 0000000..4101b4e --- /dev/null +++ b/http/board/board.go @@ -0,0 +1,52 @@ +package board + +import ( + "bytes" + "github.com/pressly/chi" + "github.com/tidwall/buntdb" + "go.rls.moe/nyx/http/errw" + "go.rls.moe/nyx/http/middle" + "go.rls.moe/nyx/resources" + "log" + "net/http" + "time" +) + +func serveBoard(w http.ResponseWriter, r *http.Request) { + dat := bytes.NewBuffer([]byte{}) + db := middle.GetDB(r) + ctx := middle.GetBaseCtx(r) + err := db.View(func(tx *buntdb.Tx) error { + bName := chi.URLParam(r, "board") + b, err := resources.GetBoard(tx, r.Host, bName) + if err != nil { + return err + } + ctx["Board"] = b + + threads, err := resources.ListThreads(tx, r.Host, bName) + if err != nil { + return err + } + log.Println("Number of Thread on board: ", len(threads)) + + for k := range threads { + err := resources.FillReplies(tx, r.Host, threads[k]) + if err != nil { + return err + } + } + ctx["Threads"] = threads + return nil + }) + if err != nil { + errw.ErrorWriter(err, w, r) + return + } + err = boardTmpl.Execute(dat, ctx) + if err != nil { + errw.ErrorWriter(err, w, r) + return + } + http.ServeContent(w, r, "board.html", time.Now(), bytes.NewReader(dat.Bytes())) +} diff --git a/http/board/handler.go b/http/board/handler.go new file mode 100644 index 0000000..c4e3369 --- /dev/null +++ b/http/board/handler.go @@ -0,0 +1,88 @@ +package board + +import ( + "bytes" + "github.com/GeertJohan/go.rice" + "github.com/pressly/chi" + "github.com/tidwall/buntdb" + "go.rls.moe/nyx/http/errw" + "go.rls.moe/nyx/http/middle" + "go.rls.moe/nyx/resources" + "html/template" + "net/http" + "time" +) + +var riceConf = rice.Config{ + LocateOrder: []rice.LocateMethod{ + rice.LocateWorkingDirectory, + rice.LocateEmbedded, + rice.LocateAppended, + }, +} + +var box = riceConf.MustFindBox("http/board/res/") + +var ( + dirTmpl = template.New("board/dir") + boardTmpl = template.New("board/board") + threadTmpl = template.New("board/thread") + + hdlFMap = template.FuncMap{ + "renderText": resources.OperateReplyText, + } +) + +func init() { + var err error + dirTmpl, err = dirTmpl.Parse(box.MustString("dir.html")) + if err != nil { + panic(err) + } + boardTmpl, err = boardTmpl.Funcs(hdlFMap).Parse(box.MustString("board.html")) + if err != nil { + panic(err) + } + threadTmpl, err = threadTmpl.Funcs(hdlFMap).Parse(box.MustString("thread.html")) + if err != nil { + panic(err) + } +} + +func Router(r chi.Router) { + r.Get("/", serveDir) + r.Get("/dir.html", serveDir) + r.Get("/:board/board.html", serveBoard) + r.Post("/:board/new_thread.sh", handleNewThread) + r.Get("/:board/:thread/thread.html", serveThread) + r.Get("/:board/:thread/:post/post.html", servePost) + r.Post("/:board/:thread/reply.sh", handleNewReply) +} + +func servePost(w http.ResponseWriter, r *http.Request) { + return +} + +func serveDir(w http.ResponseWriter, r *http.Request) { + dat := bytes.NewBuffer([]byte{}) + db := middle.GetDB(r) + ctx := middle.GetBaseCtx(r) + err := db.View(func(tx *buntdb.Tx) error { + bList, err := resources.ListBoards(tx, r.Host) + if err != nil { + return err + } + ctx["Boards"] = bList + return nil + }) + if err != nil { + errw.ErrorWriter(err, w, r) + return + } + err = dirTmpl.Execute(dat, ctx) + if err != nil { + errw.ErrorWriter(err, w, r) + return + } + http.ServeContent(w, r, "dir.html", time.Now(), bytes.NewReader(dat.Bytes())) +} diff --git a/http/board/newreply.go b/http/board/newreply.go new file 
mode 100644 index 0000000..632c8dc --- /dev/null +++ b/http/board/newreply.go @@ -0,0 +1,59 @@ +package board + +import ( + "fmt" + "github.com/pressly/chi" + "github.com/tidwall/buntdb" + "go.rls.moe/nyx/http/errw" + "go.rls.moe/nyx/http/middle" + "go.rls.moe/nyx/resources" + "net/http" + "strconv" +) + +func handleNewReply(w http.ResponseWriter, r *http.Request) { + err := r.ParseForm() + if err != nil { + errw.ErrorWriter(err, w, r) + return + } + + var reply = &resources.Reply{} + + reply.Board = chi.URLParam(r, "board") + tid, err := strconv.Atoi(chi.URLParam(r, "thread")) + if err != nil { + errw.ErrorWriter(err, w, r) + return + } + reply.Thread = int64(tid) + reply.Text = r.FormValue("text") + if len(reply.Text) > 1000 { + errw.ErrorWriter(errw.MakeErrorWithTitle("I'm sorry but I can't do that", "These are too many characters"), w, r) + return + } + if len(reply.Text) < 10 { + errw.ErrorWriter(errw.MakeErrorWithTitle("I'm sorry but I can't do that", "These are not enough characters"), w, r) + return + } + reply.Metadata = map[string]string{} + if r.FormValue("tripcode") != "" { + reply.Metadata["trip"] = resources.CalcTripCode(r.FormValue("tripcode")) + } else { + reply.Metadata["trip"] = "Anonymous" + } + + db := middle.GetDB(r) + if err = db.Update(func(tx *buntdb.Tx) error { + thread, err := resources.GetThread(tx, r.Host, reply.Board, reply.Thread) + if err != nil { + return err + } + return resources.NewReply(tx, r.Host, reply.Board, thread, reply, false) + }); err != nil { + errw.ErrorWriter(err, w, r) + return + } + + http.Redirect(w, r, fmt.Sprintf("/%s/%d/thread.html", chi.URLParam(r, "board"), reply.Thread), http.StatusSeeOther) +} diff --git a/http/board/newthread.go b/http/board/newthread.go new file mode 100644 index 0000000..37c335b --- /dev/null +++ b/http/board/newthread.go @@ -0,0 +1,48 @@ +package board + +import ( + "fmt" + "github.com/pressly/chi" + "github.com/tidwall/buntdb" + "go.rls.moe/nyx/http/errw" + "go.rls.moe/nyx/http/middle" + "go.rls.moe/nyx/resources" + "net/http" +) + +func handleNewThread(w http.ResponseWriter, r *http.Request) { + err := r.ParseForm() + if err != nil { + errw.ErrorWriter(err, w, r) + return + } + + var thread = &resources.Thread{} + var mainReply = &resources.Reply{} + + mainReply.Board = chi.URLParam(r, "board") + thread.Board = chi.URLParam(r, "board") + mainReply.Text = r.FormValue("text") + if len(mainReply.Text) > 1000 { + errw.ErrorWriter(errw.MakeErrorWithTitle("I'm sorry but I can't do that", "These are too many characters"), w, r) + return + } + if len(mainReply.Text) < 10 { + errw.ErrorWriter(errw.MakeErrorWithTitle("I'm sorry but I can't do that", "These are not enough characters"), w, r) + return + } + mainReply.Metadata = map[string]string{} + if r.FormValue("tripcode") != "" { + mainReply.Metadata["trip"] = resources.CalcTripCode(r.FormValue("tripcode")) + } + + db := middle.GetDB(r) + if err = db.Update(func(tx *buntdb.Tx) error { + return resources.NewThread(tx, r.Host, mainReply.Board, thread, mainReply) + }); err != nil { + errw.ErrorWriter(err, w, r) + return + } + + http.Redirect(w, r, fmt.Sprintf("/%s/%d/thread.html", chi.URLParam(r, "board"), thread.ID), http.StatusSeeOther) +} diff --git a/http/board/res/board.html b/http/board/res/board.html new file mode 100644 index 0000000..d5edf85 --- /dev/null +++ b/http/board/res/board.html @@ -0,0 +1,119 @@ + + + + + + {{.Config.Site.Title}} - /{{.Board.ShortName}}/ + + + + + +{{ $boardlink := .Board.ShortName }} +
+
+ + + + + + + + + + + + + + + {{ if ne .Config.Captcha.Mode "disabled" }} + + + + + {{ end }} + + + + + +
+ TripCode + + + +
+ Comment + + +
+ Image File + + +
+ Captcha + + + + +
+ {{range .Threads}} + {{ $threadrid := .GetReply.ID }} + + No.{{.ID}} +
+ {{ renderText .GetReply.Text}} +
+ {{range .GetReplies}} + {{ if ne .ID $threadrid }} + + +
>> + + No.{{.ID}} +
+ {{ renderText .Text}} +
+
+ {{end}} + {{end}} +

+ {{end}} +
+ + \ No newline at end of file diff --git a/http/board/res/dir.html b/http/board/res/dir.html new file mode 100644 index 0000000..19ec8a3 --- /dev/null +++ b/http/board/res/dir.html @@ -0,0 +1,30 @@ + + + + + + {{.Config.Site.Title}} Boards + + + + + +
+
+

Boards

+
+
+ +
+
+ + \ No newline at end of file diff --git a/http/board/res/thread.html b/http/board/res/thread.html new file mode 100644 index 0000000..eb6c9b0 --- /dev/null +++ b/http/board/res/thread.html @@ -0,0 +1,129 @@ + + + + + + {{.Config.Site.Title}} - /{{.Board.ShortName}}/ + + + + + +{{ $boardlink := .Board.ShortName }} +
+
+
+ + + + + + + + + + + + + + + {{ if ne .Config.Captcha.Mode "disabled" }} + + + + + {{ end }} + + + + + {{ if .Board.Metadata.rules }} + + + + + {{ end }} + +
+ TripCode + + + +
+ Comment + + +
+ Image File + + +
+ Captcha + + + + +
+ + + +
+ Rules + + {{ .Board.Metadata.rules }} +
+
+
+
+ {{with .Thread }} + {{ $threadrid := .GetReply.ID }} + + No.{{.ID}} +
+ {{ renderText .GetReply.Text}} +
+ {{range .GetReplies}} + {{ if ne .ID $threadrid }} + + +
>> + + No.{{.ID}} +
+ {{ renderText .Text}} +
+
+ {{end}} + {{end}} +

+ {{end}} +
\ No newline at end of file
diff --git a/http/board/thread.go b/http/board/thread.go
new file mode 100644
index 0000000..ba5fbe0
--- /dev/null
+++ b/http/board/thread.go
@@ -0,0 +1,57 @@
+package board
+
+import (
+	"bytes"
+	"github.com/pressly/chi"
+	"github.com/tidwall/buntdb"
+	"go.rls.moe/nyx/http/errw"
+	"go.rls.moe/nyx/http/middle"
+	"go.rls.moe/nyx/resources"
+	"net/http"
+	"strconv"
+	"time"
+)
+
+func serveThread(w http.ResponseWriter, r *http.Request) {
+	dat := bytes.NewBuffer([]byte{})
+	db := middle.GetDB(r)
+	ctx := middle.GetBaseCtx(r)
+	err := db.View(func(tx *buntdb.Tx) error {
+		bName := chi.URLParam(r, "board")
+		b, err := resources.GetBoard(tx, r.Host, bName)
+		if err != nil {
+			return err
+		}
+		ctx["Board"] = b
+
+		id, err := strconv.Atoi(chi.URLParam(r, "thread"))
+		if err != nil {
+			return err
+		}
+		thread, err := resources.GetThread(tx, r.Host, bName, int64(id))
+		if err != nil {
+			return err
+		}
+
+		err = resources.FillReplies(tx, r.Host, thread)
+		if err != nil {
+			return err
+		}
+
+		ctx["Thread"] = thread
+		return nil
+	})
+	if err != nil {
+		errw.ErrorWriter(err, w, r)
+		return
+	}
+	err = threadTmpl.Execute(dat, ctx)
+	if err != nil {
+		errw.ErrorWriter(err, w, r)
+		return
+	}
+	http.ServeContent(w, r, "thread.html", time.Now(), bytes.NewReader(dat.Bytes()))
+}
diff --git a/http/errw/handler.go b/http/errw/handler.go
new file mode 100644
index 0000000..a1c854d
--- /dev/null
+++ b/http/errw/handler.go
@@ -0,0 +1,77 @@
+package errw
+
+import (
+	"errors"
+	"github.com/GeertJohan/go.rice"
+	"github.com/pressly/chi/middleware"
+	"go.rls.moe/nyx/http/middle"
+	"html/template"
+	"net/http"
+)
+
+var riceConf = rice.Config{
+	LocateOrder: []rice.LocateMethod{
+		rice.LocateWorkingDirectory,
+		rice.LocateEmbedded,
+		rice.LocateAppended,
+	},
+}
+
+var box = riceConf.MustFindBox("http/errw/res/")
+
+var (
+	errorTmpl = template.New("errw/error")
+)
+
+func init() {
+	var err error
+	errorTmpl, err = errorTmpl.Parse(box.MustString("error.html"))
+	if err != nil {
+		panic(err)
+	}
+}
+
+type ErrorWithTitle interface {
+	error
+	ErrorTitle() string
+}
+
+type errorWTInt struct {
+	message, title string
+}
+
+func (e errorWTInt) Error() string {
+	return e.message
+}
+
+func (e errorWTInt) ErrorTitle() string {
+	return e.title
+}
+
+func MakeErrorWithTitle(title, message string) ErrorWithTitle {
+	return errorWTInt{message, title}
+}
+
+func ErrorWriter(err error, w http.ResponseWriter, r *http.Request) {
+	ctx := middle.GetBaseCtx(r)
+
+	if err == nil {
+		ErrorWriter(errors.New("Unknown Error"), w, r)
+		return
+	}
+
+	if errWT, ok := err.(ErrorWithTitle); ok {
+		ctx["Error"] = map[string]string{
+			"Code":        middleware.GetReqID(r.Context()),
+			"Description": errWT.Error(),
+			"Title":       errWT.ErrorTitle(),
+		}
+	} else {
+		ctx["Error"] = map[string]string{
+			"Code":        middleware.GetReqID(r.Context()),
+			"Description": err.Error(),
+			"Title":       "Error",
+		}
+	}
+	errorTmpl.Execute(w, ctx)
+	return
+}
diff --git a/http/errw/res/error.html b/http/errw/res/error.html
new file mode 100644
index 0000000..e7b9538
--- /dev/null
+++ b/http/errw/res/error.html
@@ -0,0 +1,35 @@
+{{.Config.Site.Title}} Admin Login
+

{{.Error.Title}}


+

{{.Error.Code}}


+

{{.Error.Description}}

+
+ + \ No newline at end of file diff --git a/http/middle/base.go b/http/middle/base.go new file mode 100644 index 0000000..e073cd5 --- /dev/null +++ b/http/middle/base.go @@ -0,0 +1,21 @@ +package middle + +import ( + "github.com/justinas/nosurf" + "github.com/pressly/chi/middleware" + "net/http" +) + +func GetBaseCtx(r *http.Request) map[string]interface{} { + val := map[string]interface{}{ + "Config": GetConfig(r), + "ReqID": middleware.GetReqID(r.Context()), + "CSRFToken": nosurf.Token(r), + } + + return val +} + +func CSRFProtect(next http.Handler) http.Handler { + return nosurf.New(next) +} diff --git a/http/middle/config.go b/http/middle/config.go new file mode 100644 index 0000000..5b0f8a9 --- /dev/null +++ b/http/middle/config.go @@ -0,0 +1,24 @@ +package middle + +import ( + "context" + "go.rls.moe/nyx/config" + "net/http" +) + +func ConfigCtx(config *config.Config) func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + r = r.WithContext(context.WithValue(r.Context(), configKey, config)) + next.ServeHTTP(w, r) + }) + } +} + +func GetConfig(r *http.Request) *config.Config { + val := r.Context().Value(configKey) + if val == nil { + panic("Config Middleware not configured") + } + return val.(*config.Config) +} diff --git a/http/middle/ctx.go b/http/middle/ctx.go new file mode 100644 index 0000000..c090102 --- /dev/null +++ b/http/middle/ctx.go @@ -0,0 +1,9 @@ +package middle + +type ctxKey int64 + +const ( + configKey ctxKey = iota + dbCtxKey + sessionKey +) diff --git a/http/middle/db.go b/http/middle/db.go new file mode 100644 index 0000000..7553a54 --- /dev/null +++ b/http/middle/db.go @@ -0,0 +1,34 @@ +package middle + +import ( + "context" + "github.com/tidwall/buntdb" + "go.rls.moe/nyx/config" + "go.rls.moe/nyx/resources" + "net/http" +) + +func Database(c *config.Config) (func(http.Handler) http.Handler, error) { + db, err := buntdb.Open(c.DB.File) + if err != nil { + return nil, err + } + if err = resources.InitialSetup(db); err != nil { + return nil, err + } + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + r = r.WithContext(context.WithValue(r.Context(), + dbCtxKey, db)) + next.ServeHTTP(w, r) + }) + }, nil +} + +func GetDB(r *http.Request) *buntdb.DB { + val := r.Context().Value(dbCtxKey) + if val == nil { + panic("DB Middleware not configured") + } + return val.(*buntdb.DB) +} diff --git a/http/middle/session.go b/http/middle/session.go new file mode 100644 index 0000000..b62384c --- /dev/null +++ b/http/middle/session.go @@ -0,0 +1,15 @@ +package middle + +import ( + "github.com/icza/session" + "net/http" +) + +func init() { + session.Global.Close() + session.Global = session.NewCookieManager(session.NewInMemStore()) +} + +func GetSession(r *http.Request) session.Session { + return session.Get(r) +} diff --git a/http/res/admin.css b/http/res/admin.css new file mode 100644 index 0000000..6e4f88f --- /dev/null +++ b/http/res/admin.css @@ -0,0 +1,26 @@ +/* CUSTOM CSS */ +div.admin.login { + border: 1px solid black; + width: 500px; + margin: auto; + margin-top: 100px; +} +.admin.form.row { + margin: auto; + padding: 5px; + width: 90%; + height: 22px; + left: 0; + right: 0; + display: flex; +} +.admin.form.input { + font-family: "monospace"; + width: 100%; + height: 100%; + padding: 2px; + display: inline; +} +.admin.form.input.halfsize { + width: 50%; +} \ No newline at end of file diff --git 
a/http/res/custom.css b/http/res/custom.css new file mode 100644 index 0000000..c571b1e --- /dev/null +++ b/http/res/custom.css @@ -0,0 +1,21 @@ +h1 { + font-size: 32px; +} + +h2 { + font-size: 24px; +} + +h3 { + font-size: 16px; +} + +div { + display: block; + margin: 0; + padding: 0; +} + +blockquote blockquote { max-width: 80%; word-wrap: break-word; white-space: normal; } + +.reply blockquote, blockquote :last-child { max-width: 80%; word-wrap: break-word; white-space: normal; } \ No newline at end of file diff --git a/http/res/style.css b/http/res/style.css new file mode 100644 index 0000000..551369c --- /dev/null +++ b/http/res/style.css @@ -0,0 +1,157 @@ +/* The following CSS is mostly taken from Wakaba, big thanks for the devs there! <3 */ + +html, body { + background:#FFFFEE; + color:#800000; +} +a { + color:#0000EE; +} +a:hover { + color:#DD0000; +} +.adminbar { + text-align:right; + clear:both; + float:right; +} +.logo { + clear:both; + text-align:center; + font-size:2em; + color:#800000; + width:100%; +} +.theader { + background:#E04000; + text-align:center; + padding:2px; + color:#FFFFFF; + width:100%; +} +.postarea { +} +.rules { + font-size:0.7em; +} +.postblock { + background:#EEAA88; + color:#800000; + font-weight:800; +} +.footer { + text-align:center; + font-size:12px; + font-family:serif; +} +.passvalid { + background:#EEAA88; + text-align:center; + width:100%; + color:#ffffff; +} +.dellist { + font-weight: bold; + text-align:center; +} +.delbuttons { + text-align:center; + padding-bottom:4px; + +} +.managehead { + background:#AAAA66; + color:#400000; + padding:0px; +} +.postlists { + background:#FFFFFF; + width:100%; + padding:0px; + color:#800000; +} +.row1 { + background:#EEEECC; + color:#800000; +} +.row2 { + background:#DDDDAA; + color:#800000; +} +.unkfunc { + background:inert; + color:#789922; +} +.filesize { + text-decoration:none; +} +.filetitle { + background:inherit; + font-size:1.2em; + color:#CC1105; + font-weight:800; +} +.postername { + color:#117743; + font-weight:bold; +} +.postertrip { + color:#228854; +} +.oldpost { + color:#CC1105; + font-weight:800; +} +.omittedposts { + color:#707070; +} +.reply { + background:#F0E0D6; + color:#800000; +} +.doubledash { + vertical-align:top; + clear:both; + float:left; +} +.replytitle { + font-size: 1.2em; + color:#CC1105; + font-weight:800; +} +.commentpostername { + color:#117743; + font-weight:800; +} +.thumbnailmsg { + font-size: small; + color:#800000; +} + + + +.abbrev { + color:#707070; +} +.highlight { + background:#F0E0D6; + color:#800000; + border: 2px dashed #EEAA88; +} + +/* From pl files */ + +/* futaba_style.pl */ + +blockquote blockquote { margin-left: 0em; } +form { margin-bottom: 0px } +form .trap { display:none } +.postarea { text-align: center } +.postarea table { margin: 0px auto; text-align: left } +.thumb { border: none; float: left; margin: 2px 20px } +.nothumb { float: left; background: #eee; border: 2px dashed #aaa; text-align: center; margin: 2px 20px; padding: 1em 0.5em 1em 0.5em; } +.reply blockquote, blockquote :last-child { margin-bottom: 0em; } +.reflink a { color: inherit; text-decoration: none } +.reply .filesize { margin-left: 20px } +.userdelete { float: right; text-align: center; white-space: nowrap } +.replypage .replylink { display: none } \ No newline at end of file diff --git a/http/server.go b/http/server.go new file mode 100644 index 0000000..a8b2ca1 --- /dev/null +++ b/http/server.go @@ -0,0 +1,53 @@ +package http + +import ( + "fmt" + "github.com/GeertJohan/go.rice" + 
"github.com/pressly/chi" + "github.com/pressly/chi/middleware" + "go.rls.moe/nyx/config" + "go.rls.moe/nyx/http/admin" + "go.rls.moe/nyx/http/board" + "go.rls.moe/nyx/http/middle" + "net/http" +) + +var riceConf = rice.Config{ + LocateOrder: []rice.LocateMethod{ + rice.LocateWorkingDirectory, + rice.LocateEmbedded, + rice.LocateAppended, + }, +} + +func Start(config *config.Config) { + r := chi.NewRouter() + + fmt.Println("Setting up Router") + r.Use(middleware.Logger) + r.Use(middleware.Recoverer) + r.Use(middleware.CloseNotify) + r.Use(middleware.DefaultCompress) + + r.Use(middle.ConfigCtx(config)) + + r.Use(middle.CSRFProtect) + { + mw, err := middle.Database(config) + if err != nil { + panic(err) + } + r.Use(mw) + } + + r.Route("/admin/", admin.Router) + { + box := riceConf.MustFindBox("http/res") + atFileServer := http.StripPrefix("/@/", http.FileServer(box.HTTPBox())) + r.Mount("/@/", atFileServer) + } + r.Group(board.Router) + + fmt.Println("Setup Complete, Starting Web Server...") + http.ListenAndServe(config.ListenOn, r) +} diff --git a/main.go b/main.go new file mode 100644 index 0000000..dd61254 --- /dev/null +++ b/main.go @@ -0,0 +1,18 @@ +package main + +import ( + "fmt" + "go.rls.moe/nyx/config" + "go.rls.moe/nyx/http" +) + +func main() { + c, err := config.Load() + if err != nil { + fmt.Printf("Could not read configuration: %s\n", err) + return + } + + fmt.Println("Starting Server") + http.Start(c) +} diff --git a/resources/adminpass.go b/resources/adminpass.go new file mode 100644 index 0000000..c2e9167 --- /dev/null +++ b/resources/adminpass.go @@ -0,0 +1,67 @@ +package resources + +import ( + "encoding/json" + "errors" + "fmt" + "github.com/hlandau/passlib" + "github.com/tidwall/buntdb" +) + +type AdminPass struct { + ID string `json:"id"` + Password string `json:"password"` +} + +func (a *AdminPass) HashLogin(pass string) error { + var err error + a.Password, err = passlib.Hash(pass) + return err +} + +func (a *AdminPass) VerifyLogin(pass string) error { + var err error + err = passlib.VerifyNoUpgrade(pass, a.Password) + return err +} + +func NewAdmin(tx *buntdb.Tx, in *AdminPass) error { + dat, err := json.Marshal(in) + if err != nil { + return err + } + _, replaced, err := tx.Set( + fmt.Sprintf(adminPassPath, escapeString(in.ID)), + string(dat), + nil) + if err != nil { + return err + } + if replaced { + return errors.New("Admin already exists") + } + return nil +} + +func GetAdmin(tx *buntdb.Tx, id string) (*AdminPass, error) { + var ret = &AdminPass{} + dat, err := tx.Get( + fmt.Sprintf(adminPassPath, escapeString(id)), + ) + if err != nil { + return nil, err + } + if err = json.Unmarshal([]byte(dat), ret); err != nil { + return nil, err + } + return ret, nil +} + +func DelAdmin(tx *buntdb.Tx, id string) error { + if _, err := tx.Delete( + fmt.Sprintf(adminPassPath, escapeString(id)), + ); err != nil { + return err + } + return nil +} diff --git a/resources/board.go b/resources/board.go new file mode 100644 index 0000000..d819bab --- /dev/null +++ b/resources/board.go @@ -0,0 +1,78 @@ +package resources + +import ( + "encoding/json" + "errors" + "fmt" + "github.com/tidwall/buntdb" +) + +type Board struct { + ShortName string `json:"short"` + LongName string `json:"long"` + Metadata Metadata `json:"meta"` +} + +func NewBoard(tx *buntdb.Tx, hostname string, in *Board) error { + dat, err := json.Marshal(in) + if err != nil { + return err + } + _, replaced, err := tx.Set( + fmt.Sprintf(boardPath, escapeString(hostname), escapeString(in.ShortName)), + string(dat), + nil) 
+ if err != nil { + return err + } + if replaced { + return errors.New("Board " + escapeString(in.ShortName) + " already exists") + } + return nil +} + +func TestBoard(tx *buntdb.Tx, hostname, shortname string) (error) { + _, err := tx.Get( + fmt.Sprintf(boardPath, escapeString(hostname), escapeString(shortname)), + ) + return err +} + +func GetBoard(tx *buntdb.Tx, hostname, shortname string) (*Board, error) { + var ret = &Board{} + dat, err := tx.Get( + fmt.Sprintf(boardPath, escapeString(hostname), escapeString(shortname)), + ) + if err != nil { + return nil, err + } + if err = json.Unmarshal([]byte(dat), ret); err != nil { + return nil, err + } + return ret, nil +} + +func DelBoard(tx *buntdb.Tx, hostname, shortname string) error { + if _, err := tx.Delete( + fmt.Sprintf(boardPath, escapeString(hostname), escapeString(shortname)), + ); err != nil { + return err + } + return nil +} + +func ListBoards(tx *buntdb.Tx, hostname string) ([]*Board, error) { + var boardList = []*Board{} + var err error + tx.AscendKeys(fmt.Sprintf(boardPath, escapeString(hostname), "*"), + func(key, value string) bool { + var board = &Board{} + err = json.Unmarshal([]byte(value), board) + if err != nil { + return false + } + boardList = append(boardList, board) + return true + }) + return boardList, err +} diff --git a/resources/db.go b/resources/db.go new file mode 100644 index 0000000..f58a803 --- /dev/null +++ b/resources/db.go @@ -0,0 +1,114 @@ +package resources + +import ( + "errors" + "fmt" + "github.com/tidwall/buntdb" + "strings" +) + +const ( + setup = "/jack/setup" + hostEnable = "/jack/%s/enabled" + boardPath = "/jack/%s/board/%s/board-data" + threadPath = "/jack/%s/board/%s/thread/%032d/thread-data" + threadSPath = "/jack/%s/board/%s/thread/*/thread-data" + replyPath = "/jack/%s/board/%s/thread/%032d/reply/%032d/reply-data" + replySPath = "/jack/%s/board/%s/thread/%032d/reply/*/reply-data" + modPassPath = "/jack/%s/pass/mod/%s/mod-data" + adminPassPath = "/jack/./pass/admin/%s/admin-data" +) + +func InitialSetup(db *buntdb.DB) error { + return db.Update(func(tx *buntdb.Tx) error { + if _, err := tx.Get(setup); err != nil { + fmt.Println("") + if err != buntdb.ErrNotFound { + fmt.Println("DB setup not known.") + return err + } + fmt.Println("DB not setup.") + tx.Set(setup, "yes", nil) + } else { + fmt.Println("DB setup.") + return nil + } + + fmt.Println("Creating Indices") + err := tx.CreateIndex("board/short", "/jack/*/board/*/board-data", buntdb.IndexJSON("short")) + if err != nil { + return err + } + err = tx.CreateIndex("replies", "/jack/*/board/*/thread/*/reply/*/reply-data", buntdb.IndexJSON("thread")) + if err != nil { + return err + } + err = tx.CreateIndex("board/thread", "/jack/*/board/*/thread/*/thread-data", buntdb.IndexJSON("board")) + if err != nil { + return err + } + + fmt.Println("Creating default admin") + admin := &AdminPass{ + ID: "admin", + } + err = admin.HashLogin("admin") + if err != nil { + return err + } + fmt.Println("Saving default admin to DB") + err = NewAdmin(tx, admin) + if err != nil { + return err + } + + fmt.Println("Committing setup...") + + return nil + }) +} + +func CreateHost(db *buntdb.DB, hostname string) error { + return db.Update(func(tx *buntdb.Tx) error { + hostname = escapeString(hostname) + _, replaced, err := tx.Set(fmt.Sprintf(hostEnable, "hostname"), "", nil) + if err != nil { + tx.Rollback() + return err + } + if replaced { + tx.Rollback() + return errors.New("Hostname already enabled") + } + + board := &Board{ + ShortName: "d", + LongName: 
"default", + Metadata: map[string]string{ + "locked": "true", + "description": "Default Board", + }, + } + err = NewBoard(tx, hostname, board) + if err != nil { + tx.Rollback() + return err + } + + return nil + }) +} + +func escapeString(in string) string { + in = strings.Replace(in, ".", ".dot.", -1) + in = strings.Replace(in, "-", ".minus.", -1) + in = strings.Replace(in, "\\", ".backslash.", -1) + in = strings.Replace(in, "*", ".star.", -1) + in = strings.Replace(in, "?", ".ask.", -1) + in = strings.Replace(in, "/", ".slash.", -1) + in = strings.Replace(in, "@", ".at.", -1) + in = strings.Replace(in, ">>", ".quote.", -1) + in = strings.Replace(in, ">", ".arrow-left.", -1) + in = strings.Replace(in, "<", ".arrow-right.", -1) + return in +} diff --git a/resources/ids.go b/resources/ids.go new file mode 100644 index 0000000..17437b6 --- /dev/null +++ b/resources/ids.go @@ -0,0 +1,17 @@ +package resources + +import ( + "go.rls.moe/nyx/resources/snowflakes" + "time" +) + +var fountain = snowflakes.Generator{ + StartTime: time.Date( + 2017, 03, 11, + 11, 12, 29, + 0, time.UTC).Unix(), +} + +func getID() (int64, error) { + return fountain.NewID() +} diff --git a/resources/metadata.go b/resources/metadata.go new file mode 100644 index 0000000..befa9d0 --- /dev/null +++ b/resources/metadata.go @@ -0,0 +1,3 @@ +package resources + +type Metadata map[string]string diff --git a/resources/modpass.go b/resources/modpass.go new file mode 100644 index 0000000..b1a56aa --- /dev/null +++ b/resources/modpass.go @@ -0,0 +1,72 @@ +package resources + +import ( + "encoding/json" + "errors" + "fmt" + "github.com/hlandau/passlib" + "github.com/tidwall/buntdb" +) + +type ModPass struct { + ID string `json:"id"` + Password string `json:"password"` + Board string `json:"board"` +} + +func (m *ModPass) HashLogin(pass string) error { + var err error + m.Password, err = passlib.Hash(pass) + return err +} + +func (m *ModPass) VerifyLogin(pass string) error { + var err error + err = passlib.VerifyNoUpgrade(pass, m.Password) + return err +} + +func NewMod(tx *buntdb.Tx, host string, in *ModPass) error { + dat, err := json.Marshal(in) + if err != nil { + tx.Rollback() + return err + } + _, replaced, err := tx.Set( + fmt.Sprintf(modPassPath, escapeString(host), escapeString(in.ID)), + string(dat), + nil) + if err != nil { + tx.Rollback() + return err + } + if replaced { + tx.Rollback() + return errors.New("Admin already exists") + } + return nil +} + +func GetMod(tx *buntdb.Tx, host, id string) (*ModPass, error) { + var ret = &ModPass{} + dat, err := tx.Get( + fmt.Sprintf(modPassPath, escapeString(host), escapeString(id)), + ) + if err != nil { + return nil, err + } + if err = json.Unmarshal([]byte(dat), ret); err != nil { + return nil, err + } + return ret, nil +} + +func DelMod(tx *buntdb.Tx, host, id string) error { + if _, err := tx.Delete( + fmt.Sprintf(modPassPath, escapeString(host), escapeString(id)), + ); err != nil { + tx.Rollback() + return err + } + return nil +} diff --git a/resources/reply.go b/resources/reply.go new file mode 100644 index 0000000..f7cd38b --- /dev/null +++ b/resources/reply.go @@ -0,0 +1,113 @@ +package resources + +import ( + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "github.com/tidwall/buntdb" + "golang.org/x/crypto/blake2b" +) + +type Reply struct { + ID int64 `json:"id"` + Text string `json:"text"` + Image []byte `json:"image"` + Thread int64 `json:"thread"` + Board string `json:"board"` + Metadata Metadata `json:"meta"` +} + +func NewReply(tx *buntdb.Tx, host, board 
string, thread *Thread, in *Reply, noId bool) error { + var err error + + if !noId { + in.ID, err = getID() + if err != nil { + return err + } + } else { + } + + dat, err := json.Marshal(in) + if err != nil { + return err + } + + err = TestThread(tx, host, in.Board, in.Thread) + if err != nil { + return err + } + + _, replaced, err := tx.Set( + fmt.Sprintf(replyPath, escapeString(host), escapeString(board), thread.ID, in.ID), + string(dat), + nil) + if err != nil { + return err + } + if replaced { + return errors.New("Admin already exists") + } + return nil +} + +func GetReply(tx *buntdb.Tx, host, board string, thread, id int64) (*Reply, error) { + var ret = &Reply{} + dat, err := tx.Get( + fmt.Sprintf(replyPath, escapeString(host), escapeString(board), thread, id), + ) + if err != nil { + return nil, err + } + if err = json.Unmarshal([]byte(dat), ret); err != nil { + return nil, err + } + return ret, nil +} + +func DelReply(tx *buntdb.Tx, host, board string, thread, id int64) error { + if _, err := tx.Delete( + fmt.Sprintf(replyPath, escapeString(host), escapeString(board), thread, id), + ); err != nil { + return err + } + return nil +} + +func ListReplies(tx *buntdb.Tx, host, board string, thread int64) ([]*Reply, error) { + var replyList = []*Reply{} + var err error + + err = TestThread(tx, host, board, thread) + if err != nil { + return nil, err + } + + tx.DescendKeys( + fmt.Sprintf( + replySPath, + escapeString(host), + escapeString(board), + thread, + ), + func(key, value string) bool { + var reply = &Reply{} + err = json.Unmarshal([]byte(value), reply) + if err != nil { + return false + } + replyList = append(replyList, reply) + if len(replyList) >= 100 { + return false + } + return true + }) + + return replyList, err +} + +func CalcTripCode(trip string) string { + fullTrip := blake2b.Sum256([]byte(trip)) + return base64.RawStdEncoding.EncodeToString(fullTrip[:8]) +} diff --git a/resources/snowflakes/LICENSE b/resources/snowflakes/LICENSE new file mode 100644 index 0000000..df731ba --- /dev/null +++ b/resources/snowflakes/LICENSE @@ -0,0 +1,20 @@ +MIT License + +Copyright (c) 2017 Arke Works + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/resources/snowflakes/NOTICE b/resources/snowflakes/NOTICE new file mode 100644 index 0000000..2e9df52 --- /dev/null +++ b/resources/snowflakes/NOTICE @@ -0,0 +1 @@ +You can find the original generator at https://github.com/arke-works/arke/blob/master/snowflakes/generator.go \ No newline at end of file diff --git a/resources/snowflakes/generator.go b/resources/snowflakes/generator.go new file mode 100644 index 0000000..e08167c --- /dev/null +++ b/resources/snowflakes/generator.go @@ -0,0 +1,75 @@ +package snowflakes + +import ( + "errors" + "sync" + "time" +) + +const ( + counterLen = 10 + counterMask = -1 ^ (-1 << counterLen) +) + +var ( + errNoFuture = errors.New("Start Time cannot be set in the future") +) + +// Generator is a fountain for new snowflakes. StartTime must be +// initialized to a past point in time and Instance ID can be any +// positive value or 0. +// +// If any value is not correctly set, new IDs cannot be produced. +type Generator struct { + StartTime int64 + mutex *sync.Mutex + sequence int32 + now int64 +} + +// NewID generates a new, unique snowflake value +// +// Up to 8192 snowflakes per second can be requested +// If exhausted, it blocks and sleeps until a new second +// of unix time starts. +// +// The return value is signed but always positive. +// +// Additionally, the return value is monotonic for a single +// instance and weakly monotonic for many instances. +func (g *Generator) NewID() (int64, error) { + if g.mutex == nil { + g.mutex = new(sync.Mutex) + } + if g.StartTime > time.Now().Unix() { + return 0, errNoFuture + } + g.mutex.Lock() + defer g.mutex.Unlock() + + var ( + now int64 + flake int64 + ) + now = int64(time.Now().Unix()) + + if now == g.now { + g.sequence = (g.sequence + 1) & counterMask + if g.sequence == 0 { + for now <= g.now { + now = int64(time.Now().Unix()) + time.Sleep(time.Microsecond * 100) + } + } + } else { + g.sequence = 0 + } + + g.now = now + + flake = int64( + ((now - g.StartTime) << counterLen) | + int64(g.sequence)) + + return flake, nil +} diff --git a/resources/text.go b/resources/text.go new file mode 100644 index 0000000..b9612b6 --- /dev/null +++ b/resources/text.go @@ -0,0 +1,12 @@ +package resources + +import ( + "html/template" + "strings" +) + +func OperateReplyText(unsafe string) template.HTML { + unsafe = template.HTMLEscapeString(unsafe) + unsafe = strings.Replace(unsafe, "\n", "
", -1) + return template.HTML(unsafe) +} diff --git a/resources/thread.go b/resources/thread.go new file mode 100644 index 0000000..2b21267 --- /dev/null +++ b/resources/thread.go @@ -0,0 +1,145 @@ +package resources + +import ( + "encoding/json" + "errors" + "fmt" + "github.com/tidwall/buntdb" +) + +type Thread struct { + ID int64 `json:"id"` + StartReply int64 `json:"start"` + Board string `json:"board"` + Metadata Metadata `json:"-"` + + intReply *Reply + + intReplies []*Reply +} + +func (t *Thread) GetReplies() []*Reply { + return t.intReplies +} + +func (t *Thread) GetReply() *Reply { + return t.intReply +} + +func NewThread(tx *buntdb.Tx, host, board string, in *Thread, in2 *Reply) error { + var err error + + err = TestBoard(tx, host, in.Board) + if err != nil { + return err + } + + in.ID, err = getID() + if err != nil { + return err + } + in2.Thread = in.ID + + in2.ID, err = getID() + if err != nil { + return err + } + in.StartReply = in2.ID + + dat, err := json.Marshal(in) + if err != nil { + return err + } + + _, replaced, err := tx.Set( + fmt.Sprintf(threadPath, escapeString(host), escapeString(board), in.ID), + string(dat), + nil) + + if err != nil { + return err + } + if replaced { + return errors.New("Thread already exists") + } + + return NewReply(tx, host, board, in, in2, true) +} + +func TestThread(tx *buntdb.Tx, host, board string, id int64) error { + err := TestBoard(tx, host, board) + if err != nil { + return err + } + + _, err = tx.Get( + fmt.Sprintf(threadPath, escapeString(host), escapeString(board), id), + ) + return err +} + +func GetThread(tx *buntdb.Tx, host, board string, id int64) (*Thread, error) { + var ret = &Thread{} + dat, err := tx.Get( + fmt.Sprintf(threadPath, escapeString(host), escapeString(board), id), + ) + if err != nil { + return nil, err + } + if err = json.Unmarshal([]byte(dat), ret); err != nil { + return nil, err + } + + ret.intReply, err = GetReply(tx, host, board, id, ret.StartReply) + return ret, nil +} + +func DelThread(tx *buntdb.Tx, host, board string, id int64) error { + if _, err := tx.Delete( + fmt.Sprintf(threadPath, escapeString(host), escapeString(board), id), + ); err != nil { + tx.Rollback() + return err + } + return nil +} + +func FillReplies(tx *buntdb.Tx, host string, thread *Thread) (err error) { + thread.intReplies, err = ListReplies(tx, host, thread.Board, thread.ID) + return +} + +func ListThreads(tx *buntdb.Tx, host, board string) ([]*Thread, error) { + var threadList = []*Thread{} + var err error + + err = TestBoard(tx, host, board) + if err != nil { + return nil, err + } + + tx.DescendKeys( + fmt.Sprintf( + threadSPath, + escapeString(host), + escapeString(board), + ), + func(key, value string) bool { + var thread = &Thread{} + err = json.Unmarshal([]byte(value), thread) + if err != nil { + return false + } + thread.intReply, err = GetReply(tx, host, board, thread.ID, thread.StartReply) + if err != nil { + return false + } + + threadList = append(threadList, thread) + if len(threadList) >= 25 { + return false + } + return true + }) + return threadList, err +} diff --git a/vendor/github.com/GeertJohan/go.rice/AUTHORS b/vendor/github.com/GeertJohan/go.rice/AUTHORS new file mode 100644 index 0000000..20ff8ba --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/AUTHORS @@ -0,0 +1,4 @@ +Geert-Johan Riemer +Paul Maddox +Vincent Petithory + diff --git a/vendor/github.com/GeertJohan/go.rice/LICENSE b/vendor/github.com/GeertJohan/go.rice/LICENSE new file mode 100644 index 0000000..8b4409d --- /dev/null +++ 
b/vendor/github.com/GeertJohan/go.rice/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013, Geert-Johan Riemer +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/github.com/GeertJohan/go.rice/README.md b/vendor/github.com/GeertJohan/go.rice/README.md new file mode 100644 index 0000000..f071b7e --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/README.md @@ -0,0 +1,151 @@ +## go.rice + +[![Wercker](https://img.shields.io/wercker/ci/54c7af4dcc09f9963725bb25.svg?style=flat-square)](https://app.wercker.com/#applications/54c7af4dcc09f9963725bb25) +[![Godoc](https://img.shields.io/badge/godoc-go.rice-blue.svg?style=flat-square)](https://godoc.org/github.com/GeertJohan/go.rice) + +go.rice is a [Go](http://golang.org) package that makes working with resources such as html,js,css,images and templates very easy. During development `go.rice` will load required files directly from disk. Upon deployment it is easy to add all resource files to a executable using the `rice` tool, without changing the source code for your package. go.rice provides several methods to add resources to a binary. + +### What does it do? +The first thing go.rice does is finding the correct absolute path for your resource files. Say you are executing go binary in your home directory, but your `html-files` are located in `$GOPATH/src/yourApplication/html-files`. `go.rice` will lookup the correct path for that directory (relative to the location of yourApplication). The only thing you have to do is include the resources using `rice.FindBox("html-files")`. + +This only works when the source is available to the machine executing the binary. Which is always the case when the binary was installed with `go get` or `go install`. It might occur that you wish to simply provide a binary, without source. The `rice` tool analyses source code and finds call's to `rice.FindBox(..)` and adds the required directories to the executable binary. There are several methods to add these resources. You can 'embed' by generating go source code, or append the resource to the executable as zip file. In both cases `go.rice` will detect the embedded or appended resources and load those, instead of looking up files from disk. + +### Installation + +Use `go get` to install the package the `rice` tool. 
+``` +go get github.com/GeertJohan/go.rice +go get github.com/GeertJohan/go.rice/rice +``` + +### Package usage + +Import the package: `import "github.com/GeertJohan/go.rice"` + +**Serving a static content folder over HTTP with a rice Box** +```go +http.Handle("/", http.FileServer(rice.MustFindBox("http-files").HTTPBox())) +http.ListenAndServe(":8080", nil) +``` + +**Service a static content folder over HTTP at a non-root location** +```go +box := rice.MustFindBox("cssfiles") +cssFileServer := http.StripPrefix("/css/", http.FileServer(box.HTTPBox())) +http.Handle("/css/", cssFileServer) +http.ListenAndServe(":8080", nil) +``` + +Note the *trailing slash* in `/css/` in both the call to +`http.StripPrefix` and `http.Handle`. + +**Loading a template** +```go +// find a rice.Box +templateBox, err := rice.FindBox("example-templates") +if err != nil { + log.Fatal(err) +} +// get file contents as string +templateString, err := templateBox.String("message.tmpl") +if err != nil { + log.Fatal(err) +} +// parse and execute the template +tmplMessage, err := template.New("message").Parse(templateString) +if err != nil { + log.Fatal(err) +} +tmplMessage.Execute(os.Stdout, map[string]string{"Message": "Hello, world!"}) + +``` + +Never call `FindBox()` or `MustFindBox()` from an `init()` function, as the boxes might have not been loaded at that time. + +### Tool usage +The `rice` tool lets you add the resources to a binary executable so the files are not loaded from the filesystem anymore. This creates a 'standalone' executable. There are several ways to add the resources to a binary, each has pro's and con's but all will work without requiring changes to the way you load the resources. + +#### embed-go +**Embed resources by generating Go source code** + +This method must be executed before building. It generates a single Go source file called *rice-box.go* for each package, that is compiled by the go compiler into the binary. + +The downside with this option is that the generated go source files can become very large, which will slow down compilation and require lots of memory to compile. + +Execute the following commands: +``` +rice embed-go +go build +``` + +*A Note on Symbolic Links*: `embed-go` uses the `os.Walk` function +from the standard library. The `os.Walk` function does **not** follow +symbolic links. So, when creating a box, be aware that any symbolic +links inside your box's directory will not be followed. **However**, +if the box itself is a symbolic link, its actual location will be +resolved first and then walked. In summary, if your box location is a +symbolic link, it will be followed but none of the symbolic links in +the box will be followed. + +#### embed-syso +**Embed resources by generating a coff .syso file and some .go source code** + +** This method is experimental and should not be used for production systems just yet ** + +This method must be executed before building. It generates a COFF .syso file and Go source file that are compiled by the go compiler into the binary. + +Execute the following commands: +``` +rice embed-syso +go build +``` + +#### append +**Append resources to executable as zip file** + +This method changes an already built executable. It appends the resources as zip file to the binary. It makes compilation a lot faster and can be used with large resource files. + +Downsides for appending are that it requires `zip` to be installed and does not provide a working Seek method. + +Run the following commands to create a standalone executable. 
+``` +go build -o example +rice append --exec example +``` + +**Note: requires zip command to be installed** + +On windows, install zip from http://gnuwin32.sourceforge.net/packages/zip.htm or cygwin/msys toolsets. + +#### Help information +Run `rice -h` for information about all options. + +You can run the -h option for each sub-command, e.g. `rice append -h`. + +### Order of precedence +When opening a new box, the rice package tries to locate the resources in the following order: + + - embedded in generated go source + - appended as zip + - 'live' from filesystem + + +### License +This project is licensed under a Simplified BSD license. Please read the [LICENSE file][license]. + +### TODO & Development +This package is not completed yet. Though it already provides working embedding, some important featuers are still missing. + - implement Readdir() correctly on virtualDir + - in-code TODO's + - find boxes in imported packages + +Less important stuff: + - idea, os/arch dependent embeds. rice checks if embedding file has _os_arch or build flags. If box is not requested by file without buildflags, then the buildflags are applied to the embed file. + +### Package documentation + +You will find package documentation at [godoc.org/github.com/GeertJohan/go.rice][godoc]. + + + [license]: https://github.com/GeertJohan/go.rice/blob/master/LICENSE + [godoc]: http://godoc.org/github.com/GeertJohan/go.rice diff --git a/vendor/github.com/GeertJohan/go.rice/appended.go b/vendor/github.com/GeertJohan/go.rice/appended.go new file mode 100644 index 0000000..a986a0c --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/appended.go @@ -0,0 +1,138 @@ +package rice + +import ( + "archive/zip" + "log" + "os" + "path/filepath" + "strings" + "time" + + "github.com/daaku/go.zipexe" + "github.com/kardianos/osext" +) + +// appendedBox defines an appended box +type appendedBox struct { + Name string // box name + Files map[string]*appendedFile // appended files (*zip.File) by full path +} + +type appendedFile struct { + zipFile *zip.File + dir bool + dirInfo *appendedDirInfo + children []*appendedFile + content []byte +} + +// appendedBoxes is a public register of appendes boxes +var appendedBoxes = make(map[string]*appendedBox) + +func init() { + // find if exec is appended + thisFile, err := osext.Executable() + if err != nil { + return // not appended or cant find self executable + } + closer, rd, err := zipexe.OpenCloser(thisFile) + if err != nil { + return // not appended + } + defer closer.Close() + + for _, f := range rd.File { + // get box and file name from f.Name + fileParts := strings.SplitN(strings.TrimLeft(filepath.ToSlash(f.Name), "/"), "/", 2) + boxName := fileParts[0] + var fileName string + if len(fileParts) > 1 { + fileName = fileParts[1] + } + + // find box or create new one if doesn't exist + box := appendedBoxes[boxName] + if box == nil { + box = &appendedBox{ + Name: boxName, + Files: make(map[string]*appendedFile), + } + appendedBoxes[boxName] = box + } + + // create and add file to box + af := &appendedFile{ + zipFile: f, + } + if f.Comment == "dir" { + af.dir = true + af.dirInfo = &appendedDirInfo{ + name: filepath.Base(af.zipFile.Name), + //++ TODO: use zip modtime when that is set correctly: af.zipFile.ModTime() + time: time.Now(), + } + } else { + // this is a file, we need it's contents so we can create a bytes.Reader when the file is opened + // make a new byteslice + af.content = make([]byte, af.zipFile.FileInfo().Size()) + // ignore reading empty files from zip (empty file still is 
a valid file to be read though!) + if len(af.content) > 0 { + // open io.ReadCloser + rc, err := af.zipFile.Open() + if err != nil { + af.content = nil // this will cause an error when the file is being opened or seeked (which is good) + // TODO: it's quite blunt to just log this stuff. but this is in init, so rice.Debug can't be changed yet.. + log.Printf("error opening appended file %s: %v", af.zipFile.Name, err) + } else { + _, err = rc.Read(af.content) + rc.Close() + if err != nil { + af.content = nil // this will cause an error when the file is being opened or seeked (which is good) + // TODO: it's quite blunt to just log this stuff. but this is in init, so rice.Debug can't be changed yet.. + log.Printf("error reading data for appended file %s: %v", af.zipFile.Name, err) + } + } + } + } + + // add appendedFile to box file list + box.Files[fileName] = af + + // add to parent dir (if any) + dirName := filepath.Dir(fileName) + if dirName == "." { + dirName = "" + } + if fileName != "" { // don't make box root dir a child of itself + if dir := box.Files[dirName]; dir != nil { + dir.children = append(dir.children, af) + } + } + } +} + +// implements os.FileInfo. +// used for Readdir() +type appendedDirInfo struct { + name string + time time.Time +} + +func (adi *appendedDirInfo) Name() string { + return adi.name +} +func (adi *appendedDirInfo) Size() int64 { + return 0 +} +func (adi *appendedDirInfo) Mode() os.FileMode { + return os.ModeDir +} +func (adi *appendedDirInfo) ModTime() time.Time { + return adi.time +} +func (adi *appendedDirInfo) IsDir() bool { + return true +} +func (adi *appendedDirInfo) Sys() interface{} { + return nil +} diff --git a/vendor/github.com/GeertJohan/go.rice/box.go b/vendor/github.com/GeertJohan/go.rice/box.go new file mode 100644 index 0000000..71482e2 --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/box.go @@ -0,0 +1,337 @@ +package rice + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "time" + + "github.com/GeertJohan/go.rice/embedded" +) + +// Box abstracts a directory for resources/files. +// It can either load files from disk, or from embedded code (when `rice --embed` was ran). +type Box struct { + name string + absolutePath string + embed *embedded.EmbeddedBox + appendd *appendedBox +} + +var defaultLocateOrder = []LocateMethod{LocateEmbedded, LocateAppended, LocateFS} + +func findBox(name string, order []LocateMethod) (*Box, error) { + b := &Box{name: name} + + // no support for absolute paths since gopath can be different on different machines. + // therefore, required box must be located relative to package requiring it. 
+ if filepath.IsAbs(name) { + return nil, errors.New("given name/path is absolute") + } + + var err error + for _, method := range order { + switch method { + case LocateEmbedded: + if embed := embedded.EmbeddedBoxes[name]; embed != nil { + b.embed = embed + return b, nil + } + + case LocateAppended: + appendedBoxName := strings.Replace(name, `/`, `-`, -1) + if appendd := appendedBoxes[appendedBoxName]; appendd != nil { + b.appendd = appendd + return b, nil + } + + case LocateFS: + // resolve absolute directory path + err := b.resolveAbsolutePathFromCaller() + if err != nil { + continue + } + // check if absolutePath exists on filesystem + info, err := os.Stat(b.absolutePath) + if err != nil { + continue + } + // check if absolutePath is actually a directory + if !info.IsDir() { + err = errors.New("given name/path is not a directory") + continue + } + return b, nil + case LocateWorkingDirectory: + // resolve absolute directory path + err := b.resolveAbsolutePathFromWorkingDirectory() + if err != nil { + continue + } + // check if absolutePath exists on filesystem + info, err := os.Stat(b.absolutePath) + if err != nil { + continue + } + // check if absolutePath is actually a directory + if !info.IsDir() { + err = errors.New("given name/path is not a directory") + continue + } + return b, nil + } + } + + if err == nil { + err = fmt.Errorf("could not locate box %q", name) + } + + return nil, err +} + +// FindBox returns a Box instance for given name. +// When the given name is a relative path, it's base path will be the calling pkg/cmd's source root. +// When the given name is absolute, it's absolute. derp. +// Make sure the path doesn't contain any sensitive information as it might be placed into generated go source (embedded). +func FindBox(name string) (*Box, error) { + return findBox(name, defaultLocateOrder) +} + +// MustFindBox returns a Box instance for given name, like FindBox does. +// It does not return an error, instead it panics when an error occurs. +func MustFindBox(name string) *Box { + box, err := findBox(name, defaultLocateOrder) + if err != nil { + panic(err) + } + return box +} + +// This is injected as a mutable function literal so that we can mock it out in +// tests and return a fixed test file. 
+var resolveAbsolutePathFromCaller = func(name string, nStackFrames int) (string, error) { + _, callingGoFile, _, ok := runtime.Caller(nStackFrames) + if !ok { + return "", errors.New("couldn't find caller on stack") + } + + // resolve to proper path + pkgDir := filepath.Dir(callingGoFile) + // fix for go cover + const coverPath = "_test/_obj_test" + if !filepath.IsAbs(pkgDir) { + if i := strings.Index(pkgDir, coverPath); i >= 0 { + pkgDir = pkgDir[:i] + pkgDir[i+len(coverPath):] // remove coverPath + pkgDir = filepath.Join(os.Getenv("GOPATH"), "src", pkgDir) // make absolute + } + } + return filepath.Join(pkgDir, name), nil +} + +func (b *Box) resolveAbsolutePathFromCaller() error { + path, err := resolveAbsolutePathFromCaller(b.name, 4) + if err != nil { + return err + } + b.absolutePath = path + return nil + +} + +func (b *Box) resolveAbsolutePathFromWorkingDirectory() error { + path, err := os.Getwd() + if err != nil { + return err + } + b.absolutePath = filepath.Join(path, b.name) + return nil +} + +// IsEmbedded indicates wether this box was embedded into the application +func (b *Box) IsEmbedded() bool { + return b.embed != nil +} + +// IsAppended indicates wether this box was appended to the application +func (b *Box) IsAppended() bool { + return b.appendd != nil +} + +// Time returns how actual the box is. +// When the box is embedded, it's value is saved in the embedding code. +// When the box is live, this methods returns time.Now() +func (b *Box) Time() time.Time { + if b.IsEmbedded() { + return b.embed.Time + } + + //++ TODO: return time for appended box + + return time.Now() +} + +// Open opens a File from the box +// If there is an error, it will be of type *os.PathError. +func (b *Box) Open(name string) (*File, error) { + if Debug { + fmt.Printf("Open(%s)\n", name) + } + + if b.IsEmbedded() { + if Debug { + fmt.Println("Box is embedded") + } + + // trim prefix (paths are relative to box) + name = strings.TrimLeft(name, "/") + if Debug { + fmt.Printf("Trying %s\n", name) + } + + // search for file + ef := b.embed.Files[name] + if ef == nil { + if Debug { + fmt.Println("Didn't find file in embed") + } + // file not found, try dir + ed := b.embed.Dirs[name] + if ed == nil { + if Debug { + fmt.Println("Didn't find dir in embed") + } + // dir not found, error out + return nil, &os.PathError{ + Op: "open", + Path: name, + Err: os.ErrNotExist, + } + } + if Debug { + fmt.Println("Found dir. Returning virtual dir") + } + vd := newVirtualDir(ed) + return &File{virtualD: vd}, nil + } + + // box is embedded + if Debug { + fmt.Println("Found file. 
Returning virtual file") + } + vf := newVirtualFile(ef) + return &File{virtualF: vf}, nil + } + + if b.IsAppended() { + // trim prefix (paths are relative to box) + name = strings.TrimLeft(name, "/") + + // search for file + appendedFile := b.appendd.Files[name] + if appendedFile == nil { + return nil, &os.PathError{ + Op: "open", + Path: name, + Err: os.ErrNotExist, + } + } + + // create new file + f := &File{ + appendedF: appendedFile, + } + + // if this file is a directory, we want to be able to read and seek + if !appendedFile.dir { + // looks like malformed data in zip, error now + if appendedFile.content == nil { + return nil, &os.PathError{ + Op: "open", + Path: "name", + Err: errors.New("error reading data from zip file"), + } + } + // create new bytes.Reader + f.appendedFileReader = bytes.NewReader(appendedFile.content) + } + + // all done + return f, nil + } + + // perform os open + if Debug { + fmt.Printf("Using os.Open(%s)", filepath.Join(b.absolutePath, name)) + } + file, err := os.Open(filepath.Join(b.absolutePath, name)) + if err != nil { + return nil, err + } + return &File{realF: file}, nil +} + +// Bytes returns the content of the file with given name as []byte. +func (b *Box) Bytes(name string) ([]byte, error) { + file, err := b.Open(name) + if err != nil { + return nil, err + } + defer file.Close() + + content, err := ioutil.ReadAll(file) + if err != nil { + return nil, err + } + + return content, nil +} + +// MustBytes returns the content of the file with given name as []byte. +// panic's on error. +func (b *Box) MustBytes(name string) []byte { + bts, err := b.Bytes(name) + if err != nil { + panic(err) + } + return bts +} + +// String returns the content of the file with given name as string. +func (b *Box) String(name string) (string, error) { + // check if box is embedded, optimized fast path + if b.IsEmbedded() { + // find file in embed + ef := b.embed.Files[name] + if ef == nil { + return "", os.ErrNotExist + } + // return as string + return ef.Content, nil + } + + bts, err := b.Bytes(name) + if err != nil { + return "", err + } + return string(bts), nil +} + +// MustString returns the content of the file with given name as string. +// panic's on error. +func (b *Box) MustString(name string) string { + str, err := b.String(name) + if err != nil { + panic(err) + } + return str +} + +// Name returns the name of the box +func (b *Box) Name() string { + return b.name +} diff --git a/vendor/github.com/GeertJohan/go.rice/config.go b/vendor/github.com/GeertJohan/go.rice/config.go new file mode 100644 index 0000000..45eb398 --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/config.go @@ -0,0 +1,39 @@ +package rice + +// LocateMethod defines how a box is located. +type LocateMethod int + +const ( + LocateFS = LocateMethod(iota) // Locate on the filesystem according to package path. + LocateAppended // Locate boxes appended to the executable. + LocateEmbedded // Locate embedded boxes. + LocateWorkingDirectory // Locate on the binary working directory +) + +// Config allows customizing the box lookup behavior. +type Config struct { + // LocateOrder defines the priority order that boxes are searched for. By + // default, the package global FindBox searches for embedded boxes first, + // then appended boxes, and then finally boxes on the filesystem. That + // search order may be customized by provided the ordered list here. Leaving + // out a particular method will omit that from the search space. 
For + // example, []LocateMethod{LocateEmbedded, LocateAppended} will never search + // the filesystem for boxes. + LocateOrder []LocateMethod +} + +// FindBox searches for boxes using the LocateOrder of the config. +func (c *Config) FindBox(boxName string) (*Box, error) { + return findBox(boxName, c.LocateOrder) +} + +// MustFindBox searches for boxes using the LocateOrder of the config, like +// FindBox does. It does not return an error, instead it panics when an error +// occurs. +func (c *Config) MustFindBox(boxName string) *Box { + box, err := findBox(boxName, c.LocateOrder) + if err != nil { + panic(err) + } + return box +} diff --git a/vendor/github.com/GeertJohan/go.rice/debug.go b/vendor/github.com/GeertJohan/go.rice/debug.go new file mode 100644 index 0000000..2e68c84 --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/debug.go @@ -0,0 +1,4 @@ +package rice + +// Debug can be set to true to enable debugging. +var Debug = false diff --git a/vendor/github.com/GeertJohan/go.rice/embedded.go b/vendor/github.com/GeertJohan/go.rice/embedded.go new file mode 100644 index 0000000..4f03fe1 --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/embedded.go @@ -0,0 +1,90 @@ +package rice + +import ( + "os" + "time" + + "github.com/GeertJohan/go.rice/embedded" +) + +// re-type to make exported methods invisible to user (godoc) +// they're not required for the user +// embeddedDirInfo implements os.FileInfo +type embeddedDirInfo embedded.EmbeddedDir + +// Name returns the base name of the directory +// (implementing os.FileInfo) +func (ed *embeddedDirInfo) Name() string { + return ed.Filename +} + +// Size always returns 0 +// (implementing os.FileInfo) +func (ed *embeddedDirInfo) Size() int64 { + return 0 +} + +// Mode returns the file mode bits +// (implementing os.FileInfo) +func (ed *embeddedDirInfo) Mode() os.FileMode { + return os.FileMode(0555 | os.ModeDir) // dr-xr-xr-x +} + +// ModTime returns the modification time +// (implementing os.FileInfo) +func (ed *embeddedDirInfo) ModTime() time.Time { + return ed.DirModTime +} + +// IsDir returns the abbreviation for Mode().IsDir() (always true) +// (implementing os.FileInfo) +func (ed *embeddedDirInfo) IsDir() bool { + return true +} + +// Sys returns the underlying data source (always nil) +// (implementing os.FileInfo) +func (ed *embeddedDirInfo) Sys() interface{} { + return nil +} + +// re-type to make exported methods invisible to user (godoc) +// they're not required for the user +// embeddedFileInfo implements os.FileInfo +type embeddedFileInfo embedded.EmbeddedFile + +// Name returns the base name of the file +// (implementing os.FileInfo) +func (ef *embeddedFileInfo) Name() string { + return ef.Filename +} + +// Size returns the length in bytes for regular files; system-dependent for others +// (implementing os.FileInfo) +func (ef *embeddedFileInfo) Size() int64 { + return int64(len(ef.Content)) +} + +// Mode returns the file mode bits +// (implementing os.FileInfo) +func (ef *embeddedFileInfo) Mode() os.FileMode { + return os.FileMode(0555) // r-xr-xr-x +} + +// ModTime returns the modification time +// (implementing os.FileInfo) +func (ef *embeddedFileInfo) ModTime() time.Time { + return ef.FileModTime +} + +// IsDir returns the abbreviation for Mode().IsDir() (always false) +// (implementing os.FileInfo) +func (ef *embeddedFileInfo) IsDir() bool { + return false +} + +// Sys returns the underlying data source (always nil) +// (implementing os.FileInfo) +func (ef *embeddedFileInfo) Sys() interface{} { + return nil +} diff 
--git a/vendor/github.com/GeertJohan/go.rice/embedded/embedded.go b/vendor/github.com/GeertJohan/go.rice/embedded/embedded.go new file mode 100644 index 0000000..bba8e58 --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/embedded/embedded.go @@ -0,0 +1,80 @@ +// Package embedded defines embedded data types that are shared between the go.rice package and generated code. +package embedded + +import ( + "fmt" + "path/filepath" + "strings" + "time" +) + +const ( + EmbedTypeGo = 0 + EmbedTypeSyso = 1 +) + +// EmbeddedBox defines an embedded box +type EmbeddedBox struct { + Name string // box name + Time time.Time // embed time + EmbedType int // kind of embedding + Files map[string]*EmbeddedFile // ALL embedded files by full path + Dirs map[string]*EmbeddedDir // ALL embedded dirs by full path +} + +// Link creates the ChildDirs and ChildFiles links in all EmbeddedDir's +func (e *EmbeddedBox) Link() { + for path, ed := range e.Dirs { + fmt.Println(path) + ed.ChildDirs = make([]*EmbeddedDir, 0) + ed.ChildFiles = make([]*EmbeddedFile, 0) + } + for path, ed := range e.Dirs { + parentDirpath, _ := filepath.Split(path) + if strings.HasSuffix(parentDirpath, "/") { + parentDirpath = parentDirpath[:len(parentDirpath)-1] + } + parentDir := e.Dirs[parentDirpath] + if parentDir == nil { + panic("parentDir `" + parentDirpath + "` is missing in embedded box") + } + parentDir.ChildDirs = append(parentDir.ChildDirs, ed) + } + for path, ef := range e.Files { + dirpath, _ := filepath.Split(path) + if strings.HasSuffix(dirpath, "/") { + dirpath = dirpath[:len(dirpath)-1] + } + dir := e.Dirs[dirpath] + if dir == nil { + panic("dir `" + dirpath + "` is missing in embedded box") + } + dir.ChildFiles = append(dir.ChildFiles, ef) + } +} + +// EmbeddedDir is instanced in the code generated by the rice tool and contains all necicary information about an embedded file +type EmbeddedDir struct { + Filename string + DirModTime time.Time + ChildDirs []*EmbeddedDir // direct childs, as returned by virtualDir.Readdir() + ChildFiles []*EmbeddedFile // direct childs, as returned by virtualDir.Readdir() +} + +// EmbeddedFile is instanced in the code generated by the rice tool and contains all necicary information about an embedded file +type EmbeddedFile struct { + Filename string // filename + FileModTime time.Time + Content string +} + +// EmbeddedBoxes is a public register of embedded boxes +var EmbeddedBoxes = make(map[string]*EmbeddedBox) + +// RegisterEmbeddedBox registers an EmbeddedBox +func RegisterEmbeddedBox(name string, box *EmbeddedBox) { + if _, exists := EmbeddedBoxes[name]; exists { + panic(fmt.Sprintf("EmbeddedBox with name `%s` exists already", name)) + } + EmbeddedBoxes[name] = box +} diff --git a/vendor/github.com/GeertJohan/go.rice/file.go b/vendor/github.com/GeertJohan/go.rice/file.go new file mode 100644 index 0000000..606a188 --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/file.go @@ -0,0 +1,144 @@ +package rice + +import ( + "bytes" + "errors" + "os" + "path/filepath" +) + +// File implements the io.Reader, io.Seeker, io.Closer and http.File interfaces +type File struct { + // File abstracts file methods so the user doesn't see the difference between rice.virtualFile, rice.virtualDir and os.File + // TODO: maybe use internal File interface and four implementations: *os.File, appendedFile, virtualFile, virtualDir + + // real file on disk + realF *os.File + + // when embedded (go) + virtualF *virtualFile + virtualD *virtualDir + + // when appended (zip) + appendedF *appendedFile + 
appendedFileReader *bytes.Reader + // TODO: is appendedFileReader subject of races? Might need a lock here.. +} + +// Close is like (*os.File).Close() +// Visit http://golang.org/pkg/os/#File.Close for more information +func (f *File) Close() error { + if f.appendedF != nil { + if f.appendedFileReader == nil { + return errors.New("already closed") + } + f.appendedFileReader = nil + return nil + } + if f.virtualF != nil { + return f.virtualF.close() + } + if f.virtualD != nil { + return f.virtualD.close() + } + return f.realF.Close() +} + +// Stat is like (*os.File).Stat() +// Visit http://golang.org/pkg/os/#File.Stat for more information +func (f *File) Stat() (os.FileInfo, error) { + if f.appendedF != nil { + if f.appendedF.dir { + return f.appendedF.dirInfo, nil + } + if f.appendedFileReader == nil { + return nil, errors.New("file is closed") + } + return f.appendedF.zipFile.FileInfo(), nil + } + if f.virtualF != nil { + return f.virtualF.stat() + } + if f.virtualD != nil { + return f.virtualD.stat() + } + return f.realF.Stat() +} + +// Readdir is like (*os.File).Readdir() +// Visit http://golang.org/pkg/os/#File.Readdir for more information +func (f *File) Readdir(count int) ([]os.FileInfo, error) { + if f.appendedF != nil { + if f.appendedF.dir { + fi := make([]os.FileInfo, 0, len(f.appendedF.children)) + for _, childAppendedFile := range f.appendedF.children { + if childAppendedFile.dir { + fi = append(fi, childAppendedFile.dirInfo) + } else { + fi = append(fi, childAppendedFile.zipFile.FileInfo()) + } + } + return fi, nil + } + //++ TODO: is os.ErrInvalid the correct error for Readdir on file? + return nil, os.ErrInvalid + } + if f.virtualF != nil { + return f.virtualF.readdir(count) + } + if f.virtualD != nil { + return f.virtualD.readdir(count) + } + return f.realF.Readdir(count) +} + +// Read is like (*os.File).Read() +// Visit http://golang.org/pkg/os/#File.Read for more information +func (f *File) Read(bts []byte) (int, error) { + if f.appendedF != nil { + if f.appendedFileReader == nil { + return 0, &os.PathError{ + Op: "read", + Path: filepath.Base(f.appendedF.zipFile.Name), + Err: errors.New("file is closed"), + } + } + if f.appendedF.dir { + return 0, &os.PathError{ + Op: "read", + Path: filepath.Base(f.appendedF.zipFile.Name), + Err: errors.New("is a directory"), + } + } + return f.appendedFileReader.Read(bts) + } + if f.virtualF != nil { + return f.virtualF.read(bts) + } + if f.virtualD != nil { + return f.virtualD.read(bts) + } + return f.realF.Read(bts) +} + +// Seek is like (*os.File).Seek() +// Visit http://golang.org/pkg/os/#File.Seek for more information +func (f *File) Seek(offset int64, whence int) (int64, error) { + if f.appendedF != nil { + if f.appendedFileReader == nil { + return 0, &os.PathError{ + Op: "seek", + Path: filepath.Base(f.appendedF.zipFile.Name), + Err: errors.New("file is closed"), + } + } + return f.appendedFileReader.Seek(offset, whence) + } + if f.virtualF != nil { + return f.virtualF.seek(offset, whence) + } + if f.virtualD != nil { + return f.virtualD.seek(offset, whence) + } + return f.realF.Seek(offset, whence) +} diff --git a/vendor/github.com/GeertJohan/go.rice/http.go b/vendor/github.com/GeertJohan/go.rice/http.go new file mode 100644 index 0000000..3a61f0e --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/http.go @@ -0,0 +1,21 @@ +package rice + +import ( + "net/http" +) + +// HTTPBox implements http.FileSystem which allows the use of Box with a http.FileServer. 
+// e.g.: http.Handle("/", http.FileServer(rice.MustFindBox("http-files").HTTPBox())) +type HTTPBox struct { + *Box +} + +// HTTPBox creates a new HTTPBox from an existing Box +func (b *Box) HTTPBox() *HTTPBox { + return &HTTPBox{b} +} + +// Open returns a File using the http.File interface +func (hb *HTTPBox) Open(name string) (http.File, error) { + return hb.Box.Open(name) +} diff --git a/vendor/github.com/GeertJohan/go.rice/sort.go b/vendor/github.com/GeertJohan/go.rice/sort.go new file mode 100644 index 0000000..cd83c65 --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/sort.go @@ -0,0 +1,19 @@ +package rice + +import "os" + +// SortByName allows an array of os.FileInfo objects +// to be easily sorted by filename using sort.Sort(SortByName(array)) +type SortByName []os.FileInfo + +func (f SortByName) Len() int { return len(f) } +func (f SortByName) Less(i, j int) bool { return f[i].Name() < f[j].Name() } +func (f SortByName) Swap(i, j int) { f[i], f[j] = f[j], f[i] } + +// SortByModified allows an array of os.FileInfo objects +// to be easily sorted by modified date using sort.Sort(SortByModified(array)) +type SortByModified []os.FileInfo + +func (f SortByModified) Len() int { return len(f) } +func (f SortByModified) Less(i, j int) bool { return f[i].ModTime().Unix() > f[j].ModTime().Unix() } +func (f SortByModified) Swap(i, j int) { f[i], f[j] = f[j], f[i] } diff --git a/vendor/github.com/GeertJohan/go.rice/virtual.go b/vendor/github.com/GeertJohan/go.rice/virtual.go new file mode 100644 index 0000000..50bff16 --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/virtual.go @@ -0,0 +1,252 @@ +package rice + +import ( + "errors" + "io" + "os" + "path/filepath" + "sort" + + "github.com/GeertJohan/go.rice/embedded" +) + +//++ TODO: IDEA: merge virtualFile and virtualDir, this decreases work done by rice.File + +// Error indicating some function is not implemented yet (but available to satisfy an interface) +var ErrNotImplemented = errors.New("not implemented yet") + +// virtualFile is a 'stateful' virtual file. +// virtualFile wraps an *EmbeddedFile for a call to Box.Open() and virtualizes 'read cursor' (offset) and 'closing'. +// virtualFile is only internally visible and should be exposed through rice.File +type virtualFile struct { + *embedded.EmbeddedFile // the actual embedded file, embedded to obtain methods + offset int64 // read position on the virtual file + closed bool // closed when true +} + +// create a new virtualFile for given EmbeddedFile +func newVirtualFile(ef *embedded.EmbeddedFile) *virtualFile { + vf := &virtualFile{ + EmbeddedFile: ef, + offset: 0, + closed: false, + } + return vf +} + +//++ TODO check for nil pointers in all these methods. 
When so: return os.PathError with Err: os.ErrInvalid + +func (vf *virtualFile) close() error { + if vf.closed { + return &os.PathError{ + Op: "close", + Path: vf.EmbeddedFile.Filename, + Err: errors.New("already closed"), + } + } + vf.EmbeddedFile = nil + vf.closed = true + return nil +} + +func (vf *virtualFile) stat() (os.FileInfo, error) { + if vf.closed { + return nil, &os.PathError{ + Op: "stat", + Path: vf.EmbeddedFile.Filename, + Err: errors.New("bad file descriptor"), + } + } + return (*embeddedFileInfo)(vf.EmbeddedFile), nil +} + +func (vf *virtualFile) readdir(count int) ([]os.FileInfo, error) { + if vf.closed { + return nil, &os.PathError{ + Op: "readdir", + Path: vf.EmbeddedFile.Filename, + Err: errors.New("bad file descriptor"), + } + } + //TODO: return proper error for a readdir() call on a file + return nil, ErrNotImplemented +} + +func (vf *virtualFile) read(bts []byte) (int, error) { + if vf.closed { + return 0, &os.PathError{ + Op: "read", + Path: vf.EmbeddedFile.Filename, + Err: errors.New("bad file descriptor"), + } + } + + end := vf.offset + int64(len(bts)) + + if end >= int64(len(vf.Content)) { + // end of file, so return what we have + EOF + n := copy(bts, vf.Content[vf.offset:]) + vf.offset = 0 + return n, io.EOF + } + + n := copy(bts, vf.Content[vf.offset:end]) + vf.offset += int64(n) + return n, nil + +} + +func (vf *virtualFile) seek(offset int64, whence int) (int64, error) { + if vf.closed { + return 0, &os.PathError{ + Op: "seek", + Path: vf.EmbeddedFile.Filename, + Err: errors.New("bad file descriptor"), + } + } + var e error + + //++ TODO: check if this is correct implementation for seek + switch whence { + case os.SEEK_SET: + //++ check if new offset isn't out of bounds, set e when it is, then break out of switch + vf.offset = offset + case os.SEEK_CUR: + //++ check if new offset isn't out of bounds, set e when it is, then break out of switch + vf.offset += offset + case os.SEEK_END: + //++ check if new offset isn't out of bounds, set e when it is, then break out of switch + vf.offset = int64(len(vf.EmbeddedFile.Content)) - offset + } + + if e != nil { + return 0, &os.PathError{ + Op: "seek", + Path: vf.Filename, + Err: e, + } + } + + return vf.offset, nil +} + +// virtualDir is a 'stateful' virtual directory. +// virtualDir wraps an *EmbeddedDir for a call to Box.Open() and virtualizes 'closing'. +// virtualDir is only internally visible and should be exposed through rice.File +type virtualDir struct { + *embedded.EmbeddedDir + offset int // readdir position on the directory + closed bool +} + +// create a new virtualDir for given EmbeddedDir +func newVirtualDir(ed *embedded.EmbeddedDir) *virtualDir { + vd := &virtualDir{ + EmbeddedDir: ed, + offset: 0, + closed: false, + } + return vd +} + +func (vd *virtualDir) close() error { + //++ TODO: needs sync mutex? 
+ if vd.closed { + return &os.PathError{ + Op: "close", + Path: vd.EmbeddedDir.Filename, + Err: errors.New("already closed"), + } + } + vd.closed = true + return nil +} + +func (vd *virtualDir) stat() (os.FileInfo, error) { + if vd.closed { + return nil, &os.PathError{ + Op: "stat", + Path: vd.EmbeddedDir.Filename, + Err: errors.New("bad file descriptor"), + } + } + return (*embeddedDirInfo)(vd.EmbeddedDir), nil +} + +func (vd *virtualDir) readdir(n int) (fi []os.FileInfo, err error) { + + if vd.closed { + return nil, &os.PathError{ + Op: "readdir", + Path: vd.EmbeddedDir.Filename, + Err: errors.New("bad file descriptor"), + } + } + + // Build up the array of our contents + var files []os.FileInfo + + // Add the child directories + for _, child := range vd.ChildDirs { + child.Filename = filepath.Base(child.Filename) + files = append(files, (*embeddedDirInfo)(child)) + } + + // Add the child files + for _, child := range vd.ChildFiles { + child.Filename = filepath.Base(child.Filename) + files = append(files, (*embeddedFileInfo)(child)) + } + + // Sort it by filename (lexical order) + sort.Sort(SortByName(files)) + + // Return all contents if that's what is requested + if n <= 0 { + vd.offset = 0 + return files, nil + } + + // If user has requested past the end of our list + // return what we can and send an EOF + if vd.offset+n >= len(files) { + offset := vd.offset + vd.offset = 0 + return files[offset:], io.EOF + } + + offset := vd.offset + vd.offset += n + return files[offset : offset+n], nil + +} + +func (vd *virtualDir) read(bts []byte) (int, error) { + if vd.closed { + return 0, &os.PathError{ + Op: "read", + Path: vd.EmbeddedDir.Filename, + Err: errors.New("bad file descriptor"), + } + } + return 0, &os.PathError{ + Op: "read", + Path: vd.EmbeddedDir.Filename, + Err: errors.New("is a directory"), + } +} + +func (vd *virtualDir) seek(offset int64, whence int) (int64, error) { + if vd.closed { + return 0, &os.PathError{ + Op: "seek", + Path: vd.EmbeddedDir.Filename, + Err: errors.New("bad file descriptor"), + } + } + return 0, &os.PathError{ + Op: "seek", + Path: vd.Filename, + Err: errors.New("is a directory"), + } +} diff --git a/vendor/github.com/GeertJohan/go.rice/walk.go b/vendor/github.com/GeertJohan/go.rice/walk.go new file mode 100644 index 0000000..3042aea --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/walk.go @@ -0,0 +1,122 @@ +package rice + +import ( + "os" + "path/filepath" + "sort" + "strings" +) + +// Walk is like filepath.Walk() +// Visit http://golang.org/pkg/path/filepath/#Walk for more information +func (b *Box) Walk(path string, walkFn filepath.WalkFunc) error { + + pathFile, err := b.Open(path) + if err != nil { + return err + } + defer pathFile.Close() + + pathInfo, err := pathFile.Stat() + if err != nil { + return err + } + + if b.IsAppended() || b.IsEmbedded() { + return b.walk(path, pathInfo, walkFn) + } + + // We don't have any embedded or appended box so use live filesystem mode + return filepath.Walk(b.absolutePath+string(os.PathSeparator)+path, func(path string, info os.FileInfo, err error) error { + + // Strip out the box name from the returned paths + path = strings.TrimPrefix(path, b.absolutePath+string(os.PathSeparator)) + return walkFn(path, info, err) + + }) + +} + +// walk recursively descends path. 
+// See walk() in $GOROOT/src/pkg/path/filepath/path.go +func (b *Box) walk(path string, info os.FileInfo, walkFn filepath.WalkFunc) error { + + err := walkFn(path, info, nil) + if err != nil { + if info.IsDir() && err == filepath.SkipDir { + return nil + } + return err + } + + if !info.IsDir() { + return nil + } + + names, err := b.readDirNames(path) + if err != nil { + return walkFn(path, info, err) + } + + for _, name := range names { + + filename := filepath.Join(path, name) + fileObject, err := b.Open(filename) + if err != nil { + return err + } + defer fileObject.Close() + + fileInfo, err := fileObject.Stat() + if err != nil { + if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { + return err + } + } else { + err = b.walk(filename, fileInfo, walkFn) + if err != nil { + if !fileInfo.IsDir() || err != filepath.SkipDir { + return err + } + } + } + } + + return nil + +} + +// readDirNames reads the directory named by path and returns a sorted list of directory entries. +// See readDirNames() in $GOROOT/pkg/path/filepath/path.go +func (b *Box) readDirNames(path string) ([]string, error) { + + f, err := b.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + stat, err := f.Stat() + if err != nil { + return nil, err + } + + if !stat.IsDir() { + return nil, nil + } + + infos, err := f.Readdir(0) + if err != nil { + return nil, err + } + + var names []string + + for _, info := range infos { + names = append(names, info.Name()) + } + + sort.Strings(names) + return names, nil + +} diff --git a/vendor/github.com/GeertJohan/go.rice/wercker.yml b/vendor/github.com/GeertJohan/go.rice/wercker.yml new file mode 100644 index 0000000..b86467f --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/wercker.yml @@ -0,0 +1,31 @@ +box: wercker/golang + +build: + steps: + - setup-go-workspace + + - script: + name: get dependencies + code: | + go get -d -t ./... + + - script: + name: build + code: | + go build -x ./... + + - script: + name: test + code: | + go test -cover ./... + + - script: + name: vet + code: | + go vet ./... + + - script: + name: lint + code: | + go get github.com/golang/lint/golint + golint . diff --git a/vendor/github.com/daaku/go.zipexe/license b/vendor/github.com/daaku/go.zipexe/license new file mode 100644 index 0000000..6a2f15c --- /dev/null +++ b/vendor/github.com/daaku/go.zipexe/license @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright © 2012-2015 Carlos Castillo + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the “Software”), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
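
As a quick illustration of the `Box.Walk` helper defined in `walk.go` above, the following sketch (not part of the vendored code; the box name `res` is an assumption for a resource directory located next to the calling package) enumerates every regular file in a box:

```go
// Hypothetical usage sketch for rice Box.Walk; the "res" box name is illustrative.
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/GeertJohan/go.rice"
)

func main() {
	// Assumes a "res" directory relative to this package (live filesystem mode).
	box := rice.MustFindBox("res")

	// Walk mirrors filepath.Walk; paths are reported relative to the box root.
	err := box.Walk("", func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if !info.IsDir() {
			fmt.Println(filepath.ToSlash(path))
		}
		return nil
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, "walk failed:", err)
	}
}
```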
diff --git a/vendor/github.com/daaku/go.zipexe/readme.md b/vendor/github.com/daaku/go.zipexe/readme.md new file mode 100644 index 0000000..3ec0e8e --- /dev/null +++ b/vendor/github.com/daaku/go.zipexe/readme.md @@ -0,0 +1,5 @@ +go.zipexe +========= + +This module was taken as-is from https://github.com/cookieo9/resources-go. +Documentation: https://godoc.org/github.com/daaku/go.zipexe diff --git a/vendor/github.com/daaku/go.zipexe/zipexe.go b/vendor/github.com/daaku/go.zipexe/zipexe.go new file mode 100644 index 0000000..6004606 --- /dev/null +++ b/vendor/github.com/daaku/go.zipexe/zipexe.go @@ -0,0 +1,142 @@ +// Package zipexe attempts to open an executable binary file as a zip file. +package zipexe + +import ( + "archive/zip" + "debug/elf" + "debug/macho" + "debug/pe" + "errors" + "io" + "os" +) + +// Opens a zip file by path. +func Open(path string) (*zip.Reader, error) { + _, rd, err := OpenCloser(path) + return rd, err +} + +// OpenCloser is like Open but returns an additional Closer to avoid leaking open files. +func OpenCloser(path string) (io.Closer, *zip.Reader, error) { + file, err := os.Open(path) + if err != nil { + return nil, nil, err + } + finfo, err := file.Stat() + if err != nil { + return nil, nil, err + } + zr, err := NewReader(file, finfo.Size()) + if err != nil { + return nil, nil, err + } + return file, zr, nil +} + +// Open a zip file, specially handling various binaries that may have been +// augmented with zip data. +func NewReader(rda io.ReaderAt, size int64) (*zip.Reader, error) { + handlers := []func(io.ReaderAt, int64) (*zip.Reader, error){ + zip.NewReader, + zipExeReaderMacho, + zipExeReaderElf, + zipExeReaderPe, + } + + for _, handler := range handlers { + zfile, err := handler(rda, size) + if err == nil { + return zfile, nil + } + } + return nil, errors.New("Couldn't Open As Executable") +} + +// zipExeReaderMacho treats the file as a Mach-O binary +// (Mac OS X / Darwin executable) and attempts to find a zip archive. +func zipExeReaderMacho(rda io.ReaderAt, size int64) (*zip.Reader, error) { + file, err := macho.NewFile(rda) + if err != nil { + return nil, err + } + + var max int64 + for _, load := range file.Loads { + seg, ok := load.(*macho.Segment) + if ok { + // Check if the segment contains a zip file + if zfile, err := zip.NewReader(seg, int64(seg.Filesz)); err == nil { + return zfile, nil + } + + // Otherwise move end of file pointer + end := int64(seg.Offset + seg.Filesz) + if end > max { + max = end + } + } + } + + // No zip file within binary, try appended to end + section := io.NewSectionReader(rda, max, size-max) + return zip.NewReader(section, section.Size()) +} + +// zipExeReaderPe treats the file as a Portable Exectuable binary +// (Windows executable) and attempts to find a zip archive. +func zipExeReaderPe(rda io.ReaderAt, size int64) (*zip.Reader, error) { + file, err := pe.NewFile(rda) + if err != nil { + return nil, err + } + + var max int64 + for _, sec := range file.Sections { + // Check if this section has a zip file + if zfile, err := zip.NewReader(sec, int64(sec.Size)); err == nil { + return zfile, nil + } + + // Otherwise move end of file pointer + end := int64(sec.Offset + sec.Size) + if end > max { + max = end + } + } + + // No zip file within binary, try appended to end + section := io.NewSectionReader(rda, max, size-max) + return zip.NewReader(section, section.Size()) +} + +// zipExeReaderElf treats the file as a ELF binary +// (linux/BSD/etc... executable) and attempts to find a zip archive. 
+func zipExeReaderElf(rda io.ReaderAt, size int64) (*zip.Reader, error) { + file, err := elf.NewFile(rda) + if err != nil { + return nil, err + } + + var max int64 + for _, sect := range file.Sections { + if sect.Type == elf.SHT_NOBITS { + continue + } + + // Check if this section has a zip file + if zfile, err := zip.NewReader(sect, int64(sect.Size)); err == nil { + return zfile, nil + } + + // Otherwise move end of file pointer + end := int64(sect.Offset + sect.Size) + if end > max { + max = end + } + } + + // No zip file within binary, try appended to end + section := io.NewSectionReader(rda, max, size-max) + return zip.NewReader(section, section.Size()) +} diff --git a/vendor/github.com/hlandau/passlib/COPYING b/vendor/github.com/hlandau/passlib/COPYING new file mode 100644 index 0000000..d2aa62a --- /dev/null +++ b/vendor/github.com/hlandau/passlib/COPYING @@ -0,0 +1,39 @@ +passlib is a Golang password verification library strongly inspired by and +derived from Python passlib (). The BSD +license is preserved and extended to all new code. + +License for Passlib +=================== +Passlib is (c) `Assurance Technologies `_, +and is released under the `BSD license `_:: + + Passlib + Copyright (c) 2008-2012 Assurance Technologies, LLC. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + * Neither the name of Assurance Technologies, nor the names of the + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/hlandau/passlib/README.md b/vendor/github.com/hlandau/passlib/README.md new file mode 100644 index 0000000..ba9b42c --- /dev/null +++ b/vendor/github.com/hlandau/passlib/README.md @@ -0,0 +1,95 @@ +passlib for go +============== + +[![GoDoc](https://godoc.org/gopkg.in/hlandau/passlib.v1?status.svg)](https://godoc.org/gopkg.in/hlandau/passlib.v1) [![Build Status](https://travis-ci.org/hlandau/passlib.svg?branch=master)](https://travis-ci.org/hlandau/passlib) + +[Python's passlib](https://pythonhosted.org/passlib/) is quite an amazing +library. I'm not sure there's a password library in existence with more thought +put into it, or with more support for obscure password formats. + +This is a skeleton of a port of passlib to Go. 
It dogmatically adopts the +modular crypt format, which [passlib has excellent documentation for](https://pythonhosted.org/passlib/modular_crypt_format.html#modular-crypt-format). + +Currently, it supports sha256-crypt, sha512-crypt, scrypt-sha256, bcrypt and +passlib's bcrypt-sha256 variant. By default, it will hash using scrypt-sha256 +and verify existing hashes using any of these schemes. + +Example Usage +------------- +There's a default context for ease of use. Most people need only concern +themselves with the functions `Hash` and `Verify`: + +```go +// Hash a plaintext, UTF-8 password. +func Hash(password string) (hash string, err error) + +// Verifies a plaintext, UTF-8 password using a previously derived hash. +// Returns non-nil err if verification fails. +// +// Also returns an upgraded password hash if the hash provided is +// deprecated. +func Verify(password, hash string) (newHash string, err error) +``` + +Here's a rough skeleton of typical usage. + +```go +import "gopkg.in/hlandau/passlib.v1" + +func RegisterUser() { + (...) + + password := get a (UTF-8, plaintext) password from somewhere + + hash, err := passlib.Hash(password) + if err != nil { + // couldn't hash password for some reason + return + } + + (store hash in database, etc.) +} + +func CheckPassword() bool { + password := get the password the user entered + hash := the hash you stored from the call to Hash() + + newHash, err := passlib.Verify(password, hash) + if err != nil { + // incorrect password, malformed hash, etc. + // either way, reject + return false + } + + // The context has decided, as per its policy, that + // the hash which was used to validate the password + // should be changed. It has upgraded the hash using + // the verified password. + if newHash != "" { + (store newHash in database, replacing old hash) + } + + return true +} +``` + +scrypt Modular Crypt Format +--------------------------- +Since scrypt does not have a pre-existing modular crypt format standard, I made one. It's as follows: + + $s2$N$r$p$salt$hash + +...where `N`, `r` and `p` are the respective difficulty parameters to scrypt as positive decimal integers without leading zeroes, and `salt` and `hash` are base64-encoded binary strings. Note that the RFC 4648 base64 encoding is used (not the one used by sha256-crypt and sha512-crypt). + +TODO +---- + + - PBKDF2 + +Licence +------- +passlib is partially derived from Python's passlib and so maintains its BSD license. + + © 2008-2012 Assurance Technologies LLC. (Python passlib) BSD License + © 2014 Hugo Landau BSD License + diff --git a/vendor/github.com/hlandau/passlib/passlib.go b/vendor/github.com/hlandau/passlib/passlib.go new file mode 100644 index 0000000..889ec79 --- /dev/null +++ b/vendor/github.com/hlandau/passlib/passlib.go @@ -0,0 +1,177 @@ +// Package passlib provides a simple password hashing and verification +// interface abstracting multiple password hashing schemes. +// +// Most people need concern themselves only with the functions Hash +// and Verify, which uses the default context and sensible defaults. 
+package passlib // import "gopkg.in/hlandau/passlib.v1" + +import "gopkg.in/hlandau/passlib.v1/abstract" +import "gopkg.in/hlandau/passlib.v1/hash/scrypt" +import "gopkg.in/hlandau/passlib.v1/hash/sha2crypt" +import "gopkg.in/hlandau/passlib.v1/hash/bcryptsha256" +import "gopkg.in/hlandau/passlib.v1/hash/bcrypt" +import "gopkg.in/hlandau/easymetric.v1/cexp" + +var cHashCalls = cexp.NewCounter("passlib.ctx.hashCalls") +var cVerifyCalls = cexp.NewCounter("passlib.ctx.verifyCalls") +var cSuccessfulVerifyCalls = cexp.NewCounter("passlib.ctx.successfulVerifyCalls") +var cFailedVerifyCalls = cexp.NewCounter("passlib.ctx.failedVerifyCalls") +var cSuccessfulVerifyCallsWithUpgrade = cexp.NewCounter("passlib.ctx.successfulVerifyCallsWithUpgrade") +var cSuccessfulVerifyCallsDeferringUpgrade = cexp.NewCounter("passlib.ctx.successfulVerifyCallsDeferringUpgrade") + +// The default schemes, most preferred first. The first scheme will be used to +// hash passwords, and any of the schemes may be used to verify existing +// passwords. The contents of this value may change with subsequent releases. +var DefaultSchemes = []abstract.Scheme{ + scrypt.SHA256Crypter, + sha2crypt.Crypter256, + sha2crypt.Crypter512, + bcryptsha256.Crypter, + bcrypt.Crypter, +} + +// A password hashing context, that uses a given set of schemes to hash and +// verify passwords. +type Context struct { + // Slice of schemes to use, most preferred first. + // + // If left uninitialized, a sensible default set of schemes will be used. + // + // An upgrade hash (see the newHash return value of the Verify method of the + // abstract.Scheme interface) will be issued whenever a password is validated + // using a scheme which is not the first scheme in this slice. + Schemes []abstract.Scheme +} + +func (ctx *Context) schemes() []abstract.Scheme { + if ctx.Schemes == nil { + return DefaultSchemes + } + + return ctx.Schemes +} + +// Hashes a UTF-8 plaintext password using the context and produces a password hash. +// +// If stub is "", one is generated automaticaly for the preferred password hashing +// scheme; you should specify stub as "" in almost all cases. +// +// The provided or randomly generated stub is used to deterministically hash +// the password. The returned hash is in modular crypt format. +// +// If the context has not been specifically configured, a sensible default policy +// is used. See the fields of Context. +func (ctx *Context) Hash(password string) (hash string, err error) { + cHashCalls.Add(1) + + return ctx.schemes()[0].Hash(password) +} + +// Verifies a UTF-8 plaintext password using a previously derived password hash +// and the default context. Returns nil err only if the password is valid. +// +// If the hash is determined to be deprecated based on the context policy, and +// the password is valid, the password is hashed using the preferred password +// hashing scheme and returned in newHash. You should use this to upgrade any +// stored password hash in your database. +// +// newHash is empty if the password was not valid or if no upgrade is required. +// +// You should treat any non-nil err as a password verification error. +func (ctx *Context) Verify(password, hash string) (newHash string, err error) { + return ctx.verify(password, hash, true) +} + +// Like Verify, but does not hash an upgrade password when upgrade is required. 
+func (ctx *Context) VerifyNoUpgrade(password, hash string) error { + _, err := ctx.verify(password, hash, false) + return err +} + +func (ctx *Context) verify(password, hash string, canUpgrade bool) (newHash string, err error) { + cVerifyCalls.Add(1) + + for i, scheme := range ctx.schemes() { + if !scheme.SupportsStub(hash) { + continue + } + + err = scheme.Verify(password, hash) + if err != nil { + cFailedVerifyCalls.Add(1) + return "", err + } + + cSuccessfulVerifyCalls.Add(1) + if i != 0 || scheme.NeedsUpdate(hash) { + if canUpgrade { + cSuccessfulVerifyCallsWithUpgrade.Add(1) + + // If the scheme is not the first scheme, try and rehash with the + // preferred scheme. + if newHash, err2 := ctx.Hash(password); err2 == nil { + return newHash, nil + } + } else { + cSuccessfulVerifyCallsDeferringUpgrade.Add(1) + } + } + + return "", nil + } + + return "", abstract.ErrUnsupportedScheme +} + +// Determines whether a stub or hash needs updating according to the policy of +// the context. +func (ctx *Context) NeedsUpdate(stub string) bool { + for i, scheme := range ctx.schemes() { + if scheme.SupportsStub(stub) { + return i != 0 || scheme.NeedsUpdate(stub) + } + } + + return false +} + +// The default context, which uses sensible defaults. Most users should not +// reconfigure this. The defaults may change over time, so you may wish +// to reconfigure the context or use a custom context if you want precise +// control over the hashes used. +var DefaultContext Context + +// Hashes a UTF-8 plaintext password using the default context and produces a +// password hash. Chooses the preferred password hashing scheme based on the +// configured policy. The default policy is sensible. +func Hash(password string) (hash string, err error) { + return DefaultContext.Hash(password) +} + +// Verifies a UTF-8 plaintext password using a previously derived password hash +// and the default context. Returns nil err only if the password is valid. +// +// If the hash is determined to be deprecated based on policy, and the password +// is valid, the password is hashed using the preferred password hashing scheme +// and returned in newHash. You should use this to upgrade any stored password +// hash in your database. +// +// newHash is empty if the password was invalid or no upgrade is required. +// +// You should treat any non-nil err as a password verification error. +func Verify(password, hash string) (newHash string, err error) { + return DefaultContext.Verify(password, hash) +} + +// Like Verify, but never upgrades. +func VerifyNoUpgrade(password, hash string) error { + return DefaultContext.VerifyNoUpgrade(password, hash) +} + +// Uses the default context to determine whether a stub or hash needs updating. +func NeedsUpdate(stub string) bool { + return DefaultContext.NeedsUpdate(stub) +} + +// © 2008-2012 Assurance Technologies LLC. (Python passlib) BSD License +// © 2014 Hugo Landau BSD License diff --git a/vendor/github.com/icza/session/LICENSE b/vendor/github.com/icza/session/LICENSE new file mode 100644 index 0000000..8dada3e --- /dev/null +++ b/vendor/github.com/icza/session/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/icza/session/README.md b/vendor/github.com/icza/session/README.md new file mode 100644 index 0000000..8658f44 --- /dev/null +++ b/vendor/github.com/icza/session/README.md @@ -0,0 +1,114 @@ +# Session + +[![Build Status](https://travis-ci.org/icza/session.svg?branch=master)](https://travis-ci.org/icza/session) +[![GoDoc](https://godoc.org/github.com/icza/session?status.svg)](https://godoc.org/github.com/icza/session) +[![Go Report Card](https://goreportcard.com/badge/github.com/icza/session)](https://goreportcard.com/report/github.com/icza/session) +[![codecov](https://codecov.io/gh/icza/session/branch/master/graph/badge.svg)](https://codecov.io/gh/icza/session) + +The [Go](https://golang.org/) standard library includes a nice [http server](https://golang.org/pkg/net/http/), but unfortunately it lacks a very basic and important feature: _HTTP session management_. + +This package provides an easy-to-use, extensible and secure session implementation and management. Package documentation can be found and godoc.org: + +https://godoc.org/github.com/icza/session + +This is "just" an HTTP session implementation and management, you can use it as-is, or with any existing Go web toolkits and frameworks. + +## Overview + +There are 3 key _players_ in the package: + +- **`Session`** is the (HTTP) session interface. We can use it to store and retrieve constant and variable attributes from it. +- **`Store`** is a session store interface which is responsible to store sessions and make them retrievable by their IDs at the server side. +- **`Manager`** is a session manager interface which is responsible to acquire a `Session` from an (incoming) HTTP request, and to add a `Session` to an HTTP response to let the client know about the session. A `Manager` has a backing `Store` which is responsible to manage `Session` values at server side. + +_Players_ of this package are represented by interfaces, and various implementations are provided for all these players. +You are not bound by the provided implementations, feel free to provide your own implementations for any of the players. + +## Usage + +Usage can't be simpler than this. To get the current session associated with the [http.Request](https://golang.org/pkg/net/http/#Request): + + sess := session.Get(r) + if sess == nil { + // No session (yet) + } else { + // We have a session, use it + } + +To create a new session (e.g. on a successful login) and add it to an [http.ResponseWriter](https://golang.org/pkg/net/http/#ResponseWriter) (to let the client know about the session): + + sess := session.NewSession() + session.Add(sess, w) + +Let's see a more advanced session creation: let's provide a constant attribute (for the lifetime of the session) and an initial, variable attribute: + + sess := session.NewSessionOptions(&session.SessOptions{ + CAttrs: map[string]interface{}{"UserName": userName}, + Attrs: map[string]interface{}{"Count": 1}, + }) + +And to access these attributes and change value of `"Count"`: + + userName := sess.CAttr("UserName") + count := sess.Attr("Count").(int) // Type assertion, you might wanna check if it succeeds + sess.SetAttr("Count", count+1) // Increment count + +(Of course variable attributes can be added later on too with `Session.SetAttr()`, not just at session creation.) + +To remove a session (e.g. on logout): + + session.Remove(sess, w) + +Check out the [session demo application](https://github.com/icza/session/blob/master/session_demo/session_demo.go) which shows all these in action. 
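Pulling the snippets above together, here is a minimal sketch of a complete handler built only from the calls shown in this section. The `/profile` path, the `"UserName"`/`"Count"` attributes and the `"alice"` value are illustrative; also note the default `Global` manager marks its cookie `Secure`, so serve it over HTTPS (or build a manager with `AllowHTTP` for local testing).

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/icza/session"
)

// profileHandler reuses an existing session if the request carries one,
// otherwise it creates a new session and registers it on the response.
func profileHandler(w http.ResponseWriter, r *http.Request) {
	sess := session.Get(r)
	if sess == nil {
		sess = session.NewSessionOptions(&session.SessOptions{
			CAttrs: map[string]interface{}{"UserName": "alice"}, // constant for the session's lifetime (illustrative)
			Attrs:  map[string]interface{}{"Count": 1},          // variable attribute
		})
		session.Add(sess, w)
	} else {
		count := sess.Attr("Count").(int)
		sess.SetAttr("Count", count+1)
	}
	fmt.Fprintf(w, "Hello %v, visit #%v\n", sess.CAttr("UserName"), sess.Attr("Count"))
}

func main() {
	http.HandleFunc("/profile", profileHandler)
	http.ListenAndServe(":8080", nil)
}
```

On logout you would call `session.Remove(sess, w)` exactly as shown above.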
+ +## Google App Engine support + +The package provides support for Google App Engine (GAE) platform. + +The documentation doesn't include it (due to the `+build appengine` build constraint), but here it is: [gae_memcache_store.go](https://github.com/icza/session/blob/master/gae_memcache_store.go) + +The implementation stores sessions in the Memcache and also saves sessions in the Datastore as a backup +in case data would be removed from the Memcache. This behaviour is optional, Datastore can be disabled completely. +You can also choose whether saving to Datastore happens synchronously (in the same goroutine) +or asynchronously (in another goroutine), resulting in faster response times. + +We can use `NewMemcacheStore()` and `NewMemcacheStoreOptions()` functions to create a session Store implementation +which stores sessions in GAE's Memcache. Important to note that since accessing the Memcache relies on +Appengine Context which is bound to an `http.Request`, the returned Store can only be used for the lifetime of a request! +Note that the Store will automatically "flush" sessions accessed from it when the Store is closed, +so it is very important to close the Store at the end of your request; this is usually done by closing +the session manager to which you passed the store (preferably with the defer statement). + +So in each request handling we have to create a new session manager using a new Store, and we can use the session manager +to do session-related tasks, something like this: + + ctx := appengine.NewContext(r) + sessmgr := session.NewCookieManager(session.NewMemcacheStore(ctx)) + defer sessmgr.Close() // This will ensure changes made to the session are auto-saved + // in Memcache (and optionally in the Datastore). + + sess := sessmgr.Get(r) // Get current session + if sess != nil { + // Session exists, do something with it. + ctx.Infof("Count: %v", sess.Attr("Count")) + } else { + // No session yet, let's create one and add it: + sess = session.NewSession() + sess.SetAttr("Count", 1) + sessmgr.Add(sess, w) + } + +Expired sessions are not automatically removed from the Datastore. To remove expired sessions, the package +provides a `PurgeExpiredSessFromDSFunc()` function which returns an [http.HandlerFunc](https://golang.org/pkg/net/http/#HandlerFunc). +It is recommended to register the returned handler function to a path which then can be defined +as a cron job to be called periodically, e.g. in every 30 minutes or so (your choice). +As cron handlers may run up to 10 minutes, the returned handler will stop at 8 minutes +to complete safely even if there are more expired, undeleted sessions. +It can be registered like this: + + http.HandleFunc("/demo/purge", session.PurgeExpiredSessFromDSFunc("")) + +Check out the GAE session demo application which shows how it can be used. +[cron.yaml](https://github.com/icza/session/blob/master/gae_session_demo/cron.yaml) file of the demo shows how a cron job can be defined to purge expired sessions. + +Check out the [GAE session demo application](https://github.com/icza/session/blob/master/gae_session_demo/gae_session_demo.go) which shows how to use this in action. diff --git a/vendor/github.com/icza/session/cookie_manager.go b/vendor/github.com/icza/session/cookie_manager.go new file mode 100644 index 0000000..bf17f28 --- /dev/null +++ b/vendor/github.com/icza/session/cookie_manager.go @@ -0,0 +1,123 @@ +/* + +A secure, cookie based session Manager implementation. 
+ +*/ + +package session + +import ( + "net/http" + "time" +) + +// CookieManager is a secure, cookie based session Manager implementation. +// Only the session ID is transmitted / stored at the clients, and it is managed using cookies. +type CookieManager struct { + store Store // Backing Store + + sessIDCookieName string // Name of the cookie used for storing the session id + cookieSecure bool // Tells if session ID cookies are to be sent only over HTTPS + cookieMaxAgeSec int // Max age for session ID cookies in seconds + cookiePath string // Cookie path to use +} + +// CookieMngrOptions defines options that may be passed when creating a new CookieManager. +// All fields are optional; default value will be used for any field that has the zero value. +type CookieMngrOptions struct { + // Name of the cookie used for storing the session id; default value is "sessid" + SessIDCookieName string + + // Tells if session ID cookies are allowed to be sent over unsecure HTTP too (else only HTTPS); + // default value is false (only HTTPS) + AllowHTTP bool + + // Max age for session ID cookies; default value is 30 days + CookieMaxAge time.Duration + + // Cookie path to use; default value is the root: "/" + CookiePath string +} + +// Pointer to zero value of CookieMngrOptions to be reused for efficiency. +var zeroCookieMngrOptions = new(CookieMngrOptions) + +// NewCookieManager creates a new, cookie based session Manager with default options. +// Default values of options are listed in the CookieMngrOptions type. +func NewCookieManager(store Store) Manager { + return NewCookieManagerOptions(store, zeroCookieMngrOptions) +} + +// NewCookieManagerOptions creates a new, cookie based session Manager with the specified options. +func NewCookieManagerOptions(store Store, o *CookieMngrOptions) Manager { + m := &CookieManager{ + store: store, + cookieSecure: !o.AllowHTTP, + sessIDCookieName: o.SessIDCookieName, + cookiePath: o.CookiePath, + } + + if m.sessIDCookieName == "" { + m.sessIDCookieName = "sessid" + } + if o.CookieMaxAge == 0 { + m.cookieMaxAgeSec = 30 * 24 * 60 * 60 // 30 days max age + } else { + m.cookieMaxAgeSec = int(o.CookieMaxAge.Seconds()) + } + if m.cookiePath == "" { + m.cookiePath = "/" + } + + return m +} + +// Get is to implement Manager.Get(). +func (m *CookieManager) Get(r *http.Request) Session { + c, err := r.Cookie(m.sessIDCookieName) + if err != nil { + return nil + } + + return m.store.Get(c.Value) +} + +// Add is to implement Manager.Add(). +func (m *CookieManager) Add(sess Session, w http.ResponseWriter) { + // HttpOnly: do not allow non-HTTP access to it (like javascript) to prevent stealing it... + // Secure: only send it over HTTPS + // MaxAge: to specify the max age of the cookie in seconds, else it's a session cookie and gets deleted after the browser is closed. + + c := http.Cookie{ + Name: m.sessIDCookieName, + Value: sess.ID(), + Path: m.cookiePath, + HttpOnly: true, + Secure: m.cookieSecure, + MaxAge: m.cookieMaxAgeSec, + } + http.SetCookie(w, &c) + + m.store.Add(sess) +} + +// Remove is to implement Manager.Remove(). +func (m *CookieManager) Remove(sess Session, w http.ResponseWriter) { + // Set the cookie with empty value and 0 max age + c := http.Cookie{ + Name: m.sessIDCookieName, + Value: "", + Path: m.cookiePath, + HttpOnly: true, + Secure: m.cookieSecure, + MaxAge: -1, // MaxAge<0 means delete cookie now, equivalently 'Max-Age: 0' + } + http.SetCookie(w, &c) + + m.store.Remove(sess) +} + +// Close is to implement Manager.Close(). 
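To make the options above concrete, a minimal sketch of building a manager for local development follows; the cookie name and max age are illustrative, and `NewInMemStore` is the in-memory `Store` vendored later in this change.

```go
package main

import (
	"time"

	"github.com/icza/session"
)

// newDevManager builds a cookie-based manager whose session cookie may be
// sent over plain HTTP, which is convenient while developing locally.
func newDevManager() session.Manager {
	return session.NewCookieManagerOptions(session.NewInMemStore(), &session.CookieMngrOptions{
		SessIDCookieName: "myapp-sessid",     // illustrative; default is "sessid"
		AllowHTTP:        true,               // default is false (cookie sent only over HTTPS)
		CookieMaxAge:     7 * 24 * time.Hour, // default is 30 days
		CookiePath:       "/",                // default is "/"
	})
}

func main() {
	mgr := newDevManager()
	defer mgr.Close() // Close also closes the backing store
	// ... register handlers that use mgr.Get / mgr.Add ...
}
```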
+func (m *CookieManager) Close() { + m.store.Close() +} diff --git a/vendor/github.com/icza/session/doc.go b/vendor/github.com/icza/session/doc.go new file mode 100644 index 0000000..3268144 --- /dev/null +++ b/vendor/github.com/icza/session/doc.go @@ -0,0 +1,117 @@ +/* + +Package session provides an easy-to-use, extensible and secure HTTP session implementation and management. + +This is "just" an HTTP session implementation and management, you can use it as-is, or with any existing Go web toolkits and frameworks. +Package documentation can be found and godoc.org: + +https://godoc.org/github.com/icza/session + +Overview + +There are 3 key players in the package: + +- Session is the (HTTP) session interface. We can use it to store and retrieve constant and variable attributes from it. + +- Store is a session store interface which is responsible to store sessions and make them retrievable by their IDs at the server side. + +- Manager is a session manager interface which is responsible to acquire a Session from an (incoming) HTTP request, and to add a Session to an HTTP response to let the client know about the session. A Manager has a backing Store which is responsible to manage Session values at server side. + +Players of this package are represented by interfaces, and various implementations are provided for all these players. +You are not bound by the provided implementations, feel free to provide your own implementations for any of the players. + +Usage + +Usage can't be simpler than this. To get the current session associated with the http.Request: + + sess := session.Get(r) + if sess == nil { + // No session (yet) + } else { + // We have a session, use it + } + +To create a new session (e.g. on a successful login) and add it to an http.ResponseWriter (to let the client know about the session): + + sess := session.NewSession() + session.Add(sess, w) + +Let's see a more advanced session creation: let's provide a constant attribute (for the lifetime of the session) and an initial, variable attribute: + + sess := session.NewSessionOptions(&session.SessOptions{ + CAttrs: map[string]interface{}{"UserName": userName}, + Attrs: map[string]interface{}{"Count": 1}, + }) + +And to access these attributes and change value of "Count": + + userName := sess.CAttr("UserName") + count := sess.Attr("Count").(int) // Type assertion, you might wanna check if it succeeds + sess.SetAttr("Count", count+1) // Increment count + +(Of course variable attributes can be added later on too with Session.SetAttr(), not just at session creation.) + +To remove a session (e.g. on logout): + + session.Remove(sess, w) + +Check out the session demo application which shows all these in action: + +https://github.com/icza/session/blob/master/session_demo/session_demo.go + +Google App Engine support + +The package provides support for Google App Engine (GAE) platform. + +The documentation doesn't include it (due to the '+build appengine' build constraint), but here it is: + +https://github.com/icza/session/blob/master/gae_memcache_store.go + +The implementation stores sessions in the Memcache and also saves sessions in the Datastore as a backup +in case data would be removed from the Memcache. This behaviour is optional, Datastore can be disabled completely. +You can also choose whether saving to Datastore happens synchronously (in the same goroutine) +or asynchronously (in another goroutine), resulting in faster response times. 
+ +We can use NewMemcacheStore() and NewMemcacheStoreOptions() functions to create a session Store implementation +which stores sessions in GAE's Memcache. Important to note that since accessing the Memcache relies on +Appengine Context which is bound to an http.Request, the returned Store can only be used for the lifetime of a request! +Note that the Store will automatically "flush" sessions accessed from it when the Store is closed, +so it is very important to close the Store at the end of your request; this is usually done by closing +the session manager to which you passed the store (preferably with the defer statement). + +So in each request handling we have to create a new session manager using a new Store, and we can use the session manager +to do session-related tasks, something like this: + + ctx := appengine.NewContext(r) + sessmgr := session.NewCookieManager(session.NewMemcacheStore(ctx)) + defer sessmgr.Close() // This will ensure changes made to the session are auto-saved + // in Memcache (and optionally in the Datastore). + + sess := sessmgr.Get(r) // Get current session + if sess != nil { + // Session exists, do something with it. + ctx.Infof("Count: %v", sess.Attr("Count")) + } else { + // No session yet, let's create one and add it: + sess = session.NewSession() + sess.SetAttr("Count", 1) + sessmgr.Add(sess, w) + } + +Expired sessions are not automatically removed from the Datastore. To remove expired sessions, the package +provides a PurgeExpiredSessFromDSFunc() function which returns an http.HandlerFunc. +It is recommended to register the returned handler function to a path which then can be defined +as a cron job to be called periodically, e.g. in every 30 minutes or so (your choice). +As cron handlers may run up to 10 minutes, the returned handler will stop at 8 minutes +to complete safely even if there are more expired, undeleted sessions. +It can be registered like this: + + http.HandleFunc("/demo/purge", session.PurgeExpiredSessFromDSFunc("")) + +Check out the GAE session demo application which shows how it can be used. +cron.yaml file of the demo shows how a cron job can be defined to purge expired sessions. + +https://github.com/icza/session/blob/master/gae_session_demo/gae_session_demo.go + +*/ +package session diff --git a/vendor/github.com/icza/session/gae_memcache_store.go b/vendor/github.com/icza/session/gae_memcache_store.go new file mode 100644 index 0000000..2682fac --- /dev/null +++ b/vendor/github.com/icza/session/gae_memcache_store.go @@ -0,0 +1,375 @@ +// +build appengine + +/* + +A Google App Engine Memcache session store implementation. + +The implementation stores sessions in the Memcache and also saves sessions to the Datastore as a backup +in case data would be removed from the Memcache. This behaviour is optional, Datastore can be disabled completely. +You can also choose whether saving to Datastore happens synchronously (in the same goroutine) +or asynchronously (in another goroutine). + +Limitations based on GAE Memcache: + +- Since session ids are used in the Memcache keys, session ids can't be longer than 250 chars (bytes, but with Base64 charset it's the same). +If you also specify a key prefix (in MemcacheStoreOptions), that also counts into it. + +- The size of a Session cannot be larger than 1 MB (marshalled into a byte slice). 
+ +Note that the Store will automatically "flush" sessions accessed from it when the Store is closed, +so it is very important to close the Store at the end of your request; this is usually done by closing +the session manager to which you passed the store (preferably with the defer statement). + +Check out the GAE session demo application which shows how to use it properly: + +https://github.com/icza/session/blob/master/gae_session_demo/session_demo.go + +*/ + +package session + +import ( + "net/http" + "sync" + "time" + + "appengine" + "appengine/datastore" + "appengine/memcache" +) + +// A Google App Engine Memcache session store implementation. +type memcacheStore struct { + ctx appengine.Context // Appengine context used when accessing the Memcache + + keyPrefix string // Prefix to use in front of session ids to construct Memcache key + retries int // Number of retries to perform in case of general Memcache failures + + codec memcache.Codec // Codec used to marshal and unmarshal a Session to a byte slice + + onlyMemcache bool // Tells if sessions are not to be saved in Datastore + asyncDatastoreSave bool // Tells if saving in Datastore should happen asynchronously, in a new goroutine + dsEntityName string // Name of the datastore entity to use to save sessions + + // Map of sessions (mapped from ID) that were accessed using this store; usually it will only be 1. + // It is also used as a cache, should the user call Get() with the same id multiple times. + sessions map[string]Session + + mux *sync.RWMutex // mutex to synchronize access to sessions +} + +// MemcacheStoreOptions defines options that may be passed when creating a new Memcache session store. +// All fields are optional; default value will be used for any field that has the zero value. +type MemcacheStoreOptions struct { + // Prefix to use when storing sessions in the Memcache, cannot contain a null byte + // and cannot be longer than 250 chars (bytes) when concatenated with the session id; default value is the empty string + // The Memcache key will be this prefix and the session id concatenated. + KeyPrefix string + + // Number of retries to perform if Memcache operations fail due to general service error; + // default value is 3 + Retries int + + // Codec used to marshal and unmarshal a Session to a byte slice; + // Default value is &memcache.Gob (which uses the gob package). + Codec *memcache.Codec + + // Tells if sessions are only to be stored in Memcache, and do not store them in Datastore as backup; + // as Memcache has no guarantees, it may lose content from time to time, but if Datastore is + // also used, the session will automatically be retrieved from the Datastore if not found in Memcache; + // default value is false (which means to also save sessions in the Datastore) + OnlyMemcache bool + + // Tells if saving in Datastore should happen asynchronously (in a new goroutine, possibly after returning), + // if false, session saving in Datastore will happen in the same goroutine, before returning from the request. + // Asynchronous saving gives smaller latency (and is enough most of the time as Memcache is always checked first); + // default value is false which means to save sessions in the Datastore in the same goroutine, synchronously + // Not used if OnlyMemcache=true. + // FIXME: See https://github.com/icza/session/issues/3 + AsyncDatastoreSave bool + + // Name of the entity to use for saving sessions; + // default value is "sess_" + // Not used if OnlyMemcache=true. 
+ DSEntityName string +} + +// SessEntity models the session entity saved to Datastore. +// The Key is the session id. +type SessEntity struct { + Expires time.Time `datastore:"exp"` + Value []byte `datastore:"val"` +} + +// Pointer to zero value of MemcacheStoreOptions to be reused for efficiency. +var zeroMemcacheStoreOptions = new(MemcacheStoreOptions) + +// NewMemcacheStore returns a new, GAE Memcache session Store with default options. +// Default values of options are listed in the MemcacheStoreOptions type. +// +// Important! Since accessing the Memcache relies on Appengine Context +// which is bound to an http.Request, the returned Store can only be used for the lifetime of a request! +func NewMemcacheStore(ctx appengine.Context) Store { + return NewMemcacheStoreOptions(ctx, zeroMemcacheStoreOptions) +} + +const defaultDSEntityName = "sess_" // Default value of DSEntityName. + +// NewMemcacheStoreOptions returns a new, GAE Memcache session Store with the specified options. +// +// Important! Since accessing the Memcache relies on Appengine Context +// which is bound to an http.Request, the returned Store can only be used for the lifetime of a request! +func NewMemcacheStoreOptions(ctx appengine.Context, o *MemcacheStoreOptions) Store { + s := &memcacheStore{ + ctx: ctx, + keyPrefix: o.KeyPrefix, + retries: o.Retries, + onlyMemcache: o.OnlyMemcache, + asyncDatastoreSave: o.AsyncDatastoreSave, + dsEntityName: o.DSEntityName, + sessions: make(map[string]Session, 2), + mux: &sync.RWMutex{}, + } + if s.retries <= 0 { + s.retries = 3 + } + if o.Codec != nil { + s.codec = *o.Codec + } else { + s.codec = memcache.Gob + } + if s.dsEntityName == "" { + s.dsEntityName = defaultDSEntityName + } + return s +} + +// Get is to implement Store.Get(). +// Important! Since sessions are marshalled and stored in the Memcache, +// the mutex of the Session (Session.RWMutex()) will be different for each +// Session value (even though they might have the same session id)! +func (s *memcacheStore) Get(id string) Session { + s.mux.RLock() + defer s.mux.RUnlock() + + // First check our "cache" + if sess := s.sessions[id]; sess != nil { + return sess + } + + // Next check in Memcache + var err error + var sess *sessionImpl + + for i := 0; i < s.retries; i++ { + var sess_ sessionImpl + _, err = s.codec.Get(s.ctx, s.keyPrefix+id, &sess_) + if err == memcache.ErrCacheMiss { + break // It's not in the Memcache (e.g. invalid sess id or was removed from Memcache by AppEngine) + } + if err == nil { + sess = &sess_ + break + } + // Service error? Retry.. + } + + if sess == nil { + if err != nil && err != memcache.ErrCacheMiss { + s.ctx.Errorf("Failed to get session from memcache, id: %s, error: %v", id, err) + } + + // Ok, we didn't get it from Memcace (either was not there or Memcache service is unavailable). + // Now it's time to check in the Datastore. + key := datastore.NewKey(s.ctx, s.dsEntityName, id, 0, nil) + for i := 0; i < s.retries; i++ { + e := SessEntity{} + err = datastore.Get(s.ctx, key, &e) + if err == datastore.ErrNoSuchEntity { + return nil // It's not in the Datastore either + } + if err != nil { + // Service error? Retry.. + continue + } + if e.Expires.Before(time.Now()) { + // Session expired. + datastore.Delete(s.ctx, key) // Omitting error check... + return nil + } + var sess_ sessionImpl + if err = s.codec.Unmarshal(e.Value, &sess_); err != nil { + break // Invalid data in stored session entity... 
+ } + sess = &sess_ + break + } + } + + if sess == nil { + s.ctx.Errorf("Failed to get session from datastore, id: %s, error: %v", id, err) + return nil + } + + // Yes! We have it! "Actualize" it. + sess.Access() + // Mutex is not marshalled, so create a new one: + sess.mux = &sync.RWMutex{} + s.sessions[id] = sess + return sess +} + +// Add is to implement Store.Add(). +func (s *memcacheStore) Add(sess Session) { + s.mux.Lock() + defer s.mux.Unlock() + + if s.setMemcacheSession(sess) { + s.ctx.Infof("Session added: %s", sess.ID()) + s.sessions[sess.ID()] = sess + return + } +} + +// setMemcacheSession sets the specified session in the Memcache. +func (s *memcacheStore) setMemcacheSession(sess Session) (success bool) { + item := &memcache.Item{ + Key: s.keyPrefix + sess.ID(), + Object: sess, + Expiration: sess.Timeout(), + } + + var err error + for i := 0; i < s.retries; i++ { + if err = s.codec.Set(s.ctx, item); err == nil { + return true + } + } + + s.ctx.Errorf("Failed to add session to memcache, id: %s, error: %v", sess.ID(), err) + return false +} + +// Remove is to implement Store.Remove(). +func (s *memcacheStore) Remove(sess Session) { + s.mux.Lock() + defer s.mux.Unlock() + + var err error + for i := 0; i < s.retries; i++ { + if err = memcache.Delete(s.ctx, s.keyPrefix+sess.ID()); err == nil || err == memcache.ErrCacheMiss { + s.ctx.Infof("Session removed: %s", sess.ID()) + delete(s.sessions, sess.ID()) + if !s.onlyMemcache { + // Also from the Datastore: + key := datastore.NewKey(s.ctx, s.dsEntityName, sess.ID(), 0, nil) + datastore.Delete(s.ctx, key) // Omitting error check... + } + return + } + } + s.ctx.Errorf("Failed to remove session from memcache, id: %s, error: %v", sess.ID(), err) +} + +// Close is to implement Store.Close(). +func (s *memcacheStore) Close() { + // Flush out sessions that were accessed from this store. No need locking, we're closing... + // We could use Cocec.SetMulti(), but sessions will contain at most 1 session like all the times. + for _, sess := range s.sessions { + s.setMemcacheSession(sess) + } + + if s.onlyMemcache { + return // Don't save to Datastore + } + + if s.asyncDatastoreSave { + go s.saveToDatastore() + } else { + s.saveToDatastore() + } +} + +// saveToDatastore saves the sessions of the Store to the Datastore +// in the caller's goroutine. +func (s *memcacheStore) saveToDatastore() { + // Save sessions that were accessed from this store. No need locking, we're closing... + // We could use datastore.PutMulti(), but sessions will contain at most 1 session like all the times. + for _, sess := range s.sessions { + value, err := s.codec.Marshal(sess) + if err != nil { + s.ctx.Errorf("Failed to marshal session: %s, error: %v", sess.ID(), err) + continue + } + e := SessEntity{ + Expires: sess.Accessed().Add(sess.Timeout()), + Value: value, + } + key := datastore.NewKey(s.ctx, s.dsEntityName, sess.ID(), 0, nil) + for i := 0; i < s.retries; i++ { + if _, err = datastore.Put(s.ctx, key, &e); err == nil { + break + } + } + if err != nil { + s.ctx.Errorf("Failed to save session to datastore: %s, error: %v", sess.ID(), err) + } + } +} + +// PurgeExpiredSessFromDSFunc returns a request handler function which deletes expired sessions +// from the Datastore. +// dsEntityName is the name of the entity used for saving sessions; pass an empty string +// to use the default value (which is "sess_"). +// +// It is recommended to register the returned handler function to a path which then can be defined +// as a cron job to be called periodically, e.g. 
in every 30 minutes or so (your choice). +// As cron handlers may run up to 10 minutes, the returned handler will stop at 8 minutes +// to complete safely even if there are more expired, undeleted sessions. +// +// The response of the handler func is a JSON text telling if the handler was able to delete all expired sessions, +// or that it was finished early due to the time. Examle of a respone where all expired sessions were deleted: +// +// {"completed":true} +func PurgeExpiredSessFromDSFunc(dsEntityName string) http.HandlerFunc { + if dsEntityName == "" { + dsEntityName = defaultDSEntityName + } + + return func(w http.ResponseWriter, r *http.Request) { + c := appengine.NewContext(r) + // Delete in batches of 100 + q := datastore.NewQuery(dsEntityName).Filter("exp<", time.Now()).KeysOnly().Limit(100) + + deadline := time.Now().Add(time.Minute * 8) + + for { + var err error + var keys []*datastore.Key + + if keys, err = q.GetAll(c, nil); err != nil { + // Datastore error. + c.Errorf("Failed to query expired sessions: %v", err) + http.Error(w, "Failed to query expired sessions!", http.StatusInternalServerError) + } + if len(keys) == 0 { + // We're done, no more expired sessions + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"completed":true}`)) + return + } + + if err = datastore.DeleteMulti(c, keys); err != nil { + c.Errorf("Error while deleting expired sessions: %v", err) + } + + if time.Now().After(deadline) { + // Our time is up, return + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"completed":false}`)) + return + } + // We have time to continue + } + } +} diff --git a/vendor/github.com/icza/session/global.go b/vendor/github.com/icza/session/global.go new file mode 100644 index 0000000..f24d703 --- /dev/null +++ b/vendor/github.com/icza/session/global.go @@ -0,0 +1,39 @@ +/* + +A global session Manager and delegator functions - for easy to use. + +*/ + +package session + +import ( + "net/http" +) + +// Global is the default session Manager to which the top-level functions such as Get, Add, Remove and Close +// are wrappers of Manager. +// You may replace this and keep using the top-level functions, but if you intend to do so, +// you should close it first with Global.Close(). +var Global = NewCookieManager(NewInMemStore()) + +// Get delegates to Global.Get(); returns the session specified by the HTTP request. +// nil is returned if the request does not contain a session, or the contained session is not know by this manager. +func Get(r *http.Request) Session { + return Global.Get(r) +} + +// Add delegates to Global.Add(); adds the session to the HTTP response. +// This means to let the client know about the specified session by including the sesison id in the response somehow. +func Add(sess Session, w http.ResponseWriter) { + Global.Add(sess, w) +} + +// Remove delegates to Global.Remove(); removes the session from the HTTP response. +func Remove(sess Session, w http.ResponseWriter) { + Global.Remove(sess, w) +} + +// Close delegates to Global.Close(); closes the session manager, releasing any resources that were allocated. +func Close() { + Global.Close() +} diff --git a/vendor/github.com/icza/session/inmem_store.go b/vendor/github.com/icza/session/inmem_store.go new file mode 100644 index 0000000..104d82c --- /dev/null +++ b/vendor/github.com/icza/session/inmem_store.go @@ -0,0 +1,145 @@ +/* + +An in-memory session store implementation. 
+ +*/ + +package session + +import ( + "log" + "sync" + "time" +) + +// In-memory session Store implementation. +type inMemStore struct { + sessions map[string]Session // Map of sessions (mapped from ID) + mux *sync.RWMutex // mutex to synchronize access to sessions + ticker *time.Ticker // Ticker for the session cleaner + closeTicker chan struct{} // Channel to signal close for the session cleaner +} + +// InMemStoreOptions defines options that may be passed when creating a new in-memory Store. +// All fields are optional; default value will be used for any field that has the zero value. +type InMemStoreOptions struct { + // Session cleaner check interval, default is 10 seconds. + SessCleanerInterval time.Duration +} + +// Pointer to zero value of InMemStoreOptions to be reused for efficiency. +var zeroInMemStoreOptions = new(InMemStoreOptions) + +// NewInMemStore returns a new, in-memory session Store with the default options. +// Default values of options are listed in the InMemStoreOptions type. +// The returned Store has an automatic session cleaner which runs +// in its own goroutine. +func NewInMemStore() Store { + return NewInMemStoreOptions(zeroInMemStoreOptions) +} + +// NewInMemStoreOptions returns a new, in-memory session Store with the specified options. +// The returned Store has an automatic session cleaner which runs +// in its own goroutine. +func NewInMemStoreOptions(o *InMemStoreOptions) Store { + s := &inMemStore{ + sessions: make(map[string]Session), + mux: &sync.RWMutex{}, + closeTicker: make(chan struct{}), + } + + interval := o.SessCleanerInterval + if interval == 0 { + interval = 10 * time.Second + } + + go s.sessCleaner(interval) + + return s +} + +// sessCleaner periodically checks whether sessions have timed out +// in an endless loop. If a session has timed out, removes it. +// This method is to be started as a new goroutine. +func (s *inMemStore) sessCleaner(interval time.Duration) { + ticker := time.NewTicker(interval) + + for { + select { + case <-s.closeTicker: + // We are being shut down... + ticker.Stop() + return + case now := <-ticker.C: + // Do a sweep. + // Remove is very rare compared to the number of checks, so: + // "Quick" check with read-lock to see if there's anything to remove: + // Note: Session.Access() is called with s.mux, the same mutex we use + // when looking for timed-out sessions, so we're good. + needRemove := func() bool { + s.mux.RLock() // Read lock is enough + defer s.mux.RUnlock() + + for _, sess := range s.sessions { + if now.Sub(sess.Accessed()) > sess.Timeout() { + return true + } + } + return false + }() + if !needRemove { + continue + } + + // Remove required: + func() { + s.mux.Lock() // Read-write lock required + defer s.mux.Unlock() + + for _, sess := range s.sessions { + if now.Sub(sess.Accessed()) > sess.Timeout() { + log.Println("Session timed out:", sess.ID()) + delete(s.sessions, sess.ID()) + } + } + }() + } + } +} + +// Get is to implement Store.Get(). +func (s *inMemStore) Get(id string) Session { + s.mux.RLock() + defer s.mux.RUnlock() + + sess := s.sessions[id] + if sess == nil { + return nil + } + + sess.Access() + return sess +} + +// Add is to implement Store.Add(). +func (s *inMemStore) Add(sess Session) { + s.mux.Lock() + defer s.mux.Unlock() + + log.Println("Session added:", sess.ID()) + s.sessions[sess.ID()] = sess +} + +// Remove is to implement Store.Remove(). 
+func (s *inMemStore) Remove(sess Session) { + s.mux.Lock() + defer s.mux.Unlock() + + log.Println("Session removed:", sess.ID()) + delete(s.sessions, sess.ID()) +} + +// Close is to implement Store.Close(). +func (s *inMemStore) Close() { + close(s.closeTicker) +} diff --git a/vendor/github.com/icza/session/manager.go b/vendor/github.com/icza/session/manager.go new file mode 100644 index 0000000..15b4e83 --- /dev/null +++ b/vendor/github.com/icza/session/manager.go @@ -0,0 +1,31 @@ +/* + +Session Manager interface. + +*/ + +package session + +import ( + "net/http" +) + +// Manager is a session manager interface. +// A session manager is responsible to acquire a Session from an (incoming) HTTP request, +// and to add a Session to an HTTP response to let the client know about the session. +// A Manager has a backing Store which is responsible to manage Session values at server side. +type Manager interface { + // Get returns the session specified by the HTTP request. + // nil is returned if the request does not contain a session, or the contained session is not know by this manager. + Get(r *http.Request) Session + + // Add adds the session to the HTTP response. + // This means to let the client know about the specified session by including the sesison id in the response somehow. + Add(sess Session, w http.ResponseWriter) + + // Remove removes the session from the HTTP response. + Remove(sess Session, w http.ResponseWriter) + + // Close closes the session manager, releasing any resources that were allocated. + Close() +} diff --git a/vendor/github.com/icza/session/session.go b/vendor/github.com/icza/session/session.go new file mode 100644 index 0000000..91825d3 --- /dev/null +++ b/vendor/github.com/icza/session/session.go @@ -0,0 +1,231 @@ +/* + +Session interface and its implementation. + +*/ + +package session + +import ( + "crypto/rand" + "encoding/base64" + "io" + "sync" + "time" +) + +// Session is the (HTTP) session interface. +// We can use it to store and retrieve constant and variable attributes from it. +type Session interface { + // ID returns the id of the session. + ID() string + + // New tells if the session is new. + // Implementation is based on whether created and access times are equal. + New() bool + + // CAttr returns the value of an attribute provided at session creation. + // These attributes cannot be changes during the lifetime of a session, + // so they can be accessed safely without synchronization. Exampe is storing the + // authenticated user. + CAttr(name string) interface{} + + // Attr returns the value of an attribute stored in the session. + // Safe for concurrent use. + Attr(name string) interface{} + + // SetAttr sets the value of an attribute stored in the session. + // Pass the nil value to delete the attribute. + // Safe for concurrent use. + SetAttr(name string, value interface{}) + + // Attrs returns a copy of all the attribute values stored in the session. + // Safe for concurrent use. + Attrs() map[string]interface{} + + // Created returns the session creation time. + Created() time.Time + + // Accessed returns the time when the session was last accessed. + Accessed() time.Time + + // Timeout returns the session timeout. + // A session may be removed automatically if it is not accessed for this duration. + Timeout() time.Duration + + // Mutex returns the RW mutex of the session. + // It is used to synchronize access/modification of the state stored in the session. + // It can be used if session-level synchronization is required. + // Important! 
If Session values are marshalled / unmarshalled + // (e.g. multi server instance environment such as Google AppEngine), + // this mutex may be different for each Session value and thus + // it can only be used to session-value level synchronization! + Mutex() *sync.RWMutex + + // Access registers an access to the session, + // updates its last accessed time to the current time. + // Users do not need to call this as the session store is responsible for that. + Access() +} + +// Session implementation. +// Fields are exported so a session may be marshalled / unmarshalled. +type sessionImpl struct { + IDF string // ID of the session + CreatedF time.Time // Creation time + AccessedF time.Time // Last accessed time + CAttrsF map[string]interface{} // Constant attributes specified at session creation + AttrsF map[string]interface{} // Attributes stored in the session + TimeoutF time.Duration // Session timeout + mux *sync.RWMutex // RW mutex to synchronize session state access +} + +// SessOptions defines options that may be passed when creating a new Session. +// All fields are optional; default value will be used for any field that has the zero value. +type SessOptions struct { + // Constant attributes of the session. These be will available via the Session.CAttr() method, without synchronization. + // Values from the map will be copied, and will be available via Session.CAttr(). + CAttrs map[string]interface{} + + // Initial, non-constant attributes to be stored in the session. + // Values from the map will be copied, and will be available via Session.Attr() and Session.Attrs, + // and may be changed with Session.SetAttr(). + Attrs map[string]interface{} + + // Session timeout, default is 30 minutes. + Timeout time.Duration + + // Byte-length of the information that builds up the session ids. + // Using Base-64 encoding, id length will be this multiplied by 4/3 chars. + // Default value is 18 (which means length of ID will be 24 chars). + IDLength int +} + +// Pointer to zero value of SessOptions to be reused for efficiency. +var zeroSessOptions = new(SessOptions) + +// NewSession creates a new Session with the default options. +// Default values of options are listed in the SessOptions type. +func NewSession() Session { + return NewSessionOptions(zeroSessOptions) +} + +// NewSessionOptions creates a new Session with the specified options. +func NewSessionOptions(o *SessOptions) Session { + now := time.Now() + idLength := o.IDLength + if idLength <= 0 { + idLength = 18 + } + timeout := o.Timeout + if timeout == 0 { + timeout = 30 * time.Minute + } + + sess := sessionImpl{ + IDF: genID(idLength), + CreatedF: now, + AccessedF: now, + AttrsF: make(map[string]interface{}), + TimeoutF: timeout, + mux: &sync.RWMutex{}, + } + + if len(o.CAttrs) > 0 { + sess.CAttrsF = make(map[string]interface{}, len(o.CAttrs)) + for k, v := range o.CAttrs { + sess.CAttrsF[k] = v + } + } + + for k, v := range o.Attrs { + sess.AttrsF[k] = v + } + + return &sess +} + +// genID generates a secure, random session id using the crypto/rand package. +func genID(length int) string { + r := make([]byte, length) + io.ReadFull(rand.Reader, r) + return base64.URLEncoding.EncodeToString(r) +} + +// ID is to implement Session.ID(). +func (s *sessionImpl) ID() string { + return s.IDF +} + +// New is to implement Session.New(). +func (s *sessionImpl) New() bool { + return s.CreatedF == s.AccessedF +} + +// CAttr is to implement Session.CAttr(). 
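As a concrete illustration of the `SessOptions` defaults above (the attribute name and value are illustrative): 24 random bytes yield a 32-character Base64 id, and the timeout here overrides the 30-minute default.

```go
package main

import (
	"fmt"
	"time"

	"github.com/icza/session"
)

func main() {
	sess := session.NewSessionOptions(&session.SessOptions{
		CAttrs:   map[string]interface{}{"UserName": "alice"}, // illustrative
		Timeout:  15 * time.Minute,                            // default is 30 minutes
		IDLength: 24,                                          // 24 bytes -> 24*4/3 = 32 Base64 characters
	})
	fmt.Println(len(sess.ID()), sess.Timeout()) // 32 15m0s
}
```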
+func (s *sessionImpl) CAttr(name string) interface{} { + return s.CAttrsF[name] +} + +// Attr is to implement Session.Attr(). +func (s *sessionImpl) Attr(name string) interface{} { + s.mux.RLock() + defer s.mux.RUnlock() + + return s.AttrsF[name] +} + +// SetAttr is to implement Session.SetAttr(). +func (s *sessionImpl) SetAttr(name string, value interface{}) { + s.mux.Lock() + defer s.mux.Unlock() + + if value == nil { + delete(s.AttrsF, name) + } else { + s.AttrsF[name] = value + } +} + +// Attrs is to implement Session.Attrs(). +func (s *sessionImpl) Attrs() map[string]interface{} { + s.mux.RLock() + defer s.mux.RUnlock() + + m := make(map[string]interface{}, len(s.AttrsF)) + for k, v := range s.AttrsF { + m[k] = v + } + return m +} + +// Created is to implement Session.Created(). +func (s *sessionImpl) Created() time.Time { + return s.CreatedF +} + +// Accessed is to implement Session.Accessed(). +func (s *sessionImpl) Accessed() time.Time { + s.mux.RLock() + defer s.mux.RUnlock() + + return s.AccessedF +} + +// Timeout is to implement Session.Timeout(). +func (s *sessionImpl) Timeout() time.Duration { + return s.TimeoutF +} + +// Mutex is to implement Session.Mutex(). +func (s *sessionImpl) Mutex() *sync.RWMutex { + return s.mux +} + +// Access is to implement Session.Access(). +func (s *sessionImpl) Access() { + s.mux.Lock() + defer s.mux.Unlock() + + s.AccessedF = time.Now() +} diff --git a/vendor/github.com/icza/session/store.go b/vendor/github.com/icza/session/store.go new file mode 100644 index 0000000..d957fb4 --- /dev/null +++ b/vendor/github.com/icza/session/store.go @@ -0,0 +1,25 @@ +/* + +Session Store interface. + +*/ + +package session + +// Store is a session store interface. +// A session store is responsible to store sessions and make them retrievable by their IDs at the server side. +type Store interface { + // Get returns the session specified by its id. + // The returned session will have an updated access time (set to the current time). + // nil is returned if this store does not contain a session with the specified id. + Get(id string) Session + + // Add adds a new session to the store. + Add(sess Session) + + // Remove removes a session from the store. + Remove(sess Session) + + // Close closes the session store, releasing any resources that were allocated. + Close() +} diff --git a/vendor/github.com/justinas/nosurf/LICENSE b/vendor/github.com/justinas/nosurf/LICENSE new file mode 100644 index 0000000..ea44069 --- /dev/null +++ b/vendor/github.com/justinas/nosurf/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Justinas Stankevicius + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/justinas/nosurf/README.md b/vendor/github.com/justinas/nosurf/README.md new file mode 100644 index 0000000..5f9fc7d --- /dev/null +++ b/vendor/github.com/justinas/nosurf/README.md @@ -0,0 +1,125 @@ +# nosurf + +[![Build Status](https://travis-ci.org/justinas/nosurf.svg?branch=master)](https://travis-ci.org/justinas/nosurf) +[![GoDoc](http://godoc.org/github.com/justinas/nosurf?status.png)](http://godoc.org/github.com/justinas/nosurf) + +`nosurf` is an HTTP package for Go +that helps you prevent Cross-Site Request Forgery attacks. +It acts like a middleware and therefore +is compatible with basically any Go HTTP application. + +### Why? +Even though CSRF is a prominent vulnerability, +Go's web-related package infrastructure mostly consists of +micro-frameworks that neither do implement CSRF checks, +nor should they. + +`nosurf` solves this problem by providing a `CSRFHandler` +that wraps your `http.Handler` and checks for CSRF attacks +on every non-safe (non-GET/HEAD/OPTIONS/TRACE) method. + +`nosurf` requires Go 1.1 or later. + +### Features + +* Supports any `http.Handler` (frameworks, your own handlers, etc.) +and acts like one itself. +* Allows exempting specific endpoints from CSRF checks by +an exact URL, a glob, or a regular expression. +* Allows specifying your own failure handler. +Want to present the hacker with an ASCII middle finger +instead of the plain old `HTTP 400`? No problem. +* Uses masked tokens to mitigate the BREACH attack. +* Has no dependencies outside the Go standard library. + +### Example +```go +package main + +import ( + "fmt" + "github.com/justinas/nosurf" + "html/template" + "net/http" +) + +var templateString string = ` + + + +{{ if .name }} +

+<p>Your name: {{ .name }}</p>
+{{ end }}
+<form action="/" method="POST">
+<input type="text" name="name">
+<input type="hidden" name="csrf_token" value="{{ .token }}">
+<input type="submit" value="Send">
+ + +` +var templ = template.Must(template.New("t1").Parse(templateString)) + +func myFunc(w http.ResponseWriter, r *http.Request) { + context := make(map[string]string) + context["token"] = nosurf.Token(r) + if r.Method == "POST" { + context["name"] = r.FormValue("name") + } + + templ.Execute(w, context) +} + +func main() { + myHandler := http.HandlerFunc(myFunc) + fmt.Println("Listening on http://127.0.0.1:8000/") + http.ListenAndServe(":8000", nosurf.New(myHandler)) +} +``` + +### Manual token verification +In some cases the CSRF token may be send through a non standard way, +e.g. a body or request is a JSON encoded message with one of the fields +being a token. + +In such case the handler(path) should be excluded from an automatic +verification by using one of the exemption methods: + +```go + func (h *CSRFHandler) ExemptFunc(fn func(r *http.Request) bool) + func (h *CSRFHandler) ExemptGlob(pattern string) + func (h *CSRFHandler) ExemptGlobs(patterns ...string) + func (h *CSRFHandler) ExemptPath(path string) + func (h *CSRFHandler) ExemptPaths(paths ...string) + func (h *CSRFHandler) ExemptRegexp(re interface{}) + func (h *CSRFHandler) ExemptRegexps(res ...interface{}) +``` + +Later on, the token **must** be verified by manually getting the token from the cookie +and providing the token sent in body through: `VerifyToken(tkn, tkn2 string) bool`. + +Example: +```go +func HandleJson(w http.ResponseWriter, r *http.Request) { + d := struct{ + X,Y int + Tkn string + }{} + json.Unmarshal(ioutil.ReadAll(r.Body), &d) + if !nosurf.VerifyToken(Token(r), d.Tkn) { + http.Errorf(w, "CSRF token incorrect", http.StatusBadRequest) + return + } + // do smth cool +} +``` + +### Contributing + +0. Find an issue that bugs you / open a new one. +1. Discuss. +2. Branch off, commit, test. +3. Make a pull request / attach the commits to the issue. diff --git a/vendor/github.com/justinas/nosurf/context.go b/vendor/github.com/justinas/nosurf/context.go new file mode 100644 index 0000000..fb6b83d --- /dev/null +++ b/vendor/github.com/justinas/nosurf/context.go @@ -0,0 +1,60 @@ +// +build go1.7 + +package nosurf + +import "net/http" + +type ctxKey int + +const ( + nosurfKey ctxKey = iota +) + +type csrfContext struct { + // The masked, base64 encoded token + // That's suitable for use in form fields, etc. + token string + // reason for the failure of CSRF check + reason error +} + +// Token takes an HTTP request and returns +// the CSRF token for that request +// or an empty string if the token does not exist. +// +// Note that the token won't be available after +// CSRFHandler finishes +// (that is, in another handler that wraps it, +// or after the request has been served) +func Token(req *http.Request) string { + ctx := req.Context().Value(nosurfKey).(*csrfContext) + + return ctx.token +} + +// Reason takes an HTTP request and returns +// the reason of failure of the CSRF check for that request +// +// Note that the same availability restrictions apply for Reason() as for Token(). 
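As a runnable variant of the manual-verification idea from the README above, here is a self-contained sketch; the request shape and endpoint are illustrative, and the handler is assumed to sit behind `nosurf.New()` with its path exempted from the automatic check.

```go
package main

import (
	"encoding/json"
	"io/ioutil"
	"net/http"

	"github.com/justinas/nosurf"
)

// handleJSON verifies the CSRF token carried in a JSON body against the
// token nosurf associated with the request (via its cookie).
func handleJSON(w http.ResponseWriter, r *http.Request) {
	var d struct {
		X, Y int
		Tkn  string
	}
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		http.Error(w, "cannot read body", http.StatusBadRequest)
		return
	}
	if err := json.Unmarshal(body, &d); err != nil {
		http.Error(w, "bad JSON", http.StatusBadRequest)
		return
	}
	if !nosurf.VerifyToken(nosurf.Token(r), d.Tkn) {
		http.Error(w, "CSRF token incorrect", http.StatusBadRequest)
		return
	}
	// handle the request...
}
```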
+func Reason(req *http.Request) error { + ctx := req.Context().Value(nosurfKey).(*csrfContext) + + return ctx.reason +} + +func ctxClear(_ *http.Request) { +} + +func ctxSetToken(req *http.Request, token []byte) { + ctx := req.Context().Value(nosurfKey).(*csrfContext) + ctx.token = b64encode(maskToken(token)) +} + +func ctxSetReason(req *http.Request, reason error) { + ctx := req.Context().Value(nosurfKey).(*csrfContext) + if ctx.token == "" { + panic("Reason should never be set when there's no token in the context yet.") + } + + ctx.reason = reason +} diff --git a/vendor/github.com/justinas/nosurf/context_legacy.go b/vendor/github.com/justinas/nosurf/context_legacy.go new file mode 100644 index 0000000..81e1b89 --- /dev/null +++ b/vendor/github.com/justinas/nosurf/context_legacy.go @@ -0,0 +1,101 @@ +// +build !go1.7 + +package nosurf + +import ( + "net/http" + "sync" +) + +// This file implements a context similar to one found +// in gorilla/context, but tailored specifically for our use case +// and not using gorilla's package just because. + +type csrfContext struct { + // The masked, base64 encoded token + // That's suitable for use in form fields, etc. + token string + // reason for the failure of CSRF check + reason error +} + +var ( + contextMap = make(map[*http.Request]*csrfContext) + cmMutex = new(sync.RWMutex) +) + +// Token() takes an HTTP request and returns +// the CSRF token for that request +// or an empty string if the token does not exist. +// +// Note that the token won't be available after +// CSRFHandler finishes +// (that is, in another handler that wraps it, +// or after the request has been served) +func Token(req *http.Request) string { + cmMutex.RLock() + defer cmMutex.RUnlock() + + ctx, ok := contextMap[req] + + if !ok { + return "" + } + + return ctx.token +} + +// Reason() takes an HTTP request and returns +// the reason of failure of the CSRF check for that request +// +// Note that the same availability restrictions apply for Reason() as for Token(). 
+func Reason(req *http.Request) error { + cmMutex.RLock() + defer cmMutex.RUnlock() + + ctx, ok := contextMap[req] + + if !ok { + return nil + } + + return ctx.reason +} + +// Takes a raw token, masks it with a per-request key, +// encodes in base64 and makes it available to the wrapped handler +func ctxSetToken(req *http.Request, token []byte) *http.Request { + cmMutex.Lock() + defer cmMutex.Unlock() + + ctx, ok := contextMap[req] + if !ok { + ctx = new(csrfContext) + contextMap[req] = ctx + } + + ctx.token = b64encode(maskToken(token)) + + return req +} + +func ctxSetReason(req *http.Request, reason error) *http.Request { + cmMutex.Lock() + defer cmMutex.Unlock() + + ctx, ok := contextMap[req] + if !ok { + panic("Reason should never be set when there's no token" + + " (context) yet.") + } + + ctx.reason = reason + return req +} + +func ctxClear(req *http.Request) { + cmMutex.Lock() + defer cmMutex.Unlock() + + delete(contextMap, req) +} diff --git a/vendor/github.com/justinas/nosurf/crypto.go b/vendor/github.com/justinas/nosurf/crypto.go new file mode 100644 index 0000000..68817f2 --- /dev/null +++ b/vendor/github.com/justinas/nosurf/crypto.go @@ -0,0 +1,54 @@ +package nosurf + +import ( + "crypto/rand" + "io" +) + +// Masks/unmasks the given data *in place* +// with the given key +// Slices must be of the same length, or oneTimePad will panic +func oneTimePad(data, key []byte) { + n := len(data) + if n != len(key) { + panic("Lengths of slices are not equal") + } + + for i := 0; i < n; i++ { + data[i] ^= key[i] + } +} + +func maskToken(data []byte) []byte { + if len(data) != tokenLength { + return nil + } + + // tokenLength*2 == len(enckey + token) + result := make([]byte, 2*tokenLength) + // the first half of the result is the OTP + // the second half is the masked token itself + key := result[:tokenLength] + token := result[tokenLength:] + copy(token, data) + + // generate the random token + if _, err := io.ReadFull(rand.Reader, key); err != nil { + panic(err) + } + + oneTimePad(token, key) + return result +} + +func unmaskToken(data []byte) []byte { + if len(data) != tokenLength*2 { + return nil + } + + key := data[:tokenLength] + token := data[tokenLength:] + oneTimePad(token, key) + + return token +} diff --git a/vendor/github.com/justinas/nosurf/exempt.go b/vendor/github.com/justinas/nosurf/exempt.go new file mode 100644 index 0000000..f49a444 --- /dev/null +++ b/vendor/github.com/justinas/nosurf/exempt.go @@ -0,0 +1,108 @@ +package nosurf + +import ( + "fmt" + "net/http" + pathModule "path" + "reflect" + "regexp" +) + +// Checks if the given request is exempt from CSRF checks. +// It checks the ExemptFunc first, then the exact paths, +// then the globs and finally the regexps. +func (h *CSRFHandler) IsExempt(r *http.Request) bool { + if h.exemptFunc != nil && h.exemptFunc(r) { + return true + } + + path := r.URL.Path + if sContains(h.exemptPaths, path) { + return true + } + + // then the globs + for _, glob := range h.exemptGlobs { + matched, err := pathModule.Match(glob, path) + if matched && err == nil { + return true + } + } + + // finally, the regexps + for _, re := range h.exemptRegexps { + if re.MatchString(path) { + return true + } + } + + return false +} + +// Exempts an exact path from CSRF checks +// With this (and other Exempt* methods) +// you should take note that Go's paths +// include a leading slash. 
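Taken together, the exemption hooks look like this in use; the paths, glob pattern and regexp below are illustrative.

```go
package main

import (
	"net/http"

	"github.com/justinas/nosurf"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {})

	csrf := nosurf.New(mux)
	// Everything is matched against r.URL.Path, so patterns include the leading slash.
	csrf.ExemptPath("/webhook")         // exact path (illustrative)
	csrf.ExemptGlob("/static/*")        // glob pattern (illustrative)
	csrf.ExemptRegexp("^/api/v[0-9]+/") // a string is compiled; a *regexp.Regexp is also accepted
	csrf.ExemptFunc(func(r *http.Request) bool {
		return r.Header.Get("X-Internal") == "1" // custom rule (illustrative)
	})

	http.ListenAndServe(":8000", csrf)
}
```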
+func (h *CSRFHandler) ExemptPath(path string) { + h.exemptPaths = append(h.exemptPaths, path) +} + +// A variadic argument version of ExemptPath() +func (h *CSRFHandler) ExemptPaths(paths ...string) { + for _, v := range paths { + h.ExemptPath(v) + } +} + +// Exempts URLs that match the specified glob pattern +// (as used by filepath.Match()) from CSRF checks +// +// Note that ExemptGlob() is unable to detect syntax errors, +// because it doesn't have a path to check it against +// and filepath.Match() doesn't report an error +// if the path is empty. +// If we find a way to check the syntax, ExemptGlob +// MIGHT PANIC on a syntax error in the future. +// ALWAYS check your globs for syntax errors. +func (h *CSRFHandler) ExemptGlob(pattern string) { + h.exemptGlobs = append(h.exemptGlobs, pattern) +} + +// A variadic argument version of ExemptGlob() +func (h *CSRFHandler) ExemptGlobs(patterns ...string) { + for _, v := range patterns { + h.ExemptGlob(v) + } +} + +// Accepts a regular expression string or a compiled *regexp.Regexp +// and exempts URLs that match it from CSRF checks. +// +// If the given argument is neither of the accepted values, +// or the given string fails to compile, ExemptRegexp() panics. +func (h *CSRFHandler) ExemptRegexp(re interface{}) { + var compiled *regexp.Regexp + + switch re.(type) { + case string: + compiled = regexp.MustCompile(re.(string)) + case *regexp.Regexp: + compiled = re.(*regexp.Regexp) + default: + err := fmt.Sprintf("%v isn't a valid type for ExemptRegexp()", reflect.TypeOf(re)) + panic(err) + } + + h.exemptRegexps = append(h.exemptRegexps, compiled) +} + +// A variadic argument version of ExemptRegexp() +func (h *CSRFHandler) ExemptRegexps(res ...interface{}) { + for _, v := range res { + h.ExemptRegexp(v) + } +} + +func (h *CSRFHandler) ExemptFunc(fn func(r *http.Request) bool) { + h.exemptFunc = fn +} diff --git a/vendor/github.com/justinas/nosurf/handler.go b/vendor/github.com/justinas/nosurf/handler.go new file mode 100644 index 0000000..298df5e --- /dev/null +++ b/vendor/github.com/justinas/nosurf/handler.go @@ -0,0 +1,220 @@ +// Package nosurf implements an HTTP handler that +// mitigates Cross-Site Request Forgery Attacks. +package nosurf + +import ( + "errors" + "net/http" + "net/url" + "regexp" +) + +const ( + // the name of CSRF cookie + CookieName = "csrf_token" + // the name of the form field + FormFieldName = "csrf_token" + // the name of CSRF header + HeaderName = "X-CSRF-Token" + // the HTTP status code for the default failure handler + FailureCode = 400 + + // Max-Age in seconds for the default base cookie. 365 days. + MaxAge = 365 * 24 * 60 * 60 +) + +var safeMethods = []string{"GET", "HEAD", "OPTIONS", "TRACE"} + +// reasons for CSRF check failures +var ( + ErrNoReferer = errors.New("A secure request contained no Referer or its value was malformed") + ErrBadReferer = errors.New("A secure request's Referer comes from a different Origin" + + " from the request's URL") + ErrBadToken = errors.New("The CSRF token in the cookie doesn't match the one" + + " received in a form/header.") +) + +type CSRFHandler struct { + // Handlers that CSRFHandler wraps. + successHandler http.Handler + failureHandler http.Handler + + // The base cookie that CSRF cookies will be built upon. + // This should be a better solution of customizing the options + // than a bunch of methods SetCookieExpiration(), etc. + baseCookie http.Cookie + + // Slices of paths that are exempt from CSRF checks. + // They can be specified by... 
+ // ...an exact path, + exemptPaths []string + // ...a regexp, + exemptRegexps []*regexp.Regexp + // ...or a glob (as used by path.Match()). + exemptGlobs []string + // ...or a custom matcher function + exemptFunc func(r *http.Request) bool + + // All of those will be matched against Request.URL.Path, + // So they should take the leading slash into account +} + +func defaultFailureHandler(w http.ResponseWriter, r *http.Request) { + http.Error(w, "", FailureCode) +} + +// Extracts the "sent" token from the request +// and returns an unmasked version of it +func extractToken(r *http.Request) []byte { + var sentToken string + + // Prefer the header over form value + sentToken = r.Header.Get(HeaderName) + + // Then POST values + if len(sentToken) == 0 { + sentToken = r.PostFormValue(FormFieldName) + } + + // If all else fails, try a multipart value. + // PostFormValue() will already have called ParseMultipartForm() + if len(sentToken) == 0 && r.MultipartForm != nil { + vals := r.MultipartForm.Value[FormFieldName] + if len(vals) != 0 { + sentToken = vals[0] + } + } + + return b64decode(sentToken) +} + +// Constructs a new CSRFHandler that calls +// the specified handler if the CSRF check succeeds. +func New(handler http.Handler) *CSRFHandler { + baseCookie := http.Cookie{} + baseCookie.MaxAge = MaxAge + + csrf := &CSRFHandler{successHandler: handler, + failureHandler: http.HandlerFunc(defaultFailureHandler), + baseCookie: baseCookie, + } + + return csrf +} + +// The same as New(), but has an interface return type. +func NewPure(handler http.Handler) http.Handler { + return New(handler) +} + +func (h *CSRFHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + r = addNosurfContext(r) + defer ctxClear(r) + w.Header().Add("Vary", "Cookie") + + var realToken []byte + + tokenCookie, err := r.Cookie(CookieName) + if err == nil { + realToken = b64decode(tokenCookie.Value) + } + + // If the length of the real token isn't what it should be, + // it has either been tampered with, + // or we're migrating onto a new algorithm for generating tokens, + // or it hasn't ever been set so far. + // In any case of those, we should regenerate it. + // + // As a consequence, CSRF check will fail when comparing the tokens later on, + // so we don't have to fail it just yet. + if len(realToken) != tokenLength { + h.RegenerateToken(w, r) + } else { + ctxSetToken(r, realToken) + } + + if sContains(safeMethods, r.Method) || h.IsExempt(r) { + // short-circuit with a success for safe methods + h.handleSuccess(w, r) + return + } + + // if the request is secure, we enforce origin check + // for referer to prevent MITM of http->https requests + if r.URL.Scheme == "https" { + referer, err := url.Parse(r.Header.Get("Referer")) + + // if we can't parse the referer or it's empty, + // we assume it's not specified + if err != nil || referer.String() == "" { + ctxSetReason(r, ErrNoReferer) + h.handleFailure(w, r) + return + } + + // if the referer doesn't share origin with the request URL, + // we have another error for that + if !sameOrigin(referer, r.URL) { + ctxSetReason(r, ErrBadReferer) + h.handleFailure(w, r) + return + } + } + + // Finally, we check the token itself. + sentToken := extractToken(r) + + if !verifyToken(realToken, sentToken) { + ctxSetReason(r, ErrBadToken) + h.handleFailure(w, r) + return + } + + // Everything else passed, handle the success. + h.handleSuccess(w, r) +} + +// handleSuccess simply calls the successHandler. 
+// Everything else, like setting a token in the context +// is taken care of by h.ServeHTTP() +func (h *CSRFHandler) handleSuccess(w http.ResponseWriter, r *http.Request) { + h.successHandler.ServeHTTP(w, r) +} + +// Same applies here: h.ServeHTTP() sets the failure reason, the token, +// and only then calls handleFailure() +func (h *CSRFHandler) handleFailure(w http.ResponseWriter, r *http.Request) { + h.failureHandler.ServeHTTP(w, r) +} + +// Generates a new token, sets it on the given request and returns it +func (h *CSRFHandler) RegenerateToken(w http.ResponseWriter, r *http.Request) string { + token := generateToken() + h.setTokenCookie(w, r, token) + + return Token(r) +} + +func (h *CSRFHandler) setTokenCookie(w http.ResponseWriter, r *http.Request, token []byte) { + // ctxSetToken() does the masking for us + ctxSetToken(r, token) + + cookie := h.baseCookie + cookie.Name = CookieName + cookie.Value = b64encode(token) + + http.SetCookie(w, &cookie) + +} + +// Sets the handler to call in case the CSRF check +// fails. By default it's defaultFailureHandler. +func (h *CSRFHandler) SetFailureHandler(handler http.Handler) { + h.failureHandler = handler +} + +// Sets the base cookie to use when building a CSRF token cookie +// This way you can specify the Domain, Path, HttpOnly, Secure, etc. +func (h *CSRFHandler) SetBaseCookie(cookie http.Cookie) { + h.baseCookie = cookie +} diff --git a/vendor/github.com/justinas/nosurf/handler_go17.go b/vendor/github.com/justinas/nosurf/handler_go17.go new file mode 100644 index 0000000..2d8ee9f --- /dev/null +++ b/vendor/github.com/justinas/nosurf/handler_go17.go @@ -0,0 +1,12 @@ +// +build go1.7 + +package nosurf + +import ( + "context" + "net/http" +) + +func addNosurfContext(r *http.Request) *http.Request { + return r.WithContext(context.WithValue(r.Context(), nosurfKey, &csrfContext{})) +} diff --git a/vendor/github.com/justinas/nosurf/handler_legacy.go b/vendor/github.com/justinas/nosurf/handler_legacy.go new file mode 100644 index 0000000..6f2d10a --- /dev/null +++ b/vendor/github.com/justinas/nosurf/handler_legacy.go @@ -0,0 +1,9 @@ +// +build !go1.7 + +package nosurf + +import "net/http" + +func addNosurfContext(r *http.Request) *http.Request { + return r +} diff --git a/vendor/github.com/justinas/nosurf/token.go b/vendor/github.com/justinas/nosurf/token.go new file mode 100644 index 0000000..3c86e11 --- /dev/null +++ b/vendor/github.com/justinas/nosurf/token.go @@ -0,0 +1,105 @@ +package nosurf + +import ( + "crypto/rand" + "crypto/subtle" + "encoding/base64" + "fmt" + "io" +) + +const ( + tokenLength = 32 +) + +/* +There are two types of tokens. + +* The unmasked "real" token consists of 32 random bytes. + It is stored in a cookie (base64-encoded) and it's the + "reference" value that sent tokens get compared to. + +* The masked "sent" token consists of 64 bytes: + 32 byte key used for one-time pad masking and + 32 byte "real" token masked with the said key. + It is used as a value (base64-encoded as well) + in forms and/or headers. + +Upon processing, both tokens are base64-decoded +and then treated as 32/64 byte slices. 
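+
+A sketch of the round trip (illustrative summary of maskToken/unmaskToken below):
+
+	real = 32 random bytes             stored base64-encoded in the cookie
+	key  = 32 fresh random bytes       generated for every masking
+	sent = key || (real XOR key)       64 bytes, base64-encoded into forms/headers
+	real = sent[32:] XOR sent[:32]     recovered on receipt, then compared in constant time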
+*/ + +// A token is generated by returning tokenLength bytes +// from crypto/rand +func generateToken() []byte { + bytes := make([]byte, tokenLength) + + if _, err := io.ReadFull(rand.Reader, bytes); err != nil { + panic(err) + } + + return bytes +} + +func b64encode(data []byte) string { + return base64.StdEncoding.EncodeToString(data) +} + +func b64decode(data string) []byte { + decoded, err := base64.StdEncoding.DecodeString(data) + if err != nil { + return nil + } + return decoded +} + +// VerifyToken verifies the sent token equals the real one +// and returns a bool value indicating if tokens are equal. +// Supports masked tokens. realToken comes from Token(r) and +// sentToken is token sent unusual way. +func VerifyToken(realToken, sentToken string) bool { + r := b64decode(realToken) + if len(r) == 2*tokenLength { + r = unmaskToken(r) + } + s := b64decode(sentToken) + if len(s) == 2*tokenLength { + s = unmaskToken(s) + } + return subtle.ConstantTimeCompare(r, s) == 1 +} + +func verifyToken(realToken, sentToken []byte) bool { + realN := len(realToken) + sentN := len(sentToken) + + // sentN == tokenLength means the token is unmasked + // sentN == 2*tokenLength means the token is masked. + + if realN == tokenLength && sentN == 2*tokenLength { + return verifyMasked(realToken, sentToken) + } else { + return false + } +} + +// Verifies the masked token +func verifyMasked(realToken, sentToken []byte) bool { + sentPlain := unmaskToken(sentToken) + return subtle.ConstantTimeCompare(realToken, sentPlain) == 1 +} + +func checkForPRNG() { + // Check that cryptographically secure PRNG is available + // In case it's not, panic. + buf := make([]byte, 1) + _, err := io.ReadFull(rand.Reader, buf) + + if err != nil { + panic(fmt.Sprintf("crypto/rand is unavailable: Read() failed with %#v", err)) + } +} + +func init() { + checkForPRNG() +} diff --git a/vendor/github.com/justinas/nosurf/utils.go b/vendor/github.com/justinas/nosurf/utils.go new file mode 100644 index 0000000..37ae6d9 --- /dev/null +++ b/vendor/github.com/justinas/nosurf/utils.go @@ -0,0 +1,25 @@ +package nosurf + +import ( + "net/url" +) + +func sContains(slice []string, s string) bool { + // checks if the given slice contains the given string + for _, v := range slice { + if v == s { + return true + } + } + return false +} + +// Checks if the given URLs have the same origin +// (that is, they share the host, the port and the scheme) +func sameOrigin(u1, u2 *url.URL) bool { + // we take pointers, as url.Parse() returns a pointer + // and http.Request.URL is a pointer as well + + // Host is either host or host:port + return (u1.Scheme == u2.Scheme && u1.Host == u2.Host) +} diff --git a/vendor/github.com/kardianos/osext/LICENSE b/vendor/github.com/kardianos/osext/LICENSE new file mode 100644 index 0000000..7448756 --- /dev/null +++ b/vendor/github.com/kardianos/osext/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/kardianos/osext/README.md b/vendor/github.com/kardianos/osext/README.md new file mode 100644 index 0000000..15cbc3d --- /dev/null +++ b/vendor/github.com/kardianos/osext/README.md @@ -0,0 +1,21 @@ +### Extensions to the "os" package. + +[![GoDoc](https://godoc.org/github.com/kardianos/osext?status.svg)](https://godoc.org/github.com/kardianos/osext) + +## Find the current Executable and ExecutableFolder. + +As of go1.8 the Executable function may be found in `os`. The Executable function +in the std lib `os` package is used if available. + +There is sometimes utility in finding the current executable file +that is running. This can be used for upgrading the current executable +or finding resources located relative to the executable file. Both +working directory and the os.Args[0] value are arbitrary and cannot +be relied on; os.Args[0] can be "faked". + +Multi-platform and supports: + * Linux + * OS X + * Windows + * Plan 9 + * BSDs. diff --git a/vendor/github.com/kardianos/osext/osext.go b/vendor/github.com/kardianos/osext/osext.go new file mode 100644 index 0000000..17f380f --- /dev/null +++ b/vendor/github.com/kardianos/osext/osext.go @@ -0,0 +1,33 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Extensions to the standard "os" package. +package osext // import "github.com/kardianos/osext" + +import "path/filepath" + +var cx, ce = executableClean() + +func executableClean() (string, error) { + p, err := executable() + return filepath.Clean(p), err +} + +// Executable returns an absolute path that can be used to +// re-invoke the current program. +// It may not be valid after the current program exits. +func Executable() (string, error) { + return cx, ce +} + +// Returns same path as Executable, returns just the folder +// path. Excludes the executable name and any trailing slash. 
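+//
+// Usage sketch (illustrative; "templates" is an arbitrary directory name):
+//
+//	dir, err := osext.ExecutableFolder()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	tmplDir := filepath.Join(dir, "templates") // resources shipped next to the binary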
+func ExecutableFolder() (string, error) { + p, err := Executable() + if err != nil { + return "", err + } + + return filepath.Dir(p), nil +} diff --git a/vendor/github.com/kardianos/osext/osext_go18.go b/vendor/github.com/kardianos/osext/osext_go18.go new file mode 100644 index 0000000..009d8a9 --- /dev/null +++ b/vendor/github.com/kardianos/osext/osext_go18.go @@ -0,0 +1,9 @@ +//+build go1.8,!openbsd + +package osext + +import "os" + +func executable() (string, error) { + return os.Executable() +} diff --git a/vendor/github.com/kardianos/osext/osext_plan9.go b/vendor/github.com/kardianos/osext/osext_plan9.go new file mode 100644 index 0000000..95e2371 --- /dev/null +++ b/vendor/github.com/kardianos/osext/osext_plan9.go @@ -0,0 +1,22 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !go1.8 + +package osext + +import ( + "os" + "strconv" + "syscall" +) + +func executable() (string, error) { + f, err := os.Open("/proc/" + strconv.Itoa(os.Getpid()) + "/text") + if err != nil { + return "", err + } + defer f.Close() + return syscall.Fd2path(int(f.Fd())) +} diff --git a/vendor/github.com/kardianos/osext/osext_procfs.go b/vendor/github.com/kardianos/osext/osext_procfs.go new file mode 100644 index 0000000..7b0debb --- /dev/null +++ b/vendor/github.com/kardianos/osext/osext_procfs.go @@ -0,0 +1,36 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.8,linux !go1.8,netbsd !go1.8,solaris !go1.8,dragonfly + +package osext + +import ( + "errors" + "fmt" + "os" + "runtime" + "strings" +) + +func executable() (string, error) { + switch runtime.GOOS { + case "linux": + const deletedTag = " (deleted)" + execpath, err := os.Readlink("/proc/self/exe") + if err != nil { + return execpath, err + } + execpath = strings.TrimSuffix(execpath, deletedTag) + execpath = strings.TrimPrefix(execpath, deletedTag) + return execpath, nil + case "netbsd": + return os.Readlink("/proc/curproc/exe") + case "dragonfly": + return os.Readlink("/proc/curproc/file") + case "solaris": + return os.Readlink(fmt.Sprintf("/proc/%d/path/a.out", os.Getpid())) + } + return "", errors.New("ExecPath not implemented for " + runtime.GOOS) +} diff --git a/vendor/github.com/kardianos/osext/osext_sysctl.go b/vendor/github.com/kardianos/osext/osext_sysctl.go new file mode 100644 index 0000000..33cee25 --- /dev/null +++ b/vendor/github.com/kardianos/osext/osext_sysctl.go @@ -0,0 +1,126 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.8,darwin !go1.8,freebsd openbsd + +package osext + +import ( + "os" + "os/exec" + "path/filepath" + "runtime" + "syscall" + "unsafe" +) + +var initCwd, initCwdErr = os.Getwd() + +func executable() (string, error) { + var mib [4]int32 + switch runtime.GOOS { + case "freebsd": + mib = [4]int32{1 /* CTL_KERN */, 14 /* KERN_PROC */, 12 /* KERN_PROC_PATHNAME */, -1} + case "darwin": + mib = [4]int32{1 /* CTL_KERN */, 38 /* KERN_PROCARGS */, int32(os.Getpid()), -1} + case "openbsd": + mib = [4]int32{1 /* CTL_KERN */, 55 /* KERN_PROC_ARGS */, int32(os.Getpid()), 1 /* KERN_PROC_ARGV */} + } + + n := uintptr(0) + // Get length. 
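+	// The first sysctl call passes a nil output buffer so the kernel reports
+	// the required size in n; the second call fills the allocated buffer.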
+ _, _, errNum := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, 0, uintptr(unsafe.Pointer(&n)), 0, 0) + if errNum != 0 { + return "", errNum + } + if n == 0 { // This shouldn't happen. + return "", nil + } + buf := make([]byte, n) + _, _, errNum = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&n)), 0, 0) + if errNum != 0 { + return "", errNum + } + if n == 0 { // This shouldn't happen. + return "", nil + } + + var execPath string + switch runtime.GOOS { + case "openbsd": + // buf now contains **argv, with pointers to each of the C-style + // NULL terminated arguments. + var args []string + argv := uintptr(unsafe.Pointer(&buf[0])) + Loop: + for { + argp := *(**[1 << 20]byte)(unsafe.Pointer(argv)) + if argp == nil { + break + } + for i := 0; uintptr(i) < n; i++ { + // we don't want the full arguments list + if string(argp[i]) == " " { + break Loop + } + if argp[i] != 0 { + continue + } + args = append(args, string(argp[:i])) + n -= uintptr(i) + break + } + if n < unsafe.Sizeof(argv) { + break + } + argv += unsafe.Sizeof(argv) + n -= unsafe.Sizeof(argv) + } + execPath = args[0] + // There is no canonical way to get an executable path on + // OpenBSD, so check PATH in case we are called directly + if execPath[0] != '/' && execPath[0] != '.' { + execIsInPath, err := exec.LookPath(execPath) + if err == nil { + execPath = execIsInPath + } + } + default: + for i, v := range buf { + if v == 0 { + buf = buf[:i] + break + } + } + execPath = string(buf) + } + + var err error + // execPath will not be empty due to above checks. + // Try to get the absolute path if the execPath is not rooted. + if execPath[0] != '/' { + execPath, err = getAbs(execPath) + if err != nil { + return execPath, err + } + } + // For darwin KERN_PROCARGS may return the path to a symlink rather than the + // actual executable. + if runtime.GOOS == "darwin" { + if execPath, err = filepath.EvalSymlinks(execPath); err != nil { + return execPath, err + } + } + return execPath, nil +} + +func getAbs(execPath string) (string, error) { + if initCwdErr != nil { + return execPath, initCwdErr + } + // The execPath may begin with a "../" or a "./" so clean it first. + // Join the two paths, trailing and starting slashes undetermined, so use + // the generic Join function. + return filepath.Join(initCwd, filepath.Clean(execPath)), nil +} diff --git a/vendor/github.com/kardianos/osext/osext_windows.go b/vendor/github.com/kardianos/osext/osext_windows.go new file mode 100644 index 0000000..074b3b3 --- /dev/null +++ b/vendor/github.com/kardianos/osext/osext_windows.go @@ -0,0 +1,36 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//+build !go1.8 + +package osext + +import ( + "syscall" + "unicode/utf16" + "unsafe" +) + +var ( + kernel = syscall.MustLoadDLL("kernel32.dll") + getModuleFileNameProc = kernel.MustFindProc("GetModuleFileNameW") +) + +// GetModuleFileName() with hModule = NULL +func executable() (exePath string, err error) { + return getModuleFileName() +} + +func getModuleFileName() (string, error) { + var n uint32 + b := make([]uint16, syscall.MAX_PATH) + size := uint32(len(b)) + + r0, _, e1 := getModuleFileNameProc.Call(0, uintptr(unsafe.Pointer(&b[0])), uintptr(size)) + n = uint32(r0) + if n == 0 { + return "", e1 + } + return string(utf16.Decode(b[0:n])), nil +} diff --git a/vendor/github.com/pressly/chi/CHANGELOG.md b/vendor/github.com/pressly/chi/CHANGELOG.md new file mode 100644 index 0000000..754cd2f --- /dev/null +++ b/vendor/github.com/pressly/chi/CHANGELOG.md @@ -0,0 +1,40 @@ +# Changelog + +## v2.0.0 (2017-01-06) + +- After many months of v2 being in an RC state with many companies and users running it in +production, the inclusion of some improvements to the middlewares, we are very pleased to +announce v2.0.0 of chi. + + +## v2.0.0-rc1 (2016-07-26) + +- Huge update! chi v2 is a large refactor targetting Go 1.7+. As of Go 1.7, the popular +community `"net/context"` package has been included in the standard library as `"context"` and +utilized by `"net/http"` and `http.Request` to managing deadlines, cancelation signals and other +request-scoped values. We're very excited about the new context addition and are proud to +introduce chi v2, a minimal and powerful routing package for building large HTTP services, +with zero external dependencies. Chi focuses on idiomatic design and encourages the use of +stdlib HTTP handlers and middlwares. +- chi v2 deprecates its `chi.Handler` interface and requires `http.Handler` or `http.HandlerFunc` +- chi v2 stores URL routing parameters and patterns in the standard request context: `r.Context()` +- chi v2 lower-level routing context is accessible by `chi.RouteContext(r.Context()) *chi.Context`, + which provides direct access to URL routing parameters, the routing path and the matching + routing patterns. +- Users upgrading from chi v1 to v2, need to: + 1. Update the old chi.Handler signature, `func(ctx context.Context, w http.ResponseWriter, r *http.Request)` to + the standard http.Handler: `func(w http.ResponseWriter, r *http.Request)` + 2. Use `chi.URLParam(r *http.Request, paramKey string) string` + or `URLParamFromCtx(ctx context.Context, paramKey string) string` to access a url parameter value + + +## v1.0.0 (2016-07-01) + +- Released chi v1 stable https://github.com/pressly/chi/tree/v1.0.0 for Go 1.6 and older. + + +## v0.9.0 (2016-03-31) + +- Reuse context objects via sync.Pool for zero-allocation routing [#33](https://github.com/pressly/chi/pull/33) +- BREAKING NOTE: due to subtle API changes, previously `chi.URLParams(ctx)["id"]` used to access url parameters + has changed to: `chi.URLParam(ctx, "id")` diff --git a/vendor/github.com/pressly/chi/CONTRIBUTING.md b/vendor/github.com/pressly/chi/CONTRIBUTING.md new file mode 100644 index 0000000..5ea99fa --- /dev/null +++ b/vendor/github.com/pressly/chi/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing + +## Prerequisites + +1. [Install Go][go-install]. +2. Download the sources and switch the working directory: + + ```bash + go get -u -d github.com/pressly/chi + cd $GOPATH/src/github.com/pressly/chi + ``` + +## Submitting a Pull Request + +A typical workflow is: + +1. 
[Fork the repository.][fork] [This tip maybe also helpful.][go-fork-tip] +2. [Create a topic branch.][branch] +3. Add tests for your change. +4. Run `go test`. If your tests pass, return to the step 3. +5. Implement the change and ensure the steps from the previous step pass. +6. Run `goimports -w .`, to ensure the new code conforms to Go formatting guideline. +7. [Add, commit and push your changes.][git-help] +8. [Submit a pull request.][pull-req] + +[go-install]: https://golang.org/doc/install +[go-fork-tip]: http://blog.campoy.cat/2014/03/github-and-go-forking-pull-requests-and.html +[fork]: https://help.github.com/articles/fork-a-repo +[branch]: http://learn.github.com/p/branching.html +[git-help]: https://guides.github.com +[pull-req]: https://help.github.com/articles/using-pull-requests diff --git a/vendor/github.com/pressly/chi/LICENSE b/vendor/github.com/pressly/chi/LICENSE new file mode 100644 index 0000000..7b5f914 --- /dev/null +++ b/vendor/github.com/pressly/chi/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2015-present Peter Kieltyka (https://github.com/pkieltyka) + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/pressly/chi/README.md b/vendor/github.com/pressly/chi/README.md new file mode 100644 index 0000000..19cb9b6 --- /dev/null +++ b/vendor/github.com/pressly/chi/README.md @@ -0,0 +1,408 @@ +chi +=== + +[![GoDoc Widget]][GoDoc] [![Travis Widget]][Travis] + +`chi` is a lightweight, idiomatic and composable router for building Go 1.7+ HTTP services. It's +especially good at helping you write large REST API services that are kept maintainable as your +project grows and changes. `chi` is built on the new `context` package introduced in Go 1.7 to +handle signaling, cancelation and request-scoped values across a handler chain. + +The focus of the project has been to seek out an elegant and comfortable design for writing +REST API servers, written during the development of the Pressly API service that powers our +public API service, which in turn powers all of our client-side applications. + +The key considerations of chi's design are: project structure, maintainability, standard http +handlers (stdlib-only), developer productivity, and deconstructing a large system into many small +parts. The core router `github.com/pressly/chi` is quite small (less than 1000 LOC), but we've also +included some useful/optional subpackages: `middleware`, `render` and `docgen`. We hope you enjoy it too! 
+ +## Install + +`go get -u github.com/pressly/chi` + + +## Features + +* **Lightweight** - cloc'd in <1000 LOC for the chi router +* **Fast** - yes, see [benchmarks](#benchmarks) +* **100% compatible with net/http** - use any http or middleware pkg in the ecosystem that is also compat with `net/http` +* **Designed for modular/composable APIs** - middlewares, inline middlewares, route groups and subrouter mounting +* **Context control** - built on new `context` package, providing value chaining, cancelations and timeouts +* **Robust** - tested / used in production at Pressly.com, and many others +* **Doc generation** - `docgen` auto-generates routing documentation from your source to JSON or Markdown +* **No external dependencies** - plain ol' Go 1.7+ stdlib + net/http + + +## Examples + +* [rest](https://github.com/pressly/chi/blob/master/_examples/rest/main.go) - REST APIs made easy, productive and maintainable +* [logging](https://github.com/pressly/chi/blob/master/_examples/logging/main.go) - Easy structured logging for any backend +* [limits](https://github.com/pressly/chi/blob/master/_examples/limits/main.go) - Timeouts and Throttling +* [todos-resource](https://github.com/pressly/chi/blob/master/_examples/todos-resource/main.go) - Struct routers/handlers, an example of another code layout style +* [versions](https://github.com/pressly/chi/blob/master/_examples/versions/main.go) - Demo of `chi/render` subpkg +* [fileserver](https://github.com/pressly/chi/blob/master/_examples/fileserver/main.go) - Easily serve static files +* [graceful](https://github.com/pressly/chi/blob/master/_examples/graceful/main.go) - Graceful context signaling and server shutdown + + +**As easy as:** + +```go +package main + +import ( + "net/http" + "github.com/pressly/chi" +) + +func main() { + r := chi.NewRouter() + r.Get("/", func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("welcome")) + }) + http.ListenAndServe(":3000", r) +} +``` + +**REST Preview:** + +Here is a little preview of how routing looks like with chi. Also take a look at the generated routing docs +in JSON ([routes.json](https://github.com/pressly/chi/blob/master/_examples/rest/routes.json)) and in +Markdown ([routes.md](https://github.com/pressly/chi/blob/master/_examples/rest/routes.md)). + +I highly recommend reading the source of the [examples](#examples) listed above, they will show you all the features +of chi and serve as a good form of documentation. + +```go +import ( + //... + "context" + "github.com/pressly/chi" + "github.com/pressly/chi/middleware" +) + +func main() { + r := chi.NewRouter() + + // A good base middleware stack + r.Use(middleware.RequestID) + r.Use(middleware.RealIP) + r.Use(middleware.Logger) + r.Use(middleware.Recoverer) + + // When a client closes their connection midway through a request, the + // http.CloseNotifier will cancel the request context (ctx). + r.Use(middleware.CloseNotify) + + // Set a timeout value on the request context (ctx), that will signal + // through ctx.Done() that the request has timed out and further + // processing should be stopped. 
+ r.Use(middleware.Timeout(60 * time.Second)) + + r.Get("/", func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("hi")) + }) + + // RESTy routes for "articles" resource + r.Route("/articles", func(r chi.Router) { + r.With(paginate).Get("/", listArticles) // GET /articles + r.Post("/", createArticle) // POST /articles + r.Get("/search", searchArticles) // GET /articles/search + + r.Route("/:articleID", func(r chi.Router) { + r.Use(ArticleCtx) + r.Get("/", getArticle) // GET /articles/123 + r.Put("/", updateArticle) // PUT /articles/123 + r.Delete("/", deleteArticle) // DELETE /articles/123 + }) + }) + + // Mount the admin sub-router + r.Mount("/admin", adminRouter()) + + http.ListenAndServe(":3333", r) +} + +func ArticleCtx(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + articleID := chi.URLParam(r, "articleID") + article, err := dbGetArticle(articleID) + if err != nil { + http.Error(w, http.StatusText(404), 404) + return + } + ctx := context.WithValue(r.Context(), "article", article) + next.ServeHTTP(w, r.WithContext(ctx)) + }) +} + +func getArticle(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + article, ok := ctx.Value("article").(*Article) + if !ok { + http.Error(w, http.StatusText(422), 422) + return + } + w.Write([]byte(fmt.Sprintf("title:%s", article.Title))) +} + +// A completely separate router for administrator routes +func adminRouter() http.Handler { + r := chi.NewRouter() + r.Use(AdminOnly) + r.Get("/", adminIndex) + r.Get("/accounts", adminListAccounts) + return r +} + +func AdminOnly(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + perm, ok := ctx.Value("acl.permission").(YourPermissionType) + if !ok || !perm.IsAdmin() { + http.Error(w, http.StatusText(403), 403) + return + } + next.ServeHTTP(w, r) + }) +} +``` + + +## Router design + +chi's router is based on a kind of [Patricia Radix trie](https://en.wikipedia.org/wiki/Radix_tree). +The router is fully compatible with `net/http`. + +Built on top of the tree is the `Router` interface: + +```go +// Router consisting of the core routing methods used by chi's Mux, +// using only the standard net/http. +type Router interface { + http.Handler + Routes + + // Use appends one of more middlewares onto the Router stack. + Use(middlewares ...func(http.Handler) http.Handler) + + // With adds inline middlewares for an endpoint handler. + With(middlewares ...func(http.Handler) http.Handler) Router + + // Group adds a new inline-Router along the current routing + // path, with a fresh middleware stack for the inline-Router. + Group(fn func(r Router)) Router + + // Route mounts a sub-Router along a `pattern`` string. + Route(pattern string, fn func(r Router)) Router + + // Mount attaches another http.Handler along ./pattern/* + Mount(pattern string, h http.Handler) + + // Handle and HandleFunc adds routes for `pattern` that matches + // all HTTP methods. 
+ Handle(pattern string, h http.Handler) + HandleFunc(pattern string, h http.HandlerFunc) + + // HTTP-method routing along `pattern` + Connect(pattern string, h http.HandlerFunc) + Delete(pattern string, h http.HandlerFunc) + Get(pattern string, h http.HandlerFunc) + Head(pattern string, h http.HandlerFunc) + Options(pattern string, h http.HandlerFunc) + Patch(pattern string, h http.HandlerFunc) + Post(pattern string, h http.HandlerFunc) + Put(pattern string, h http.HandlerFunc) + Trace(pattern string, h http.HandlerFunc) + + // NotFound defines a handler to respond whenever a route could + // not be found. + NotFound(h http.HandlerFunc) +} + +// Routes interface adds two methods for router traversal, which is also +// used by the `docgen` subpackage to generation documentation for Routers. +type Routes interface { + // Routes returns the routing tree in an easily traversable structure. + Routes() []Route + + // Middlewares returns the list of middlewares in use by the router. + Middlewares() Middlewares +} +``` + +Each routing method accepts a URL `pattern` and chain of `handlers`. The URL pattern +supports named params (ie. `/users/:userID`) and wildcards (ie. `/admin/*`). + + +### Middleware handlers + +chi's middlewares are just stdlib net/http middleware handlers. There is nothing special +about them, which means the router and all the tooling is designed to be compatible and +friendly with any middleware in the community. This offers much better extensibility and reuse +of packages and is at the heart of chi's purpose. + +Here is an example of a standard net/http middleware handler using the new request context +available in Go 1.7+. This middleware sets a hypothetical user identifier on the request +context and calls the next handler in the chain. + +```go +// HTTP middleware setting a value on the request context +func MyMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := context.WithValue(r.Context(), "user", "123") + next.ServeHTTP(w, r.WithContext(ctx)) + }) +} +``` + + +### Request handlers + +chi uses standard net/http request handlers. This little snippet is an example of a http.Handler +func that reads a user identifier from the request context - hypothetically, identifying +the user sending an authenticated request, validated+set by a previous middleware handler. + +```go +// HTTP handler accessing data from the request context. +func MyRequestHandler(w http.ResponseWriter, r *http.Request) { + user := r.Context().Value("user").(string) + w.Write([]byte(fmt.Sprintf("hi %s", user))) +} +``` + + +### URL parameters + +chi's router parses and stores URL parameters right onto the request context. Here is +an example of how to access URL params in your net/http handlers. And of course, middlewares +are able to access the same information. + +```go +// HTTP handler accessing the url routing parameters. +func MyRequestHandler(w http.ResponseWriter, r *http.Request) { + userID := chi.URLParam(r, "userID") // from a route like /users/:userID + + ctx := r.Context() + key := ctx.Value("key").(string) + + w.Write([]byte(fmt.Sprintf("hi %v, %v", userID, key))) +} +``` + + +## Middlewares + +chi comes equipped with an optional `middleware` package, providing a suite of standard +`net/http` middlewares. Please note, any middleware in the ecosystem that is also compatible +with `net/http` can be used with chi's mux. 
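+
+For instance, here is a sketch of a typical stack wired from a few of the middlewares listed below (the compression level, timeout and heartbeat path are arbitrary example values):
+
+```go
+package main
+
+import (
+	"net/http"
+	"time"
+
+	"github.com/pressly/chi"
+	"github.com/pressly/chi/middleware"
+)
+
+func main() {
+	r := chi.NewRouter()
+	r.Use(middleware.RequestID)                 // request ID into the context
+	r.Use(middleware.RealIP)                    // honor X-Forwarded-For / X-Real-IP
+	r.Use(middleware.Compress(5))               // gzip/deflate for clients that accept it
+	r.Use(middleware.Timeout(30 * time.Second)) // cancel the request context after 30s
+	r.Use(middleware.Heartbeat("/ping"))        // cheap health check for load balancers
+
+	r.Get("/", func(w http.ResponseWriter, r *http.Request) {
+		w.Write([]byte("ok"))
+	})
+	http.ListenAndServe(":3000", r)
+}
+```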
+ +---------------------------------------------------------------------------------------------------------- +| Middleware | Description | +|:---------------------|:--------------------------------------------------------------------------------- +| RequestID | Injects a request ID into the context of each request. | +| RealIP | Sets a http.Request's RemoteAddr to either X-Forwarded-For or X-Real-IP. | +| Logger | Logs the start and end of each request with the elapsed processing time. | +| Recoverer | Gracefully absorb panics and prints the stack trace. | +| NoCache | Sets response headers to prevent clients from caching. | +| Timeout | Signals to the request context when the timeout deadline is reached. | +| Throttle | Puts a ceiling on the number of concurrent requests. | +| Compress | Gzip compression for clients that accept compressed responses. | +| Profiler | Easily attach net/http/pprof to your routers. | +| StripSlashes | Strip slashes on routing paths. | +| RedirectSlashes | Redirect slashes on routing paths. | +| WithValue | Short-hand middleware to set a key/value on the request context. | +| Heartbeat | Monitoring endpoint to check the servers pulse. | +---------------------------------------------------------------------------------------------------------- + +Other cool net/http middlewares: + +* [jwtauth](https://github.com/goware/jwtauth) - JWT authenticator +* [cors](https://github.com/goware/cors) - CORS middleware +* [httpcoala](https://github.com/goware/httpcoala) - Request coalescer + +please [submit a PR](./CONTRIBUTING.md) if you'd like to include a link to a chi middleware + + +## context? + +`context` is a tiny pkg that provides simple interface to signal context across call stacks +and goroutines. It was originally written by [Sameer Ajmani](https://github.com/Sajmani) +and is available in stdlib since go1.7. + +Learn more at https://blog.golang.org/context + +and.. +* Docs: https://golang.org/pkg/context +* Source: https://github.com/golang/go/tree/master/src/context + + +## Benchmarks + +The benchmark suite: https://github.com/pkieltyka/go-http-routing-benchmark + +Comparison with other routers (as of Jan 7/17): https://gist.github.com/pkieltyka/d0814d5396c996cb3ff8076399583d1f + +```shell +BenchmarkChi_Param 5000000 398 ns/op 304 B/op 2 allocs/op +BenchmarkChi_Param5 3000000 556 ns/op 304 B/op 2 allocs/op +BenchmarkChi_Param20 1000000 1184 ns/op 304 B/op 2 allocs/op +BenchmarkChi_ParamWrite 3000000 443 ns/op 304 B/op 2 allocs/op +BenchmarkChi_GithubStatic 3000000 427 ns/op 304 B/op 2 allocs/op +BenchmarkChi_GithubParam 3000000 565 ns/op 304 B/op 2 allocs/op +BenchmarkChi_GithubAll 10000 122143 ns/op 61716 B/op 406 allocs/op +BenchmarkChi_GPlusStatic 5000000 383 ns/op 304 B/op 2 allocs/op +BenchmarkChi_GPlusParam 3000000 431 ns/op 304 B/op 2 allocs/op +BenchmarkChi_GPlus2Params 3000000 500 ns/op 304 B/op 2 allocs/op +BenchmarkChi_GPlusAll 200000 6410 ns/op 3952 B/op 26 allocs/op +BenchmarkChi_ParseStatic 5000000 384 ns/op 304 B/op 2 allocs/op +BenchmarkChi_ParseParam 3000000 415 ns/op 304 B/op 2 allocs/op +BenchmarkChi_Parse2Params 3000000 450 ns/op 304 B/op 2 allocs/op +BenchmarkChi_ParseAll 100000 12124 ns/op 7904 B/op 52 allocs/op +BenchmarkChi_StaticAll 20000 78501 ns/op 47731 B/op 314 allocs/op +``` + +NOTE: the allocs in the benchmark above are from the calls to http.Request's +`WithContext(context.Context)` method that clones the http.Request, sets the `Context()` +on the duplicated (alloc'd) request and returns it the new request object. 
This is just +how setting context on a request in Go 1.7+ works. + + +## Credits + +* Carl Jackson for https://github.com/zenazn/goji + * Parts of chi's thinking comes from goji, and chi's middleware package + sources from goji. +* Armon Dadgar for https://github.com/armon/go-radix +* Contributions: [@VojtechVitek](https://github.com/VojtechVitek) + +We'll be more than happy to see [your contributions](./CONTRIBUTING.md)! + + +## Beyond REST + +chi is just a http router that lets you decompose request handling into many smaller layers. +Many companies including Pressly.com (of course) use chi to write REST services for their public +APIs. But, REST is just a convention for managing state via HTTP, and there's a lot of other pieces +required to write a complete client-server system or network of microservices. + +Looking ahead beyond REST, I also recommend some newer works in the field coming from +[gRPC](https://github.com/grpc/grpc-go), [NATS](https://nats.io), [go-kit](https://github.com/go-kit/kit) +and even [graphql](https://github.com/graphql-go/graphql). They're all pretty cool with their +own unique approaches and benefits. Specifically, I'd look at gRPC since it makes client-server +communication feel like a single program on a single computer, no need to hand-write a client library +and the request/response payloads are typed contracts. NATS is pretty amazing too as a super +fast and lightweight pub-sub transport that can speak protobufs, with nice service discovery - +an excellent combination with gRPC. + + +## License + +Copyright (c) 2015-present [Peter Kieltyka](https://github.com/pkieltyka) + +Licensed under [MIT License](./LICENSE) + +[GoDoc]: https://godoc.org/github.com/pressly/chi +[GoDoc Widget]: https://godoc.org/github.com/pressly/chi?status.svg +[Travis]: https://travis-ci.org/pressly/chi +[Travis Widget]: https://travis-ci.org/pressly/chi.svg?branch=master diff --git a/vendor/github.com/pressly/chi/chain.go b/vendor/github.com/pressly/chi/chain.go new file mode 100644 index 0000000..30e5247 --- /dev/null +++ b/vendor/github.com/pressly/chi/chain.go @@ -0,0 +1,47 @@ +package chi + +import "net/http" + +// Chain returns a Middlewares type from a slice of middleware handlers. +func Chain(middlewares ...func(http.Handler) http.Handler) Middlewares { + return Middlewares(middlewares) +} + +// Handler builds and returns a http.Handler from the chain of middlewares, +// with `h http.Handler` as the final handler. +func (mws Middlewares) Handler(h http.Handler) http.Handler { + return &ChainHandler{mws, h, chain(mws, h)} +} + +// HandlerFunc builds and returns a http.Handler from the chain of middlewares, +// with `h http.Handler` as the final handler. +func (mws Middlewares) HandlerFunc(h http.HandlerFunc) http.Handler { + return &ChainHandler{mws, h, chain(mws, h)} +} + +type ChainHandler struct { + Middlewares Middlewares + Endpoint http.Handler + chain http.Handler +} + +func (c *ChainHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + c.chain.ServeHTTP(w, r) +} + +// chain builds a http.Handler composed of an inline middleware stack and endpoint +// handler in the order they are passed. 
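+//
+// Illustration (added note): with middlewares [m1, m2, m3] and endpoint h, the
+// result is m1(m2(m3(h))); m1 is the outermost handler, so it sees the request
+// first and the response last.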
+func chain(middlewares []func(http.Handler) http.Handler, endpoint http.Handler) http.Handler { + // Return ahead of time if there aren't any middlewares for the chain + if len(middlewares) == 0 { + return endpoint + } + + // Wrap the end handler with the middleware chain + h := middlewares[len(middlewares)-1](endpoint) + for i := len(middlewares) - 2; i >= 0; i-- { + h = middlewares[i](h) + } + + return h +} diff --git a/vendor/github.com/pressly/chi/chi.go b/vendor/github.com/pressly/chi/chi.go new file mode 100644 index 0000000..7ef709e --- /dev/null +++ b/vendor/github.com/pressly/chi/chi.go @@ -0,0 +1,98 @@ +// +// Package chi is a small, idiomatic and composable router for building HTTP services. +// +// chi requires Go 1.7 or newer. +// +// Example: +// package main +// +// import ( +// "net/http" +// +// "github.com/pressly/chi" +// "github.com/pressly/chi/middleware" +// ) +// +// func main() { +// r := chi.NewRouter() +// r.Use(middleware.Logger) +// r.Use(middleware.Recoverer) +// +// r.Get("/", func(w http.ResponseWriter, r *http.Request) { +// w.Write([]byte("root.")) +// }) +// +// http.ListenAndServe(":3333", r) +// } +// +// See github.com/pressly/chi/_examples/ for more in-depth examples. +// +package chi + +import "net/http" + +// NewRouter returns a new Mux object that implements the Router interface. +func NewRouter() *Mux { + return NewMux() +} + +// Router consisting of the core routing methods used by chi's Mux, +// using only the standard net/http. +type Router interface { + http.Handler + Routes + + // Use appends one of more middlewares onto the Router stack. + Use(middlewares ...func(http.Handler) http.Handler) + + // With adds inline middlewares for an endpoint handler. + With(middlewares ...func(http.Handler) http.Handler) Router + + // Group adds a new inline-Router along the current routing + // path, with a fresh middleware stack for the inline-Router. + Group(fn func(r Router)) Router + + // Route mounts a sub-Router along a `pattern`` string. + Route(pattern string, fn func(r Router)) Router + + // Mount attaches another http.Handler along ./pattern/* + Mount(pattern string, h http.Handler) + + // Handle and HandleFunc adds routes for `pattern` that matches + // all HTTP methods. + Handle(pattern string, h http.Handler) + HandleFunc(pattern string, h http.HandlerFunc) + + // HTTP-method routing along `pattern` + Connect(pattern string, h http.HandlerFunc) + Delete(pattern string, h http.HandlerFunc) + Get(pattern string, h http.HandlerFunc) + Head(pattern string, h http.HandlerFunc) + Options(pattern string, h http.HandlerFunc) + Patch(pattern string, h http.HandlerFunc) + Post(pattern string, h http.HandlerFunc) + Put(pattern string, h http.HandlerFunc) + Trace(pattern string, h http.HandlerFunc) + + // NotFound defines a handler to respond whenever a route could + // not be found. + NotFound(h http.HandlerFunc) + + // MethodNotAllowed defines a handler to respond whenever a method is + // not allowed. + MethodNotAllowed(h http.HandlerFunc) +} + +// Routes interface adds two methods for router traversal, which is also +// used by the `docgen` subpackage to generation documentation for Routers. +type Routes interface { + // Routes returns the routing tree in an easily traversable structure. + Routes() []Route + + // Middlewares returns the list of middlewares in use by the router. + Middlewares() Middlewares +} + +// Middlewares type is a slice of standard middleware handlers with methods +// to compose middleware chains and http.Handler's. 
+type Middlewares []func(http.Handler) http.Handler diff --git a/vendor/github.com/pressly/chi/context.go b/vendor/github.com/pressly/chi/context.go new file mode 100644 index 0000000..189e7a8 --- /dev/null +++ b/vendor/github.com/pressly/chi/context.go @@ -0,0 +1,138 @@ +package chi + +import ( + "context" + "net" + "net/http" +) + +var ( + RouteCtxKey = &contextKey{"RouteContext"} +) + +// Context is the default routing context set on the root node of a +// request context to track URL parameters and an optional routing path. +type Context struct { + // URL routing parameter key and values. + URLParams params + + // Routing path override used by subrouters. + RoutePath string + + // Routing pattern matching the path. + RoutePattern string + + // Routing patterns throughout the lifecycle of the request, + // across all connected routers. + RoutePatterns []string +} + +// NewRouteContext returns a new routing Context object. +func NewRouteContext() *Context { + return &Context{} +} + +// reset a routing context to its initial state. +func (x *Context) reset() { + x.URLParams = x.URLParams[:0] + x.RoutePath = "" + x.RoutePattern = "" + x.RoutePatterns = x.RoutePatterns[:0] +} + +// RouteContext returns chi's routing Context object from a +// http.Request Context. +func RouteContext(ctx context.Context) *Context { + return ctx.Value(RouteCtxKey).(*Context) +} + +// URLParam returns the url parameter from a http.Request object. +func URLParam(r *http.Request, key string) string { + if rctx := RouteContext(r.Context()); rctx != nil { + return rctx.URLParams.Get(key) + } + return "" +} + +// URLParamFromCtx returns the url parameter from a http.Request Context. +func URLParamFromCtx(ctx context.Context, key string) string { + if rctx := RouteContext(ctx); rctx != nil { + return rctx.URLParams.Get(key) + } + return "" +} + +type param struct { + Key, Value string +} + +type params []param + +func (ps *params) Add(key string, value string) { + *ps = append(*ps, param{key, value}) +} + +func (ps params) Get(key string) string { + for _, p := range ps { + if p.Key == key { + return p.Value + } + } + return "" +} + +func (ps *params) Set(key string, value string) { + idx := -1 + for i, p := range *ps { + if p.Key == key { + idx = i + break + } + } + if idx < 0 { + (*ps).Add(key, value) + } else { + (*ps)[idx] = param{key, value} + } +} + +func (ps *params) Del(key string) string { + for i, p := range *ps { + if p.Key == key { + *ps = append((*ps)[:i], (*ps)[i+1:]...) + return p.Value + } + } + return "" +} + +// ServerBaseContext wraps an http.Handler to set the request context to the +// `baseCtx`. +func ServerBaseContext(h http.Handler, baseCtx context.Context) http.Handler { + fn := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + baseCtx := baseCtx + + // Copy over default net/http server context keys + if v, ok := ctx.Value(http.ServerContextKey).(*http.Server); ok { + baseCtx = context.WithValue(baseCtx, http.ServerContextKey, v) + } + if v, ok := ctx.Value(http.LocalAddrContextKey).(net.Addr); ok { + baseCtx = context.WithValue(baseCtx, http.LocalAddrContextKey, v) + } + + h.ServeHTTP(w, r.WithContext(baseCtx)) + }) + return fn +} + +// contextKey is a value for use with context.WithValue. It's used as +// a pointer so it fits in an interface{} without allocation. This technique +// for defining context keys was copied from Go 1.7's new use of context in net/http. 
+type contextKey struct { + name string +} + +func (k *contextKey) String() string { + return "chi context value " + k.name +} diff --git a/vendor/github.com/pressly/chi/middleware/closenotify17.go b/vendor/github.com/pressly/chi/middleware/closenotify17.go new file mode 100644 index 0000000..95802b1 --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/closenotify17.go @@ -0,0 +1,42 @@ +// +build go1.7,!go1.8 + +package middleware + +import ( + "context" + "net/http" +) + +// CloseNotify is a middleware that cancels ctx when the underlying +// connection has gone away. It can be used to cancel long operations +// on the server when the client disconnects before the response is ready. +// +// Note: this behaviour is standard in Go 1.8+, so the middleware does nothing +// on 1.8+ and exists just for backwards compatibility. +func CloseNotify(next http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + cn, ok := w.(http.CloseNotifier) + if !ok { + panic("chi/middleware: CloseNotify expects http.ResponseWriter to implement http.CloseNotifier interface") + } + closeNotifyCh := cn.CloseNotify() + + ctx, cancel := context.WithCancel(r.Context()) + defer cancel() + + go func() { + select { + case <-ctx.Done(): + return + case <-closeNotifyCh: + cancel() + return + } + }() + + r = r.WithContext(ctx) + next.ServeHTTP(w, r) + } + + return http.HandlerFunc(fn) +} diff --git a/vendor/github.com/pressly/chi/middleware/closenotify18.go b/vendor/github.com/pressly/chi/middleware/closenotify18.go new file mode 100644 index 0000000..ec54bca --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/closenotify18.go @@ -0,0 +1,17 @@ +// +build go1.8 + +package middleware + +import ( + "net/http" +) + +// CloseNotify is a middleware that cancels ctx when the underlying +// connection has gone away. It can be used to cancel long operations +// on the server when the client disconnects before the response is ready. +// +// Note: this behaviour is standard in Go 1.8+, so the middleware does nothing +// on 1.8+ and exists just for backwards compatibility. +func CloseNotify(next http.Handler) http.Handler { + return next +} diff --git a/vendor/github.com/pressly/chi/middleware/compress.go b/vendor/github.com/pressly/chi/middleware/compress.go new file mode 100644 index 0000000..17e2f3e --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/compress.go @@ -0,0 +1,212 @@ +package middleware + +import ( + "bufio" + "compress/flate" + "compress/gzip" + "errors" + "io" + "net" + "net/http" + "strings" +) + +type encoding int + +const ( + encodingNone encoding = iota + encodingGzip + encodingDeflate +) + +var defaultContentTypes = map[string]struct{}{ + "text/html": struct{}{}, + "text/css": struct{}{}, + "text/plain": struct{}{}, + "text/javascript": struct{}{}, + "application/javascript": struct{}{}, + "application/x-javascript": struct{}{}, + "application/json": struct{}{}, + "application/atom+xml": struct{}{}, + "application/rss+xml ": struct{}{}, +} + +// DefaultCompress is a middleware that compresses response +// body of predefined content types to a data format based +// on Accept-Encoding request header. It uses a default +// compression level. +func DefaultCompress(next http.Handler) http.Handler { + return Compress(flate.DefaultCompression)(next) +} + +// Compress is a middleware that compresses response +// body of a given content types to a data format based +// on Accept-Encoding request header. It uses a given +// compression level. 
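+//
+// Usage sketch (the level and content types are arbitrary examples):
+//
+//	r.Use(middleware.Compress(5, "text/html", "application/json"))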
+func Compress(level int, types ...string) func(next http.Handler) http.Handler { + contentTypes := defaultContentTypes + if len(types) > 0 { + contentTypes = make(map[string]struct{}, len(types)) + for _, t := range types { + contentTypes[t] = struct{}{} + } + } + + return func(next http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + mcw := &maybeCompressResponseWriter{ + ResponseWriter: w, + w: w, + contentTypes: contentTypes, + encoding: selectEncoding(r.Header), + level: level, + } + defer mcw.Close() + + next.ServeHTTP(mcw, r) + } + + return http.HandlerFunc(fn) + } +} + +func selectEncoding(h http.Header) encoding { + enc := h.Get("Accept-Encoding") + + switch { + // TODO: + // case "br": // Brotli, experimental. Firefox 2016, to-be-in Chromium. + // case "lzma": // Opera. + // case "sdch": // Chrome, Android. Gzip output + dictionary header. + + case strings.Contains(enc, "gzip"): + // TODO: Exception for old MSIE browsers that can't handle non-HTML? + // https://zoompf.com/blog/2012/02/lose-the-wait-http-compression + return encodingGzip + + case strings.Contains(enc, "deflate"): + // HTTP 1.1 "deflate" (RFC 2616) stands for DEFLATE data (RFC 1951) + // wrapped with zlib (RFC 1950). The zlib wrapper uses Adler-32 + // checksum compared to CRC-32 used in "gzip" and thus is faster. + // + // But.. some old browsers (MSIE, Safari 5.1) incorrectly expect + // raw DEFLATE data only, without the mentioned zlib wrapper. + // Because of this major confusion, most modern browsers try it + // both ways, first looking for zlib headers. + // Quote by Mark Adler: http://stackoverflow.com/a/9186091/385548 + // + // The list of browsers having problems is quite big, see: + // http://zoompf.com/blog/2012/02/lose-the-wait-http-compression + // https://web.archive.org/web/20120321182910/http://www.vervestudios.co/projects/compression-tests/results + // + // That's why we prefer gzip over deflate. It's just more reliable + // and not significantly slower than gzip. + return encodingDeflate + + // NOTE: Not implemented, intentionally: + // case "compress": // LZW. Deprecated. + // case "bzip2": // Too slow on-the-fly. + // case "zopfli": // Too slow on-the-fly. + // case "xz": // Too slow on-the-fly. + } + + return encodingNone +} + +type maybeCompressResponseWriter struct { + http.ResponseWriter + w io.Writer + encoding encoding + contentTypes map[string]struct{} + level int + wroteHeader bool +} + +func (w *maybeCompressResponseWriter) WriteHeader(code int) { + if w.wroteHeader { + return + } + w.wroteHeader = true + defer w.ResponseWriter.WriteHeader(code) + + // Already compressed data? + if w.ResponseWriter.Header().Get("Content-Encoding") != "" { + return + } + // The content-length after compression is unknown + w.ResponseWriter.Header().Del("Content-Length") + + // Parse the first part of the Content-Type response header. + contentType := "" + parts := strings.Split(w.ResponseWriter.Header().Get("Content-Type"), ";") + if len(parts) > 0 { + contentType = parts[0] + } + + // Is the content type compressable? + if _, ok := w.contentTypes[contentType]; !ok { + return + } + + // Select the compress writer. 
+ switch w.encoding { + case encodingGzip: + gw, err := gzip.NewWriterLevel(w.ResponseWriter, w.level) + if err != nil { + w.w = w.ResponseWriter + return + } + w.w = gw + w.ResponseWriter.Header().Set("Content-Encoding", "gzip") + + case encodingDeflate: + dw, err := flate.NewWriter(w.ResponseWriter, w.level) + if err != nil { + w.w = w.ResponseWriter + return + } + w.w = dw + w.ResponseWriter.Header().Set("Content-Encoding", "deflate") + } +} + +func (w *maybeCompressResponseWriter) Write(p []byte) (int, error) { + if !w.wroteHeader { + w.WriteHeader(http.StatusOK) + } + + return w.w.Write(p) +} + +func (w *maybeCompressResponseWriter) Flush() { + if f, ok := w.w.(http.Flusher); ok { + f.Flush() + } +} + +func (w *maybeCompressResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + if hj, ok := w.w.(http.Hijacker); ok { + return hj.Hijack() + } + return nil, nil, errors.New("chi/middleware: http.Hijacker is unavailable on the writer") +} + +func (w *maybeCompressResponseWriter) CloseNotify() <-chan bool { + if cn, ok := w.w.(http.CloseNotifier); ok { + return cn.CloseNotify() + } + + // If the underlying writer does not implement http.CloseNotifier, return + // a channel that never receives a value. The semantics here is that the + // client never disconnnects before the request is processed by the + // http.Handler, which is close enough to the default behavior (when + // CloseNotify() is not even called). + return make(chan bool, 1) +} + +func (w *maybeCompressResponseWriter) Close() error { + if c, ok := w.w.(io.WriteCloser); ok { + return c.Close() + } + return errors.New("chi/middleware: io.WriteCloser is unavailable on the writer") +} diff --git a/vendor/github.com/pressly/chi/middleware/compress18.go b/vendor/github.com/pressly/chi/middleware/compress18.go new file mode 100644 index 0000000..ad12b14 --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/compress18.go @@ -0,0 +1,15 @@ +// +build go1.8 + +package middleware + +import ( + "errors" + "net/http" +) + +func (w *maybeCompressResponseWriter) Push(target string, opts *http.PushOptions) error { + if ps, ok := w.w.(http.Pusher); ok { + return ps.Push(target, opts) + } + return errors.New("chi/middleware: http.Pusher is unavailable on the writer") +} diff --git a/vendor/github.com/pressly/chi/middleware/heartbeat.go b/vendor/github.com/pressly/chi/middleware/heartbeat.go new file mode 100644 index 0000000..fe822fb --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/heartbeat.go @@ -0,0 +1,26 @@ +package middleware + +import ( + "net/http" + "strings" +) + +// Heartbeat endpoint middleware useful to setting up a path like +// `/ping` that load balancers or uptime testing external services +// can make a request before hitting any routes. It's also convenient +// to place this above ACL middlewares as well. 
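To show how the Heartbeat middleware documented above is typically used, here is a minimal sketch; the "/ping" path, port, and handler body are placeholder choices.

package main

import (
	"net/http"

	"github.com/pressly/chi"
	"github.com/pressly/chi/middleware"
)

func main() {
	r := chi.NewRouter()

	// Answer GET /ping with "." before any other middleware or route runs,
	// so load balancers and uptime checks never reach application handlers.
	r.Use(middleware.Heartbeat("/ping"))

	r.Get("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("home"))
	})

	http.ListenAndServe(":8080", r)
}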
+func Heartbeat(endpoint string) func(http.Handler) http.Handler { + f := func(h http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + if r.Method == "GET" && strings.EqualFold(r.URL.Path, endpoint) { + w.Header().Set("Content-Type", "text/plain") + w.WriteHeader(http.StatusOK) + w.Write([]byte(".")) + return + } + h.ServeHTTP(w, r) + } + return http.HandlerFunc(fn) + } + return f +} diff --git a/vendor/github.com/pressly/chi/middleware/logger.go b/vendor/github.com/pressly/chi/middleware/logger.go new file mode 100644 index 0000000..7d68add --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/logger.go @@ -0,0 +1,137 @@ +package middleware + +import ( + "bytes" + "context" + "log" + "net/http" + "os" + "time" +) + +var ( + LogEntryCtxKey = &contextKey{"LogEntry"} + + DefaultLogger = RequestLogger(&DefaultLogFormatter{Logger: log.New(os.Stdout, "", log.LstdFlags)}) +) + +// Logger is a middleware that logs the start and end of each request, along +// with some useful data about what was requested, what the response status was, +// and how long it took to return. When standard output is a TTY, Logger will +// print in color, otherwise it will print in black and white. Logger prints a +// request ID if one is provided. +// +// Alternatively, look at https://github.com/pressly/lg and the `lg.RequestLogger` +// middleware pkg. +func Logger(next http.Handler) http.Handler { + return DefaultLogger(next) +} + +func RequestLogger(f LogFormatter) func(next http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + entry := f.NewLogEntry(r) + ww := NewWrapResponseWriter(w, r.ProtoMajor) + + t1 := time.Now() + defer func() { + t2 := time.Now() + entry.Write(ww.Status(), ww.BytesWritten(), t2.Sub(t1)) + }() + + next.ServeHTTP(ww, WithLogEntry(r, entry)) + } + return http.HandlerFunc(fn) + } +} + +type LogFormatter interface { + NewLogEntry(r *http.Request) LogEntry +} + +type LogEntry interface { + Write(status, bytes int, elapsed time.Duration) + Panic(v interface{}, stack []byte) +} + +func GetLogEntry(r *http.Request) LogEntry { + entry, _ := r.Context().Value(LogEntryCtxKey).(LogEntry) + return entry +} + +func WithLogEntry(r *http.Request, entry LogEntry) *http.Request { + r = r.WithContext(context.WithValue(r.Context(), LogEntryCtxKey, entry)) + return r +} + +type DefaultLogFormatter struct { + Logger *log.Logger +} + +func (l *DefaultLogFormatter) NewLogEntry(r *http.Request) LogEntry { + entry := &defaultLogEntry{ + DefaultLogFormatter: l, + request: r, + buf: &bytes.Buffer{}, + } + + reqID := GetReqID(r.Context()) + if reqID != "" { + cW(entry.buf, nYellow, "[%s] ", reqID) + } + cW(entry.buf, nCyan, "\"") + cW(entry.buf, bMagenta, "%s ", r.Method) + + scheme := "http" + if r.TLS != nil { + scheme = "https" + } + cW(entry.buf, nCyan, "%s://%s%s %s\" ", scheme, r.Host, r.RequestURI, r.Proto) + + entry.buf.WriteString("from ") + entry.buf.WriteString(r.RemoteAddr) + entry.buf.WriteString(" - ") + + return entry +} + +type defaultLogEntry struct { + *DefaultLogFormatter + request *http.Request + buf *bytes.Buffer +} + +func (l *defaultLogEntry) Write(status, bytes int, elapsed time.Duration) { + switch { + case status < 200: + cW(l.buf, bBlue, "%03d", status) + case status < 300: + cW(l.buf, bGreen, "%03d", status) + case status < 400: + cW(l.buf, bCyan, "%03d", status) + case status < 500: + cW(l.buf, bYellow, "%03d", status) + default: + cW(l.buf, bRed, "%03d", status) + } + + 
cW(l.buf, bBlue, " %dB", bytes) + + l.buf.WriteString(" in ") + if elapsed < 500*time.Millisecond { + cW(l.buf, nGreen, "%s", elapsed) + } else if elapsed < 5*time.Second { + cW(l.buf, nYellow, "%s", elapsed) + } else { + cW(l.buf, nRed, "%s", elapsed) + } + + l.Logger.Print(l.buf.String()) +} + +func (l *defaultLogEntry) Panic(v interface{}, stack []byte) { + panicEntry := l.NewLogEntry(l.request).(*defaultLogEntry) + cW(panicEntry.buf, bRed, "panic: %+v", v) + l.Logger.Print(panicEntry.buf.String()) + l.Logger.Print(string(stack)) +} diff --git a/vendor/github.com/pressly/chi/middleware/middleware.go b/vendor/github.com/pressly/chi/middleware/middleware.go new file mode 100644 index 0000000..be6a44f --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/middleware.go @@ -0,0 +1,12 @@ +package middleware + +// contextKey is a value for use with context.WithValue. It's used as +// a pointer so it fits in an interface{} without allocation. This technique +// for defining context keys was copied from Go 1.7's new use of context in net/http. +type contextKey struct { + name string +} + +func (k *contextKey) String() string { + return "chi/middleware context value " + k.name +} diff --git a/vendor/github.com/pressly/chi/middleware/nocache.go b/vendor/github.com/pressly/chi/middleware/nocache.go new file mode 100644 index 0000000..7e8747f --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/nocache.go @@ -0,0 +1,58 @@ +package middleware + +// Ported from Goji's middleware, source: +// https://github.com/zenazn/goji/tree/master/web/middleware + +import ( + "net/http" + "time" +) + +// Unix epoch time +var epoch = time.Unix(0, 0).Format(time.RFC1123) + +// Taken from https://github.com/mytrile/nocache +var noCacheHeaders = map[string]string{ + "Expires": epoch, + "Cache-Control": "no-cache, private, max-age=0", + "Pragma": "no-cache", + "X-Accel-Expires": "0", +} + +var etagHeaders = []string{ + "ETag", + "If-Modified-Since", + "If-Match", + "If-None-Match", + "If-Range", + "If-Unmodified-Since", +} + +// NoCache is a simple piece of middleware that sets a number of HTTP headers to prevent +// a router (or subrouter) from being cached by an upstream proxy and/or client. +// +// As per http://wiki.nginx.org/HttpProxyModule - NoCache sets: +// Expires: Thu, 01 Jan 1970 00:00:00 UTC +// Cache-Control: no-cache, private, max-age=0 +// X-Accel-Expires: 0 +// Pragma: no-cache (for HTTP/1.0 proxies/clients) +func NoCache(h http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + + // Delete any ETag headers that may have been set + for _, v := range etagHeaders { + if r.Header.Get(v) != "" { + r.Header.Del(v) + } + } + + // Set our NoCache headers + for k, v := range noCacheHeaders { + w.Header().Set(k, v) + } + + h.ServeHTTP(w, r) + } + + return http.HandlerFunc(fn) +} diff --git a/vendor/github.com/pressly/chi/middleware/profiler.go b/vendor/github.com/pressly/chi/middleware/profiler.go new file mode 100644 index 0000000..f2b843c --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/profiler.go @@ -0,0 +1,58 @@ +package middleware + +import ( + "expvar" + "fmt" + "net/http" + "net/http/pprof" + + "github.com/pressly/chi" +) + +// Profiler is a convenient subrouter used for mounting net/http/pprof. ie. 
+// +// func MyService() http.Handler { +// r := chi.NewRouter() +// // ..middlewares +// r.Mount("/debug", middleware.Profiler()) +// // ..routes +// return r +// } +func Profiler() http.Handler { + r := chi.NewRouter() + r.Use(NoCache) + + r.Get("/", func(w http.ResponseWriter, r *http.Request) { + http.Redirect(w, r, r.RequestURI+"/pprof/", 301) + }) + r.HandleFunc("/pprof", func(w http.ResponseWriter, r *http.Request) { + http.Redirect(w, r, r.RequestURI+"/", 301) + }) + + r.HandleFunc("/pprof/", pprof.Index) + r.HandleFunc("/pprof/cmdline", pprof.Cmdline) + r.HandleFunc("/pprof/profile", pprof.Profile) + r.HandleFunc("/pprof/symbol", pprof.Symbol) + r.Handle("/pprof/block", pprof.Handler("block")) + r.Handle("/pprof/heap", pprof.Handler("heap")) + r.Handle("/pprof/goroutine", pprof.Handler("goroutine")) + r.Handle("/pprof/threadcreate", pprof.Handler("threadcreate")) + r.HandleFunc("/vars", expVars) + + return r +} + +// Replicated from expvar.go as not public. +func expVars(w http.ResponseWriter, r *http.Request) { + first := true + w.Header().Set("Content-Type", "application/json; charset=utf-8") + fmt.Fprintf(w, "{\n") + expvar.Do(func(kv expvar.KeyValue) { + if !first { + fmt.Fprintf(w, ",\n") + } + first = false + fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) + }) + fmt.Fprintf(w, "\n}\n") +} diff --git a/vendor/github.com/pressly/chi/middleware/realip.go b/vendor/github.com/pressly/chi/middleware/realip.go new file mode 100644 index 0000000..e9addbe --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/realip.go @@ -0,0 +1,54 @@ +package middleware + +// Ported from Goji's middleware, source: +// https://github.com/zenazn/goji/tree/master/web/middleware + +import ( + "net/http" + "strings" +) + +var xForwardedFor = http.CanonicalHeaderKey("X-Forwarded-For") +var xRealIP = http.CanonicalHeaderKey("X-Real-IP") + +// RealIP is a middleware that sets a http.Request's RemoteAddr to the results +// of parsing either the X-Forwarded-For header or the X-Real-IP header (in that +// order). +// +// This middleware should be inserted fairly early in the middleware stack to +// ensure that subsequent layers (e.g., request loggers) which examine the +// RemoteAddr will see the intended value. +// +// You should only use this middleware if you can trust the headers passed to +// you (in particular, the two headers this middleware uses), for example +// because you have placed a reverse proxy like HAProxy or nginx in front of +// Goji. If your reverse proxies are configured to pass along arbitrary header +// values from the client, or if you use this middleware without a reverse +// proxy, malicious clients will be able to make you very sad (or, depending on +// how you're using RemoteAddr, vulnerable to an attack of some sort). 
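The RealIP caveat above matters in practice: the middleware should only be installed when a trusted reverse proxy controls the forwarding headers. A hedged sketch, with the proxy assumption stated in the comments and the port and handler as placeholders:

package main

import (
	"log"
	"net/http"

	"github.com/pressly/chi"
	"github.com/pressly/chi/middleware"
)

func main() {
	r := chi.NewRouter()

	// Only safe behind a trusted proxy that sets X-Forwarded-For / X-Real-IP.
	// Installed before Logger so request logs show the real client address.
	r.Use(middleware.RealIP)
	r.Use(middleware.Logger)

	r.Get("/", func(w http.ResponseWriter, r *http.Request) {
		// RemoteAddr has already been rewritten by RealIP at this point.
		log.Printf("request from %s", r.RemoteAddr)
		w.Write([]byte("ok"))
	})

	http.ListenAndServe(":8080", r)
}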
+func RealIP(h http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + if rip := realIP(r); rip != "" { + r.RemoteAddr = rip + } + h.ServeHTTP(w, r) + } + + return http.HandlerFunc(fn) +} + +func realIP(r *http.Request) string { + var ip string + + if xff := r.Header.Get(xForwardedFor); xff != "" { + i := strings.Index(xff, ", ") + if i == -1 { + i = len(xff) + } + ip = xff[:i] + } else if xrip := r.Header.Get(xRealIP); xrip != "" { + ip = xrip + } + + return ip +} diff --git a/vendor/github.com/pressly/chi/middleware/recoverer.go b/vendor/github.com/pressly/chi/middleware/recoverer.go new file mode 100644 index 0000000..dc9b64c --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/recoverer.go @@ -0,0 +1,36 @@ +package middleware + +// The original work was derived from Goji's middleware, source: +// https://github.com/zenazn/goji/tree/master/web/middleware + +import ( + "net/http" + "runtime/debug" +) + +// Recoverer is a middleware that recovers from panics, logs the panic (and a +// backtrace), and returns a HTTP 500 (Internal Server Error) status if +// possible. Recoverer prints a request ID if one is provided. +// +// Alternatively, look at https://github.com/pressly/lg middleware pkgs. +func Recoverer(next http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + defer func() { + if rvr := recover(); rvr != nil { + + logEntry := GetLogEntry(r) + if logEntry != nil { + logEntry.Panic(rvr, debug.Stack()) + } else { + debug.PrintStack() + } + + http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError) + } + }() + + next.ServeHTTP(w, r) + } + + return http.HandlerFunc(fn) +} diff --git a/vendor/github.com/pressly/chi/middleware/request_id.go b/vendor/github.com/pressly/chi/middleware/request_id.go new file mode 100644 index 0000000..4574bde --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/request_id.go @@ -0,0 +1,88 @@ +package middleware + +// Ported from Goji's middleware, source: +// https://github.com/zenazn/goji/tree/master/web/middleware + +import ( + "context" + "crypto/rand" + "encoding/base64" + "fmt" + "net/http" + "os" + "strings" + "sync/atomic" +) + +// Key to use when setting the request ID. +type ctxKeyRequestID int + +// RequestIDKey is the key that holds th unique request ID in a request context. +const RequestIDKey ctxKeyRequestID = 0 + +var prefix string +var reqid uint64 + +// A quick note on the statistics here: we're trying to calculate the chance that +// two randomly generated base62 prefixes will collide. We use the formula from +// http://en.wikipedia.org/wiki/Birthday_problem +// +// P[m, n] \approx 1 - e^{-m^2/2n} +// +// We ballpark an upper bound for $m$ by imagining (for whatever reason) a server +// that restarts every second over 10 years, for $m = 86400 * 365 * 10 = 315360000$ +// +// For a $k$ character base-62 identifier, we have $n(k) = 62^k$ +// +// Plugging this in, we find $P[m, n(10)] \approx 5.75%$, which is good enough for +// our purposes, and is surely more than anyone would ever need in practice -- a +// process that is rebooted a handful of times a day for a hundred years has less +// than a millionth of a percent chance of generating two colliding IDs. 
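Tying the request-ID discussion above together with the Logger and Recoverer middlewares from this patch, a common base stack might look like the sketch below; ordering (RequestID first) is deliberate so later middlewares can print the ID, and the port and response text are placeholders.

package main

import (
	"fmt"
	"net/http"

	"github.com/pressly/chi"
	"github.com/pressly/chi/middleware"
)

func main() {
	r := chi.NewRouter()

	// Tag every request with an ID first, so Logger and Recoverer include it.
	r.Use(middleware.RequestID)
	r.Use(middleware.Logger)
	r.Use(middleware.Recoverer)

	r.Get("/", func(w http.ResponseWriter, r *http.Request) {
		// GetReqID reads the value that RequestID stored in the context.
		fmt.Fprintf(w, "handled request %s", middleware.GetReqID(r.Context()))
	})

	http.ListenAndServe(":8080", r)
}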
+ +func init() { + hostname, err := os.Hostname() + if hostname == "" || err != nil { + hostname = "localhost" + } + var buf [12]byte + var b64 string + for len(b64) < 10 { + rand.Read(buf[:]) + b64 = base64.StdEncoding.EncodeToString(buf[:]) + b64 = strings.NewReplacer("+", "", "/", "").Replace(b64) + } + + prefix = fmt.Sprintf("%s/%s", hostname, b64[0:10]) +} + +// RequestID is a middleware that injects a request ID into the context of each +// request. A request ID is a string of the form "host.example.com/random-0001", +// where "random" is a base62 random string that uniquely identifies this go +// process, and where the last number is an atomically incremented request +// counter. +func RequestID(next http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + myid := atomic.AddUint64(&reqid, 1) + ctx := r.Context() + ctx = context.WithValue(ctx, RequestIDKey, fmt.Sprintf("%s-%06d", prefix, myid)) + next.ServeHTTP(w, r.WithContext(ctx)) + } + return http.HandlerFunc(fn) +} + +// GetReqID returns a request ID from the given context if one is present. +// Returns the empty string if a request ID cannot be found. +func GetReqID(ctx context.Context) string { + if ctx == nil { + return "" + } + if reqID, ok := ctx.Value(RequestIDKey).(string); ok { + return reqID + } + return "" +} + +// NextRequestID generates the next request ID in the sequence. +func NextRequestID() uint64 { + return atomic.AddUint64(&reqid, 1) +} diff --git a/vendor/github.com/pressly/chi/middleware/strip.go b/vendor/github.com/pressly/chi/middleware/strip.go new file mode 100644 index 0000000..cff4d7a --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/strip.go @@ -0,0 +1,48 @@ +package middleware + +import ( + "net/http" + + "github.com/pressly/chi" +) + +// StripSlashes is a middleware that will match request paths with a trailing +// slash, strip it from the path and continue routing through the mux, if a route +// matches, then it will serve the handler. +func StripSlashes(next http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + var path string + rctx := chi.RouteContext(r.Context()) + if rctx.RoutePath != "" { + path = rctx.RoutePath + } else { + path = r.URL.Path + } + if len(path) > 1 && path[len(path)-1] == '/' { + rctx.RoutePath = path[:len(path)-1] + } + next.ServeHTTP(w, r) + } + return http.HandlerFunc(fn) +} + +// RedirectSlashes is a middleware that will match request paths with a trailing +// slash and redirect to the same path, less the trailing slash. 
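A short sketch of how the slash-handling middlewares above might be applied; normally only one of the two is installed, and the route and port here are illustrative only.

package main

import (
	"net/http"

	"github.com/pressly/chi"
	"github.com/pressly/chi/middleware"
)

func main() {
	r := chi.NewRouter()

	// StripSlashes rewrites "/users/" to "/users" before routing. The
	// alternative, middleware.RedirectSlashes, would 301 the client instead.
	r.Use(middleware.StripSlashes)

	r.Get("/users", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("user list"))
	})

	http.ListenAndServe(":8080", r)
}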
+func RedirectSlashes(next http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + var path string + rctx := chi.RouteContext(r.Context()) + if rctx.RoutePath != "" { + path = rctx.RoutePath + } else { + path = r.URL.Path + } + if len(path) > 1 && path[len(path)-1] == '/' { + path = path[:len(path)-1] + http.Redirect(w, r, path, 301) + return + } + next.ServeHTTP(w, r) + } + return http.HandlerFunc(fn) +} diff --git a/vendor/github.com/pressly/chi/middleware/terminal.go b/vendor/github.com/pressly/chi/middleware/terminal.go new file mode 100644 index 0000000..79930a2 --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/terminal.go @@ -0,0 +1,63 @@ +package middleware + +// Ported from Goji's middleware, source: +// https://github.com/zenazn/goji/tree/master/web/middleware + +import ( + "fmt" + "io" + "os" +) + +var ( + // Normal colors + nBlack = []byte{'\033', '[', '3', '0', 'm'} + nRed = []byte{'\033', '[', '3', '1', 'm'} + nGreen = []byte{'\033', '[', '3', '2', 'm'} + nYellow = []byte{'\033', '[', '3', '3', 'm'} + nBlue = []byte{'\033', '[', '3', '4', 'm'} + nMagenta = []byte{'\033', '[', '3', '5', 'm'} + nCyan = []byte{'\033', '[', '3', '6', 'm'} + nWhite = []byte{'\033', '[', '3', '7', 'm'} + // Bright colors + bBlack = []byte{'\033', '[', '3', '0', ';', '1', 'm'} + bRed = []byte{'\033', '[', '3', '1', ';', '1', 'm'} + bGreen = []byte{'\033', '[', '3', '2', ';', '1', 'm'} + bYellow = []byte{'\033', '[', '3', '3', ';', '1', 'm'} + bBlue = []byte{'\033', '[', '3', '4', ';', '1', 'm'} + bMagenta = []byte{'\033', '[', '3', '5', ';', '1', 'm'} + bCyan = []byte{'\033', '[', '3', '6', ';', '1', 'm'} + bWhite = []byte{'\033', '[', '3', '7', ';', '1', 'm'} + + reset = []byte{'\033', '[', '0', 'm'} +) + +var isTTY bool + +func init() { + // This is sort of cheating: if stdout is a character device, we assume + // that means it's a TTY. Unfortunately, there are many non-TTY + // character devices, but fortunately stdout is rarely set to any of + // them. + // + // We could solve this properly by pulling in a dependency on + // code.google.com/p/go.crypto/ssh/terminal, for instance, but as a + // heuristic for whether to print in color or in black-and-white, I'd + // really rather not. + fi, err := os.Stdout.Stat() + if err == nil { + m := os.ModeDevice | os.ModeCharDevice + isTTY = fi.Mode()&m == m + } +} + +// colorWrite +func cW(w io.Writer, color []byte, s string, args ...interface{}) { + if isTTY { + w.Write(color) + } + fmt.Fprintf(w, s, args...) + if isTTY { + w.Write(reset) + } +} diff --git a/vendor/github.com/pressly/chi/middleware/throttler.go b/vendor/github.com/pressly/chi/middleware/throttler.go new file mode 100644 index 0000000..d935e2c --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/throttler.go @@ -0,0 +1,101 @@ +package middleware + +import ( + "net/http" + "time" +) + +const ( + errCapacityExceeded = "Server capacity exceeded." + errTimedOut = "Timed out while waiting for a pending request to complete." + errContextCanceled = "Context was canceled." +) + +var ( + defaultBacklogTimeout = time.Second * 60 +) + +// Throttle is a middleware that limits number of currently processed requests +// at a time. +func Throttle(limit int) func(http.Handler) http.Handler { + return ThrottleBacklog(limit, 0, defaultBacklogTimeout) +} + +// ThrottleBacklog is a middleware that limits number of currently processed +// requests at a time and provides a backlog for holding a finite number of +// pending requests. 
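To make the Throttle/ThrottleBacklog behaviour described above concrete, here is a hedged sketch; the concurrency limit, backlog size, and timeout are arbitrary example values.

package main

import (
	"net/http"
	"time"

	"github.com/pressly/chi"
	"github.com/pressly/chi/middleware"
)

func main() {
	r := chi.NewRouter()

	// Process at most 100 requests concurrently, queue up to 50 more, and
	// reject queued requests with 503 after 30 seconds of waiting.
	r.Use(middleware.ThrottleBacklog(100, 50, 30*time.Second))

	r.Get("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	http.ListenAndServe(":8080", r)
}

Throttle(limit) is the shorthand for the same behaviour with no backlog.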
+func ThrottleBacklog(limit int, backlogLimit int, backlogTimeout time.Duration) func(http.Handler) http.Handler { + if limit < 1 { + panic("chi/middleware: Throttle expects limit > 0") + } + + if backlogLimit < 0 { + panic("chi/middleware: Throttle expects backlogLimit to be positive") + } + + t := throttler{ + tokens: make(chan token, limit), + backlogTokens: make(chan token, limit+backlogLimit), + backlogTimeout: backlogTimeout, + } + + // Filling tokens. + for i := 0; i < limit+backlogLimit; i++ { + if i < limit { + t.tokens <- token{} + } + t.backlogTokens <- token{} + } + + fn := func(h http.Handler) http.Handler { + t.h = h + return &t + } + + return fn +} + +// token represents a request that is being processed. +type token struct{} + +// throttler limits number of currently processed requests at a time. +type throttler struct { + h http.Handler + tokens chan token + backlogTokens chan token + backlogTimeout time.Duration +} + +// ServeHTTP is the primary throttler request handler +func (t *throttler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + select { + case <-ctx.Done(): + http.Error(w, errContextCanceled, http.StatusServiceUnavailable) + return + case btok := <-t.backlogTokens: + timer := time.NewTimer(t.backlogTimeout) + + defer func() { + t.backlogTokens <- btok + }() + + select { + case <-timer.C: + http.Error(w, errTimedOut, http.StatusServiceUnavailable) + return + case <-ctx.Done(): + http.Error(w, errContextCanceled, http.StatusServiceUnavailable) + return + case tok := <-t.tokens: + defer func() { + t.tokens <- tok + }() + t.h.ServeHTTP(w, r) + } + return + default: + http.Error(w, errCapacityExceeded, http.StatusServiceUnavailable) + return + } +} diff --git a/vendor/github.com/pressly/chi/middleware/timeout.go b/vendor/github.com/pressly/chi/middleware/timeout.go new file mode 100644 index 0000000..5cabf1f --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/timeout.go @@ -0,0 +1,48 @@ +package middleware + +import ( + "context" + "net/http" + "time" +) + +// Timeout is a middleware that cancels ctx after a given timeout and return +// a 504 Gateway Timeout error to the client. +// +// It's required that you select the ctx.Done() channel to check for the signal +// if the context has reached its deadline and return, otherwise the timeout +// signal will be just ignored. +// +// ie. a route/handler may look like: +// +// r.Get("/long", func(ctx context.Context, w http.ResponseWriter, r *http.Request) { +// processTime := time.Duration(rand.Intn(4)+1) * time.Second +// +// select { +// case <-ctx.Done(): +// return +// +// case <-time.After(processTime): +// // The above channel simulates some hard work. 
+// } +// +// w.Write([]byte("done")) +// }) +// +func Timeout(timeout time.Duration) func(next http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + ctx, cancel := context.WithTimeout(r.Context(), timeout) + defer func() { + cancel() + if ctx.Err() == context.DeadlineExceeded { + w.WriteHeader(http.StatusGatewayTimeout) + } + }() + + r = r.WithContext(ctx) + next.ServeHTTP(w, r) + } + return http.HandlerFunc(fn) + } +} diff --git a/vendor/github.com/pressly/chi/middleware/value.go b/vendor/github.com/pressly/chi/middleware/value.go new file mode 100644 index 0000000..fbbd039 --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/value.go @@ -0,0 +1,17 @@ +package middleware + +import ( + "context" + "net/http" +) + +// WithValue is a middleware that sets a given key/value in a context chain. +func WithValue(key interface{}, val interface{}) func(next http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + r = r.WithContext(context.WithValue(r.Context(), key, val)) + next.ServeHTTP(w, r) + } + return http.HandlerFunc(fn) + } +} diff --git a/vendor/github.com/pressly/chi/middleware/wrap_writer.go b/vendor/github.com/pressly/chi/middleware/wrap_writer.go new file mode 100644 index 0000000..9991736 --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/wrap_writer.go @@ -0,0 +1,144 @@ +package middleware + +// The original work was derived from Goji's middleware, source: +// https://github.com/zenazn/goji/tree/master/web/middleware + +import ( + "bufio" + "io" + "net" + "net/http" +) + +// WrapResponseWriter is a proxy around an http.ResponseWriter that allows you to hook +// into various parts of the response process. +type WrapResponseWriter interface { + http.ResponseWriter + // Status returns the HTTP status of the request, or 0 if one has not + // yet been sent. + Status() int + // BytesWritten returns the total number of bytes sent to the client. + BytesWritten() int + // Tee causes the response body to be written to the given io.Writer in + // addition to proxying the writes through. Only one io.Writer can be + // tee'd to at once: setting a second one will overwrite the first. + // Writes will be sent to the proxy before being written to this + // io.Writer. It is illegal for the tee'd writer to be modified + // concurrently with writes. + Tee(io.Writer) + // Unwrap returns the original proxied target. + Unwrap() http.ResponseWriter +} + +// basicWriter wraps a http.ResponseWriter that implements the minimal +// http.ResponseWriter interface. +type basicWriter struct { + http.ResponseWriter + wroteHeader bool + code int + bytes int + tee io.Writer +} + +func (b *basicWriter) WriteHeader(code int) { + if !b.wroteHeader { + b.code = code + b.wroteHeader = true + b.ResponseWriter.WriteHeader(code) + } +} +func (b *basicWriter) Write(buf []byte) (int, error) { + b.WriteHeader(http.StatusOK) + n, err := b.ResponseWriter.Write(buf) + if b.tee != nil { + _, err2 := b.tee.Write(buf[:n]) + // Prefer errors generated by the proxied writer. 
+ if err == nil { + err = err2 + } + } + b.bytes += n + return n, err +} +func (b *basicWriter) maybeWriteHeader() { + if !b.wroteHeader { + b.WriteHeader(http.StatusOK) + } +} +func (b *basicWriter) Status() int { + return b.code +} +func (b *basicWriter) BytesWritten() int { + return b.bytes +} +func (b *basicWriter) Tee(w io.Writer) { + b.tee = w +} +func (b *basicWriter) Unwrap() http.ResponseWriter { + return b.ResponseWriter +} + +type flushWriter struct { + basicWriter +} + +func (f *flushWriter) Flush() { + fl := f.basicWriter.ResponseWriter.(http.Flusher) + fl.Flush() +} + +var _ http.Flusher = &flushWriter{} + +// httpFancyWriter is a HTTP writer that additionally satisfies http.CloseNotifier, +// http.Flusher, http.Hijacker, and io.ReaderFrom. It exists for the common case +// of wrapping the http.ResponseWriter that package http gives you, in order to +// make the proxied object support the full method set of the proxied object. +type httpFancyWriter struct { + basicWriter +} + +func (f *httpFancyWriter) CloseNotify() <-chan bool { + cn := f.basicWriter.ResponseWriter.(http.CloseNotifier) + return cn.CloseNotify() +} +func (f *httpFancyWriter) Flush() { + fl := f.basicWriter.ResponseWriter.(http.Flusher) + fl.Flush() +} +func (f *httpFancyWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + hj := f.basicWriter.ResponseWriter.(http.Hijacker) + return hj.Hijack() +} +func (f *httpFancyWriter) ReadFrom(r io.Reader) (int64, error) { + if f.basicWriter.tee != nil { + return io.Copy(&f.basicWriter, r) + } + rf := f.basicWriter.ResponseWriter.(io.ReaderFrom) + f.basicWriter.maybeWriteHeader() + return rf.ReadFrom(r) +} + +var _ http.CloseNotifier = &httpFancyWriter{} +var _ http.Flusher = &httpFancyWriter{} +var _ http.Hijacker = &httpFancyWriter{} +var _ io.ReaderFrom = &httpFancyWriter{} + +// http2FancyWriter is a HTTP2 writer that additionally satisfies http.CloseNotifier, +// http.Flusher, and io.ReaderFrom. It exists for the common case +// of wrapping the http.ResponseWriter that package http gives you, in order to +// make the proxied object support the full method set of the proxied object. +type http2FancyWriter struct { + basicWriter +} + +func (f *http2FancyWriter) CloseNotify() <-chan bool { + cn := f.basicWriter.ResponseWriter.(http.CloseNotifier) + return cn.CloseNotify() +} +func (f *http2FancyWriter) Flush() { + fl := f.basicWriter.ResponseWriter.(http.Flusher) + fl.Flush() +} + +var _ http.CloseNotifier = &http2FancyWriter{} +var _ http.Flusher = &http2FancyWriter{} diff --git a/vendor/github.com/pressly/chi/middleware/wrap_writer17.go b/vendor/github.com/pressly/chi/middleware/wrap_writer17.go new file mode 100644 index 0000000..c60df60 --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/wrap_writer17.go @@ -0,0 +1,34 @@ +// +build go1.7,!go1.8 + +package middleware + +import ( + "io" + "net/http" +) + +// NewWrapResponseWriter wraps an http.ResponseWriter, returning a proxy that allows you to +// hook into various parts of the response process. 
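The WrapResponseWriter interface above is what the Logger middleware relies on; the sketch below shows the same pattern in a toy custom middleware. The middleware name and log format are made up for illustration.

package main

import (
	"log"
	"net/http"

	"github.com/pressly/chi"
	"github.com/pressly/chi/middleware"
)

// statusCounter wraps the response writer, lets the chain run, then reads
// the status code and byte count that were written.
func statusCounter(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ww := middleware.NewWrapResponseWriter(w, r.ProtoMajor)
		next.ServeHTTP(ww, r)
		log.Printf("%s %s -> %d (%d bytes)", r.Method, r.URL.Path, ww.Status(), ww.BytesWritten())
	})
}

func main() {
	r := chi.NewRouter()
	r.Use(statusCounter)
	r.Get("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})
	http.ListenAndServe(":8080", r)
}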
+func NewWrapResponseWriter(w http.ResponseWriter, protoMajor int) WrapResponseWriter { + _, cn := w.(http.CloseNotifier) + _, fl := w.(http.Flusher) + + bw := basicWriter{ResponseWriter: w} + + if protoMajor == 2 { + if cn && fl { + return &http2FancyWriter{bw} + } + } else { + _, hj := w.(http.Hijacker) + _, rf := w.(io.ReaderFrom) + if cn && fl && hj && rf { + return &httpFancyWriter{bw} + } + } + if fl { + return &flushWriter{bw} + } + + return &bw +} diff --git a/vendor/github.com/pressly/chi/middleware/wrap_writer18.go b/vendor/github.com/pressly/chi/middleware/wrap_writer18.go new file mode 100644 index 0000000..9233d8b --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/wrap_writer18.go @@ -0,0 +1,41 @@ +// +build go1.8 + +package middleware + +import ( + "io" + "net/http" +) + +// NewWrapResponseWriter wraps an http.ResponseWriter, returning a proxy that allows you to +// hook into various parts of the response process. +func NewWrapResponseWriter(w http.ResponseWriter, protoMajor int) WrapResponseWriter { + _, cn := w.(http.CloseNotifier) + _, fl := w.(http.Flusher) + + bw := basicWriter{ResponseWriter: w} + + if protoMajor == 2 { + _, ps := w.(http.Pusher) + if cn && fl && ps { + return &http2FancyWriter{bw} + } + } else { + _, hj := w.(http.Hijacker) + _, rf := w.(io.ReaderFrom) + if cn && fl && hj && rf { + return &httpFancyWriter{bw} + } + } + if fl { + return &flushWriter{bw} + } + + return &bw +} + +func (f *http2FancyWriter) Push(target string, opts *http.PushOptions) error { + return f.basicWriter.ResponseWriter.(http.Pusher).Push(target, opts) +} + +var _ http.Pusher = &http2FancyWriter{} diff --git a/vendor/github.com/pressly/chi/mux.go b/vendor/github.com/pressly/chi/mux.go new file mode 100644 index 0000000..b16dab1 --- /dev/null +++ b/vendor/github.com/pressly/chi/mux.go @@ -0,0 +1,408 @@ +package chi + +import ( + "context" + "fmt" + "net/http" + "strings" + "sync" +) + +var _ Router = &Mux{} + +// Mux is a simple HTTP route multiplexer that parses a request path, +// records any URL params, and executes an end handler. It implements +// the http.Handler interface and is friendly with the standard library. +// +// Mux is designed to be fast, minimal and offer a powerful API for building +// modular and composable HTTP services with a large set of handlers. It's +// particularly useful for writing large REST API services that break a handler +// into many smaller parts composed of middlewares and end handlers. +type Mux struct { + // The radix trie router + tree *node + + // The middleware stack + middlewares []func(http.Handler) http.Handler + + // Controls the behaviour of middleware chain generation when a mux + // is registered as an inline group inside another mux. + inline bool + + // The computed mux handler made of the chained middleware stack and + // the tree router + handler http.Handler + + // Routing context pool + pool sync.Pool + + // Custom route not found handler + notFoundHandler http.HandlerFunc + + // Custom method not allowed handler + methodNotAllowedHandler http.HandlerFunc +} + +// NewMux returns a newly initialized Mux object that implements the Router +// interface. +func NewMux() *Mux { + mux := &Mux{tree: &node{}} + mux.pool.New = func() interface{} { + return NewRouteContext() + } + return mux +} + +// ServeHTTP is the single method of the http.Handler interface that makes +// Mux interoperable with the standard library. It uses a sync.Pool to get and +// reuse routing contexts for each request. 
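A basic routing sketch for the Mux described above. This vendored chi version uses the ":name" URL-parameter syntax (see tree.go later in this patch); the chi.URLParam helper is assumed to be available in this version of the package, and the route, port, and response text are placeholders.

package main

import (
	"fmt"
	"net/http"

	"github.com/pressly/chi"
)

func main() {
	r := chi.NewRouter()

	// ":articleID" is captured as a URL parameter by the routing tree.
	r.Get("/articles/:articleID", func(w http.ResponseWriter, r *http.Request) {
		id := chi.URLParam(r, "articleID")
		fmt.Fprintf(w, "article %s", id)
	})

	http.ListenAndServe(":8080", r)
}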
+func (mx *Mux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + // Ensure the mux has some routes defined on the mux + if mx.handler == nil { + panic("chi: attempting to route to a mux with no handlers.") + } + + // Check if a routing context already exists from a parent router. + rctx, _ := r.Context().Value(RouteCtxKey).(*Context) + if rctx != nil { + mx.handler.ServeHTTP(w, r) + return + } + + // Fetch a RouteContext object from the sync pool, and call the computed + // mx.handler that is comprised of mx.middlewares + mx.routeHTTP. + // Once the request is finished, reset the routing context and put it back + // into the pool for reuse from another request. + rctx = mx.pool.Get().(*Context) + rctx.reset() + r = r.WithContext(context.WithValue(r.Context(), RouteCtxKey, rctx)) + mx.handler.ServeHTTP(w, r) + mx.pool.Put(rctx) +} + +// Use appends a middleware handler to the Mux middleware stack. +// +// The middleware stack for any Mux will execute before searching for a matching +// route to a specific handler, which provides opportunity to respond early, +// change the course of the request execution, or set request-scoped values for +// the next http.Handler. +func (mx *Mux) Use(middlewares ...func(http.Handler) http.Handler) { + if mx.handler != nil { + panic("chi: all middlewares must be defined before routes on a mux") + } + mx.middlewares = append(mx.middlewares, middlewares...) +} + +// Handle adds the route `pattern` that matches any http method to +// execute the `handler` http.Handler. +func (mx *Mux) Handle(pattern string, handler http.Handler) { + mx.handle(mALL, pattern, handler) +} + +// HandleFunc adds the route `pattern` that matches any http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) HandleFunc(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mALL, pattern, handlerFn) +} + +// Connect adds the route `pattern` that matches a CONNECT http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Connect(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mCONNECT, pattern, handlerFn) +} + +// Delete adds the route `pattern` that matches a DELETE http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Delete(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mDELETE, pattern, handlerFn) +} + +// Get adds the route `pattern` that matches a GET http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Get(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mGET, pattern, handlerFn) +} + +// Head adds the route `pattern` that matches a HEAD http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Head(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mHEAD, pattern, handlerFn) +} + +// Options adds the route `pattern` that matches a OPTIONS http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Options(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mOPTIONS, pattern, handlerFn) +} + +// Patch adds the route `pattern` that matches a PATCH http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Patch(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mPATCH, pattern, handlerFn) +} + +// Post adds the route `pattern` that matches a POST http method to +// execute the `handlerFn` http.HandlerFunc. 
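The per-method registration helpers above combine naturally with Route() for resource-style grouping; a hedged sketch, with trivial placeholder handlers standing in for real application code:

package main

import (
	"net/http"

	"github.com/pressly/chi"
)

func main() {
	r := chi.NewRouter()

	// Group the /articles resource on a sub-router with one handler per verb.
	r.Route("/articles", func(r chi.Router) {
		r.Get("/", func(w http.ResponseWriter, _ *http.Request) {
			w.Write([]byte("list articles"))
		})
		r.Post("/", func(w http.ResponseWriter, _ *http.Request) {
			w.Write([]byte("create article"))
		})
		r.Get("/:articleID", func(w http.ResponseWriter, _ *http.Request) {
			w.Write([]byte("show one article"))
		})
	})

	http.ListenAndServe(":8080", r)
}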
+func (mx *Mux) Post(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mPOST, pattern, handlerFn) +} + +// Put adds the route `pattern` that matches a PUT http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Put(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mPUT, pattern, handlerFn) +} + +// Trace adds the route `pattern` that matches a TRACE http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Trace(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mTRACE, pattern, handlerFn) +} + +// NotFound sets a custom http.HandlerFunc for routing paths that could +// not be found. The default 404 handler is `http.NotFound`. +func (mx *Mux) NotFound(handlerFn http.HandlerFunc) { + mx.notFoundHandler = handlerFn + + mx.updateSubRoutes(func(subMux *Mux) { + if subMux.notFoundHandler == nil { + subMux.NotFound(handlerFn) + } + }) +} + +// MethodNotAllowed sets a custom http.HandlerFunc for routing paths where the +// method is unresolved. The default handler returns a 405 with an empty body. +func (mx *Mux) MethodNotAllowed(handlerFn http.HandlerFunc) { + mx.methodNotAllowedHandler = handlerFn + + mx.updateSubRoutes(func(subMux *Mux) { + if subMux.methodNotAllowedHandler == nil { + subMux.MethodNotAllowed(handlerFn) + } + }) +} + +// With adds inline middlewares for an endpoint handler. +func (mx *Mux) With(middlewares ...func(http.Handler) http.Handler) Router { + // Similarly as in handle(), we must build the mux handler once further + // middleware registration isn't allowed for this stack, like now. + if !mx.inline && mx.handler == nil { + mx.buildRouteHandler() + } + + // Copy middlewares from parent inline muxs + var mws Middlewares + if mx.inline { + mws = make(Middlewares, len(mx.middlewares)) + copy(mws, mx.middlewares) + } + mws = append(mws, middlewares...) + + im := &Mux{inline: true, tree: mx.tree, middlewares: mws} + return im +} + +// Group creates a new inline-Mux with a fresh middleware stack. It's useful +// for a group of handlers along the same routing path that use an additional +// set of middlewares. See _examples/. +func (mx *Mux) Group(fn func(r Router)) Router { + im := mx.With().(*Mux) + if fn != nil { + fn(im) + } + return im +} + +// Route creates a new Mux with a fresh middleware stack and mounts it +// along the `pattern` as a subrouter. Effectively, this is a short-hand +// call to Mount. See _examples/. +func (mx *Mux) Route(pattern string, fn func(r Router)) Router { + subRouter := NewRouter() + if fn != nil { + fn(subRouter) + } + mx.Mount(pattern, subRouter) + return subRouter +} + +// Mount attaches another http.Handler or chi Router as a subrouter along a routing +// path. It's very useful to split up a large API as many independent routers and +// compose them as a single service using Mount. See _examples/. +// +// Note that Mount() simply sets a wildcard along the `pattern` that will continue +// routing at the `handler`, which in most cases is another chi.Router. As a result, +// if you define two Mount() routes on the exact same pattern the mount will panic. +func (mx *Mux) Mount(pattern string, handler http.Handler) { + // Provide runtime safety for ensuring a pattern isn't mounted on an existing + // routing pattern. 
+ if mx.tree.findPattern(pattern+"*") != nil || mx.tree.findPattern(pattern+"/*") != nil { + panic(fmt.Sprintf("chi: attempting to Mount() a handler on an existing path, '%s'", pattern)) + } + + // Assign sub-Router's with the parent not found & method not allowed handler if not specified. + subr, ok := handler.(*Mux) + if ok && subr.notFoundHandler == nil && mx.notFoundHandler != nil { + subr.NotFound(mx.notFoundHandler) + } + if ok && subr.methodNotAllowedHandler == nil && mx.methodNotAllowedHandler != nil { + subr.MethodNotAllowed(mx.methodNotAllowedHandler) + } + + // Wrap the sub-router in a handlerFunc to scope the request path for routing. + subHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + rctx := RouteContext(r.Context()) + rctx.RoutePath = "/" + rctx.URLParams.Del("*") + handler.ServeHTTP(w, r) + }) + + if pattern == "" || pattern[len(pattern)-1] != '/' { + mx.handle(mALL|mSTUB, pattern, subHandler) + mx.handle(mALL|mSTUB, pattern+"/", mx.NotFoundHandler()) + pattern += "/" + } + + method := mALL + subroutes, _ := handler.(Routes) + if subroutes != nil { + method |= mSTUB + } + n := mx.handle(method, pattern+"*", subHandler) + + if subroutes != nil { + n.subroutes = subroutes + } +} + +func (mx *Mux) Middlewares() Middlewares { + return mx.middlewares +} + +func (mx *Mux) Routes() []Route { + return mx.tree.routes() +} + +// FileServer conveniently sets up a http.FileServer handler to serve +// static files from a http.FileSystem. +func (mx *Mux) FileServer(path string, root http.FileSystem) { + if strings.ContainsAny(path, ":*") { + panic("chi: FileServer does not permit URL parameters.") + } + + fs := http.StripPrefix(path, http.FileServer(root)) + + if path != "/" && path[len(path)-1] != '/' { + mx.Get(path, http.RedirectHandler(path+"/", 301).ServeHTTP) + path += "/" + } + path += "*" + + mx.Get(path, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fs.ServeHTTP(w, r) + })) +} + +// NotFoundHandler returns the default Mux 404 responder whenever a route +// cannot be found. +func (mx *Mux) NotFoundHandler() http.HandlerFunc { + if mx.notFoundHandler != nil { + return mx.notFoundHandler + } + return http.NotFound +} + +// MethodNotAllowedHandler returns the default Mux 405 responder whenever +// a method cannot be resolved for a route. +func (mx *Mux) MethodNotAllowedHandler() http.HandlerFunc { + if mx.methodNotAllowedHandler != nil { + return mx.methodNotAllowedHandler + } + return methodNotAllowedHandler +} + +// buildRouteHandler builds the single mux handler that is a chain of the middleware +// stack, as defined by calls to Use(), and the tree router (Mux) itself. After this +// point, no other middlewares can be registered on this Mux's stack. But you can still +// compose additional middlewares via Group()'s or using a chained middleware handler. +func (mx *Mux) buildRouteHandler() { + mx.handler = chain(mx.middlewares, http.HandlerFunc(mx.routeHTTP)) +} + +// handle registers a http.Handler in the routing tree for a particular http method +// and routing pattern. +func (mx *Mux) handle(method methodTyp, pattern string, handler http.Handler) *node { + if len(pattern) == 0 || pattern[0] != '/' { + panic(fmt.Sprintf("chi: routing pattern must begin with '/' in '%s'", pattern)) + } + + // Build the final routing handler for this Mux. 
+ if !mx.inline && mx.handler == nil { + mx.buildRouteHandler() + } + + // Build endpoint handler with inline middlewares for the route + var h http.Handler + if mx.inline { + mx.handler = http.HandlerFunc(mx.routeHTTP) + h = Chain(mx.middlewares...).Handler(handler) + } else { + h = handler + } + + // Add the endpoint to the tree and return the node + return mx.tree.InsertRoute(method, pattern, h) +} + +// routeHTTP routes a http.Request through the Mux routing tree to serve +// the matching handler for a particular http method. +func (mx *Mux) routeHTTP(w http.ResponseWriter, r *http.Request) { + // Grab the route context object + rctx := r.Context().Value(RouteCtxKey).(*Context) + + // The request routing path + routePath := rctx.RoutePath + if routePath == "" { + routePath = r.URL.Path + } + + // Check if method is supported by chi + method, ok := methodMap[r.Method] + if !ok { + mx.MethodNotAllowedHandler().ServeHTTP(w, r) + return + } + + // Find the route + hs := mx.tree.FindRoute(rctx, routePath) + if hs == nil { + mx.NotFoundHandler().ServeHTTP(w, r) + return + } + + h, ok := hs[method] + if !ok { + mx.MethodNotAllowedHandler().ServeHTTP(w, r) + return + } + + // Serve it up + h.ServeHTTP(w, r) +} + +// Recursively update data on child routers. +func (mx *Mux) updateSubRoutes(fn func(subMux *Mux)) { + for _, r := range mx.tree.routes() { + subMux, ok := r.SubRoutes.(*Mux) + if !ok { + continue + } + fn(subMux) + } +} + +// methodNotAllowedHandler is a helper function to respond with a 405, +// method not allowed. +func methodNotAllowedHandler(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(405) + w.Write(nil) +} diff --git a/vendor/github.com/pressly/chi/tree.go b/vendor/github.com/pressly/chi/tree.go new file mode 100644 index 0000000..7e5e4fc --- /dev/null +++ b/vendor/github.com/pressly/chi/tree.go @@ -0,0 +1,535 @@ +package chi + +// Radix tree implementation below is a based on the original work by +// Armon Dadgar in https://github.com/armon/go-radix/blob/master/radix.go +// (MIT licensed). It's been heavily modified for use as a HTTP routing tree. + +import ( + "net/http" + "sort" + "strings" +) + +type methodTyp int + +const ( + mCONNECT methodTyp = 1 << iota + mDELETE + mGET + mHEAD + mOPTIONS + mPATCH + mPOST + mPUT + mTRACE + mSTUB + + mALL methodTyp = mCONNECT | mDELETE | mGET | mHEAD | mOPTIONS | + mPATCH | mPOST | mPUT | mTRACE +) + +var methodMap = map[string]methodTyp{ + "CONNECT": mCONNECT, + "DELETE": mDELETE, + "GET": mGET, + "HEAD": mHEAD, + "OPTIONS": mOPTIONS, + "PATCH": mPATCH, + "POST": mPOST, + "PUT": mPUT, + "TRACE": mTRACE, +} + +type nodeTyp uint8 + +const ( + ntStatic nodeTyp = iota // /home + ntRegexp // /:id([0-9]+) or #id^[0-9]+$ + ntParam // /:user + ntCatchAll // /api/v1/* +) + +type node struct { + // node type + typ nodeTyp + + // first byte of the prefix + label byte + + // prefix is the common prefix we ignore + prefix string + + // pattern is the computed path of prefixes + pattern string + + // HTTP handler on the leaf node + handlers methodHandlers + + // chi subroutes on the leaf node + subroutes Routes + + // Child nodes should be stored in-order for iteration, + // in groups of the node type. 
+ children [ntCatchAll + 1]nodes +} + +func (n *node) FindRoute(rctx *Context, path string) methodHandlers { + // Reset the context routing pattern + rctx.RoutePattern = "" + + // Find the routing handlers for the path + rn := n.findRoute(rctx, path) + if rn == nil { + return nil + } + + // Record the routing pattern in the request lifecycle + if rn.pattern != "" { + rctx.RoutePattern = rn.pattern + rctx.RoutePatterns = append(rctx.RoutePatterns, rctx.RoutePattern) + } + + return rn.handlers +} + +func (n *node) InsertRoute(method methodTyp, pattern string, handler http.Handler) *node { + var parent *node + search := pattern + + for { + // Handle key exhaustion + if len(search) == 0 { + // Insert or update the node's leaf handler + n.setHandler(method, handler) + n.pattern = pattern + return n + } + + // Look for the edge + parent = n + n = n.getEdge(search[0]) + + // No edge, create one + if n == nil { + cn := &node{label: search[0], prefix: search, pattern: pattern} + cn.setHandler(method, handler) + parent.addChild(pattern, cn) + return cn + } + + if n.typ > ntStatic { + // We found a wildcard node, meaning search path starts with + // a wild prefix. Trim off the wildcard search path and continue. + p := strings.Index(search, "/") + if p < 0 { + p = len(search) + } + search = search[p:] + continue + } + + // Static nodes fall below here. + // Determine longest prefix of the search key on match. + commonPrefix := n.longestPrefix(search, n.prefix) + if commonPrefix == len(n.prefix) { + // the common prefix is as long as the current node's prefix we're attempting to insert. + // keep the search going. + search = search[commonPrefix:] + continue + } + + // Split the node + child := &node{ + typ: ntStatic, + prefix: search[:commonPrefix], + } + parent.replaceChild(search[0], child) + + // Restore the existing node + n.label = n.prefix[commonPrefix] + n.prefix = n.prefix[commonPrefix:] + child.addChild(pattern, n) + + // If the new key is a subset, add to to this node + search = search[commonPrefix:] + if len(search) == 0 { + child.setHandler(method, handler) + child.pattern = pattern + return child + } + + // Create a new edge for the node + subchild := &node{ + typ: ntStatic, + label: search[0], + prefix: search, + pattern: pattern, + } + subchild.setHandler(method, handler) + child.addChild(pattern, subchild) + return subchild + } +} + +func (n *node) findPattern(pattern string) *node { + nn := n + for _, nds := range nn.children { + if len(nds) == 0 { + continue + } + + n = nn.getEdge(pattern[0]) + if n == nil { + continue + } + + idx := n.longestPrefix(pattern, n.prefix) + xpattern := pattern[idx:] + + if len(xpattern) == 0 { + return n + } else if xpattern[0] == '/' && idx < len(n.prefix) { + continue + } + + return n.findPattern(xpattern) + } + return nil +} + +func (n *node) isLeaf() bool { + return n.handlers != nil +} + +func (n *node) addChild(pattern string, child *node) { + search := child.prefix + + // Find any wildcard segments + p := strings.IndexAny(search, ":*") + + // Determine new node type + ntyp := child.typ + if p >= 0 { + switch search[p] { + case ':': + ntyp = ntParam + case '*': + ntyp = ntCatchAll + } + } + + if p == 0 { + // Path starts with a wildcard + + handlers := child.handlers + child.typ = ntyp + + if ntyp == ntCatchAll { + p = -1 + } else { + p = strings.IndexByte(search, '/') + } + if p < 0 { + p = len(search) + } + child.prefix = search[:p] + + if p != len(search) { + // add edge for the remaining part, split the end. 
+ child.handlers = nil + + search = search[p:] + + child.addChild(pattern, &node{ + typ: ntStatic, + label: search[0], // this will always start with / + prefix: search, + pattern: pattern, + handlers: handlers, + }) + } + + } else if p > 0 { + // Path has some wildcard + + // starts with a static segment + handlers := child.handlers + child.typ = ntStatic + child.prefix = search[:p] + child.handlers = nil + + // add the wild edge node + search = search[p:] + + child.addChild(pattern, &node{ + typ: ntyp, + label: search[0], + prefix: search, + pattern: pattern, + handlers: handlers, + }) + + } else { + // Path is all static + child.typ = ntyp + + } + + n.children[child.typ] = append(n.children[child.typ], child) + n.children[child.typ].Sort() +} + +func (n *node) replaceChild(label byte, child *node) { + for i := 0; i < len(n.children[child.typ]); i++ { + if n.children[child.typ][i].label == label { + n.children[child.typ][i] = child + n.children[child.typ][i].label = label + return + } + } + + panic("chi: replacing missing child") +} + +func (n *node) getEdge(label byte) *node { + for _, nds := range n.children { + num := len(nds) + for i := 0; i < num; i++ { + if nds[i].label == label { + return nds[i] + } + } + } + return nil +} + +func (n *node) findEdge(ntyp nodeTyp, label byte) *node { + nds := n.children[ntyp] + num := len(nds) + idx := 0 + + switch ntyp { + case ntStatic: + i, j := 0, num-1 + for i <= j { + idx = i + (j-i)/2 + if label > nds[idx].label { + i = idx + 1 + } else if label < nds[idx].label { + j = idx - 1 + } else { + i = num // breaks cond + } + } + if nds[idx].label != label { + return nil + } + return nds[idx] + + default: // wild nodes + // TODO: right now we match them all.. but regexp should + // run through regexp matcher + return nds[idx] + } +} + +// Recursive edge traversal by checking all nodeTyp groups along the way. +// It's like searching through a multi-dimensional radix trie. +func (n *node) findRoute(rctx *Context, path string) *node { + nn := n + search := path + + for t, nds := range nn.children { + ntyp := nodeTyp(t) + if len(nds) == 0 { + continue + } + + // search subset of edges of the index for a matching node + var label byte + if search != "" { + label = search[0] + } + + xn := nn.findEdge(ntyp, label) // next node + if xn == nil { + continue + } + + // Prepare next search path by trimming prefix from requested path + xsearch := search + if xn.typ > ntStatic { + p := -1 + if xn.typ < ntCatchAll { + p = strings.IndexByte(xsearch, '/') + } + if p < 0 { + p = len(xsearch) + } + + if xn.typ == ntCatchAll { + rctx.URLParams.Add("*", xsearch) + } else { + rctx.URLParams.Add(xn.prefix[1:], xsearch[:p]) + } + + xsearch = xsearch[p:] + } else if strings.HasPrefix(xsearch, xn.prefix) { + xsearch = xsearch[len(xn.prefix):] + } else { + continue // no match + } + + // did we find it yet? + if len(xsearch) == 0 { + if xn.isLeaf() { + return xn + } + } + + // recursively find the next node.. 
+ fin := xn.findRoute(rctx, xsearch) + if fin != nil { + // found a node, return it + return fin + } + + // Did not found final handler, let's remove the param here if it was set + if xn.typ > ntStatic { + if xn.typ == ntCatchAll { + rctx.URLParams.Del("*") + } else { + rctx.URLParams.Del(xn.prefix[1:]) + } + } + } + + return nil +} + +// longestPrefix finds the length of the shared prefix +// of two strings +func (n *node) longestPrefix(k1, k2 string) int { + max := len(k1) + if l := len(k2); l < max { + max = l + } + var i int + for i = 0; i < max; i++ { + if k1[i] != k2[i] { + break + } + } + return i +} + +func (n *node) setHandler(method methodTyp, handler http.Handler) { + if n.handlers == nil { + n.handlers = make(methodHandlers, 0) + } + if method&mSTUB == mSTUB { + n.handlers[mSTUB] = handler + } else { + n.handlers[mSTUB] = nil + } + if method&mALL == mALL { + n.handlers[mALL] = handler + for _, m := range methodMap { + n.handlers[m] = handler + } + } else { + n.handlers[method] = handler + } +} + +func (n *node) isEmpty() bool { + for _, nds := range n.children { + if len(nds) > 0 { + return false + } + } + return true +} + +func (n *node) routes() []Route { + rts := []Route{} + + n.walkRoutes(n.prefix, n, func(pattern string, handlers methodHandlers, subroutes Routes) bool { + if handlers[mSTUB] != nil && subroutes == nil { + return false + } + + if subroutes != nil && len(pattern) > 2 { + pattern = pattern[:len(pattern)-2] + } + + var hs = make(map[string]http.Handler, 0) + if handlers[mALL] != nil { + hs["*"] = handlers[mALL] + } + for mt, h := range handlers { + if h == nil { + continue + } + m := methodTypString(mt) + if m == "" { + continue + } + hs[m] = h + } + + rt := Route{pattern, hs, subroutes} + rts = append(rts, rt) + return false + }) + + return rts +} + +func (n *node) walkRoutes(pattern string, nd *node, fn walkFn) bool { + pattern = nd.pattern + + // Visit the leaf values if any + if (nd.handlers != nil || nd.subroutes != nil) && fn(pattern, nd.handlers, nd.subroutes) { + return true + } + + // Recurse on the children + for _, nds := range nd.children { + for _, nd := range nds { + if n.walkRoutes(pattern, nd, fn) { + return true + } + } + } + return false +} + +func methodTypString(method methodTyp) string { + for s, t := range methodMap { + if method == t { + return s + } + } + return "" +} + +type walkFn func(pattern string, handlers methodHandlers, subroutes Routes) bool + +// methodHandlers is a mapping of http method constants to handlers +// for a given route. +type methodHandlers map[methodTyp]http.Handler + +type nodes []*node + +// Sort the list of nodes by label +func (ns nodes) Len() int { return len(ns) } +func (ns nodes) Less(i, j int) bool { return ns[i].label < ns[j].label } +func (ns nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] } +func (ns nodes) Sort() { sort.Sort(ns) } + +type Route struct { + Pattern string + Handlers map[string]http.Handler + SubRoutes Routes +} diff --git a/vendor/github.com/tidwall/btree/LICENSE b/vendor/github.com/tidwall/btree/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/vendor/github.com/tidwall/btree/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/tidwall/btree/README.md b/vendor/github.com/tidwall/btree/README.md new file mode 100644 index 0000000..deb1e88 --- /dev/null +++ b/vendor/github.com/tidwall/btree/README.md @@ -0,0 +1,107 @@ +BTree implementation for Go +=========================== + +![Travis CI Build Status](https://api.travis-ci.org/tidwall/btree.svg?branch=master) +[![GoDoc](https://godoc.org/github.com/tidwall/btree?status.svg)](https://godoc.org/github.com/tidwall/btree) + +This package provides an in-memory B-Tree implementation for Go, useful as +an ordered, mutable data structure. + +This is a fork of the wonderful [google/btree](https://github.com/google/btree) package. It's has all the same great features and adds a few more. + +- Descend* functions for iterating backwards. +- Iteration performance boost. +- User defined context. + +User defined context +-------------------- +This is a great new feature that allows for entering the same item into multiple B-trees, and each B-tree have a different ordering formula. + +For example: + +```go +package main + +import ( + "fmt" + + "github.com/tidwall/btree" +) + +type Item struct { + Key, Val string +} + +func (i1 *Item) Less(item btree.Item, ctx interface{}) bool { + i2 := item.(*Item) + switch tag := ctx.(type) { + case string: + if tag == "vals" { + if i1.Val < i2.Val { + return true + } else if i1.Val > i2.Val { + return false + } + // Both vals are equal so we should fall though + // and let the key comparison take over. + } + } + return i1.Key < i2.Key +} + +func main() { + + // Create a tree for keys and a tree for values. + // The "keys" tree will be sorted on the Keys field. + // The "values" tree will be sorted on the Values field. + keys := btree.New(16, "keys") + vals := btree.New(16, "vals") + + // Create some items. + users := []*Item{ + &Item{Key: "user:1", Val: "Jane"}, + &Item{Key: "user:2", Val: "Andy"}, + &Item{Key: "user:3", Val: "Steve"}, + &Item{Key: "user:4", Val: "Andrea"}, + &Item{Key: "user:5", Val: "Janet"}, + &Item{Key: "user:6", Val: "Andy"}, + } + + // Insert each user into both trees + for _, user := range users { + keys.ReplaceOrInsert(user) + vals.ReplaceOrInsert(user) + } + + // Iterate over each user in the key tree + keys.Ascend(func(item btree.Item) bool { + kvi := item.(*Item) + fmt.Printf("%s %s\n", kvi.Key, kvi.Val) + return true + }) + + fmt.Printf("\n") + // Iterate over each user in the val tree + vals.Ascend(func(item btree.Item) bool { + kvi := item.(*Item) + fmt.Printf("%s %s\n", kvi.Key, kvi.Val) + return true + }) +} + +// Should see the results +/* +user:1 Jane +user:2 Andy +user:3 Steve +user:4 Andrea +user:5 Janet +user:6 Andy + +user:4 Andrea +user:2 Andy +user:6 Andy +user:1 Jane +user:3 Steve +*/ +``` diff --git a/vendor/github.com/tidwall/btree/btree.go b/vendor/github.com/tidwall/btree/btree.go new file mode 100644 index 0000000..26f0d23 --- /dev/null +++ b/vendor/github.com/tidwall/btree/btree.go @@ -0,0 +1,968 @@ +// Copyright 2014 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package btree implements in-memory B-Trees of arbitrary degree. +// +// btree implements an in-memory B-Tree for use as an ordered data structure. +// It is not meant for persistent storage solutions. +// +// It has a flatter structure than an equivalent red-black or other binary tree, +// which in some cases yields better memory usage and/or performance. +// See some discussion on the matter here: +// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html +// Note, though, that this project is in no way related to the C++ B-Tree +// implementation written about there. +// +// Within this tree, each node contains a slice of items and a (possibly nil) +// slice of children. For basic numeric values or raw structs, this can cause +// efficiency differences when compared to equivalent C++ template code that +// stores values in arrays within the node: +// * Due to the overhead of storing values as interfaces (each +// value needs to be stored as the value itself, then 2 words for the +// interface pointing to that value and its type), resulting in higher +// memory use. +// * Since interfaces can point to values anywhere in memory, values are +// most likely not stored in contiguous blocks, resulting in a higher +// number of cache misses. +// These issues don't tend to matter, though, when working with strings or other +// heap-allocated structures, since C++-equivalent structures also must store +// pointers and also distribute their values across the heap. +// +// This implementation is designed to be a drop-in replacement to gollrb.LLRB +// trees, (http://github.com/petar/gollrb), an excellent and probably the most +// widely used ordered tree implementation in the Go ecosystem currently. +// Its functions, therefore, exactly mirror those of +// llrb.LLRB where possible. Unlike gollrb, though, we currently don't +// support storing multiple equivalent values. +package btree + +import ( + "fmt" + "io" + "strings" + "sync" +) + +// Item represents a single object in the tree. +type Item interface { + // Less tests whether the current item is less than the given argument. + // + // This must provide a strict weak ordering. + // If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only + // hold one of either a or b in the tree). + // + // There is a user-defined ctx argument that is equal to the ctx value which + // is set at time of the btree contruction. + Less(than Item, ctx interface{}) bool +} + +const ( + DefaultFreeListSize = 32 +) + +var ( + nilItems = make(items, 16) + nilChildren = make(children, 16) +) + +// FreeList represents a free list of btree nodes. By default each +// BTree has its own FreeList, but multiple BTrees can share the same +// FreeList. +// Two Btrees using the same freelist are safe for concurrent write access. +type FreeList struct { + mu sync.Mutex + freelist []*node +} + +// NewFreeList creates a new free list. +// size is the maximum size of the returned free list. 
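+//
+// As an illustrative sketch (not taken from the upstream docs), one free list
+// may back several trees, for example:
+//
+//	fl := btree.NewFreeList(btree.DefaultFreeListSize)
+//	keys := btree.NewWithFreeList(16, fl, "keys")
+//	vals := btree.NewWithFreeList(16, fl, "vals")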
+func NewFreeList(size int) *FreeList { + return &FreeList{freelist: make([]*node, 0, size)} +} + +func (f *FreeList) newNode() (n *node) { + f.mu.Lock() + index := len(f.freelist) - 1 + if index < 0 { + f.mu.Unlock() + return new(node) + } + n = f.freelist[index] + f.freelist[index] = nil + f.freelist = f.freelist[:index] + f.mu.Unlock() + return +} + +func (f *FreeList) freeNode(n *node) { + f.mu.Lock() + if len(f.freelist) < cap(f.freelist) { + f.freelist = append(f.freelist, n) + } + f.mu.Unlock() +} + +// ItemIterator allows callers of Ascend* to iterate in-order over portions of +// the tree. When this function returns false, iteration will stop and the +// associated Ascend* function will immediately return. +type ItemIterator func(i Item) bool + +// New creates a new B-Tree with the given degree. +// +// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items +// and 2-4 children). +func New(degree int, ctx interface{}) *BTree { + return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize), ctx) +} + +// NewWithFreeList creates a new B-Tree that uses the given node free list. +func NewWithFreeList(degree int, f *FreeList, ctx interface{}) *BTree { + if degree <= 1 { + panic("bad degree") + } + return &BTree{ + degree: degree, + cow: ©OnWriteContext{freelist: f}, + ctx: ctx, + } +} + +// items stores items in a node. +type items []Item + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *items) insertAt(index int, item Item) { + *s = append(*s, nil) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = item +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. +func (s *items) removeAt(index int) Item { + item := (*s)[index] + copy((*s)[index:], (*s)[index+1:]) + (*s)[len(*s)-1] = nil + *s = (*s)[:len(*s)-1] + return item +} + +// pop removes and returns the last element in the list. +func (s *items) pop() (out Item) { + index := len(*s) - 1 + out = (*s)[index] + (*s)[index] = nil + *s = (*s)[:index] + return +} + +// truncate truncates this instance at index so that it contains only the +// first index items. index must be less than or equal to length. +func (s *items) truncate(index int) { + var toClear items + *s, toClear = (*s)[:index], (*s)[index:] + for len(toClear) > 0 { + toClear = toClear[copy(toClear, nilItems):] + } +} + +// find returns the index where the given item should be inserted into this +// list. 'found' is true if the item already exists in the list at the given +// index. +func (s items) find(item Item, ctx interface{}) (index int, found bool) { + i, j := 0, len(s) + for i < j { + h := i + (j-i)/2 + if !item.Less(s[h], ctx) { + i = h + 1 + } else { + j = h + } + } + if i > 0 && !s[i-1].Less(item, ctx) { + return i - 1, true + } + return i, false +} + +// children stores child nodes in a node. +type children []*node + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *children) insertAt(index int, n *node) { + *s = append(*s, nil) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = n +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. +func (s *children) removeAt(index int) *node { + n := (*s)[index] + copy((*s)[index:], (*s)[index+1:]) + (*s)[len(*s)-1] = nil + *s = (*s)[:len(*s)-1] + return n +} + +// pop removes and returns the last element in the list. 
+func (s *children) pop() (out *node) { + index := len(*s) - 1 + out = (*s)[index] + (*s)[index] = nil + *s = (*s)[:index] + return +} + +// truncate truncates this instance at index so that it contains only the +// first index children. index must be less than or equal to length. +func (s *children) truncate(index int) { + var toClear children + *s, toClear = (*s)[:index], (*s)[index:] + for len(toClear) > 0 { + toClear = toClear[copy(toClear, nilChildren):] + } +} + +// node is an internal node in a tree. +// +// It must at all times maintain the invariant that either +// * len(children) == 0, len(items) unconstrained +// * len(children) == len(items) + 1 +type node struct { + items items + children children + cow *copyOnWriteContext +} + +func (n *node) mutableFor(cow *copyOnWriteContext) *node { + if n.cow == cow { + return n + } + out := cow.newNode() + if cap(out.items) >= len(n.items) { + out.items = out.items[:len(n.items)] + } else { + out.items = make(items, len(n.items), cap(n.items)) + } + copy(out.items, n.items) + // Copy children + if cap(out.children) >= len(n.children) { + out.children = out.children[:len(n.children)] + } else { + out.children = make(children, len(n.children), cap(n.children)) + } + copy(out.children, n.children) + return out +} + +func (n *node) mutableChild(i int) *node { + c := n.children[i].mutableFor(n.cow) + n.children[i] = c + return c +} + +// split splits the given node at the given index. The current node shrinks, +// and this function returns the item that existed at that index and a new node +// containing all items/children after it. +func (n *node) split(i int) (Item, *node) { + item := n.items[i] + next := n.cow.newNode() + next.items = append(next.items, n.items[i+1:]...) + n.items.truncate(i) + if len(n.children) > 0 { + next.children = append(next.children, n.children[i+1:]...) + n.children.truncate(i + 1) + } + return item, next +} + +// maybeSplitChild checks if a child should be split, and if so splits it. +// Returns whether or not a split occurred. +func (n *node) maybeSplitChild(i, maxItems int) bool { + if len(n.children[i].items) < maxItems { + return false + } + first := n.mutableChild(i) + item, second := first.split(maxItems / 2) + n.items.insertAt(i, item) + n.children.insertAt(i+1, second) + return true +} + +// insert inserts an item into the subtree rooted at this node, making sure +// no nodes in the subtree exceed maxItems items. Should an equivalent item be +// be found/replaced by insert, it will be returned. +func (n *node) insert(item Item, maxItems int, ctx interface{}) Item { + i, found := n.items.find(item, ctx) + if found { + out := n.items[i] + n.items[i] = item + return out + } + if len(n.children) == 0 { + n.items.insertAt(i, item) + return nil + } + if n.maybeSplitChild(i, maxItems) { + inTree := n.items[i] + switch { + case item.Less(inTree, ctx): + // no change, we want first split node + case inTree.Less(item, ctx): + i++ // we want second split node + default: + out := n.items[i] + n.items[i] = item + return out + } + } + return n.mutableChild(i).insert(item, maxItems, ctx) +} + +// get finds the given key in the subtree and returns it. +func (n *node) get(key Item, ctx interface{}) Item { + i, found := n.items.find(key, ctx) + if found { + return n.items[i] + } else if len(n.children) > 0 { + return n.children[i].get(key, ctx) + } + return nil +} + +// min returns the first item in the subtree. 
+func min(n *node) Item { + if n == nil { + return nil + } + for len(n.children) > 0 { + n = n.children[0] + } + if len(n.items) == 0 { + return nil + } + return n.items[0] +} + +// max returns the last item in the subtree. +func max(n *node) Item { + if n == nil { + return nil + } + for len(n.children) > 0 { + n = n.children[len(n.children)-1] + } + if len(n.items) == 0 { + return nil + } + return n.items[len(n.items)-1] +} + +// toRemove details what item to remove in a node.remove call. +type toRemove int + +const ( + removeItem toRemove = iota // removes the given item + removeMin // removes smallest item in the subtree + removeMax // removes largest item in the subtree +) + +// remove removes an item from the subtree rooted at this node. +func (n *node) remove(item Item, minItems int, typ toRemove, ctx interface{}) Item { + var i int + var found bool + switch typ { + case removeMax: + if len(n.children) == 0 { + return n.items.pop() + } + i = len(n.items) + case removeMin: + if len(n.children) == 0 { + return n.items.removeAt(0) + } + i = 0 + case removeItem: + i, found = n.items.find(item, ctx) + if len(n.children) == 0 { + if found { + return n.items.removeAt(i) + } + return nil + } + default: + panic("invalid type") + } + // If we get to here, we have children. + if len(n.children[i].items) <= minItems { + return n.growChildAndRemove(i, item, minItems, typ, ctx) + } + child := n.mutableChild(i) + // Either we had enough items to begin with, or we've done some + // merging/stealing, because we've got enough now and we're ready to return + // stuff. + if found { + // The item exists at index 'i', and the child we've selected can give us a + // predecessor, since if we've gotten here it's got > minItems items in it. + out := n.items[i] + // We use our special-case 'remove' call with typ=maxItem to pull the + // predecessor of item i (the rightmost leaf of our immediate left child) + // and set it into where we pulled the item from. + n.items[i] = child.remove(nil, minItems, removeMax, ctx) + return out + } + // Final recursive call. Once we're here, we know that the item isn't in this + // node and that the child is big enough to remove from. + return child.remove(item, minItems, typ, ctx) +} + +// growChildAndRemove grows child 'i' to make sure it's possible to remove an +// item from it while keeping it at minItems, then calls remove to actually +// remove it. +// +// Most documentation says we have to do two sets of special casing: +// 1) item is in this node +// 2) item is in child +// In both cases, we need to handle the two subcases: +// A) node has enough values that it can spare one +// B) node doesn't have enough values +// For the latter, we have to check: +// a) left sibling has node to spare +// b) right sibling has node to spare +// c) we must merge +// To simplify our code here, we handle cases #1 and #2 the same: +// If a node doesn't have enough items, we make sure it does (using a,b,c). +// We then simply redo our remove call, and the second time (regardless of +// whether we're in case 1 or 2), we'll have enough items and can guarantee +// that we hit case A. 
+func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove, ctx interface{}) Item { + if i > 0 && len(n.children[i-1].items) > minItems { + // Steal from left child + child := n.mutableChild(i) + stealFrom := n.mutableChild(i - 1) + stolenItem := stealFrom.items.pop() + child.items.insertAt(0, n.items[i-1]) + n.items[i-1] = stolenItem + if len(stealFrom.children) > 0 { + child.children.insertAt(0, stealFrom.children.pop()) + } + } else if i < len(n.items) && len(n.children[i+1].items) > minItems { + // steal from right child + child := n.mutableChild(i) + stealFrom := n.mutableChild(i + 1) + stolenItem := stealFrom.items.removeAt(0) + child.items = append(child.items, n.items[i]) + n.items[i] = stolenItem + if len(stealFrom.children) > 0 { + child.children = append(child.children, stealFrom.children.removeAt(0)) + } + } else { + if i >= len(n.items) { + i-- + } + child := n.mutableChild(i) + // merge with right child + mergeItem := n.items.removeAt(i) + mergeChild := n.children.removeAt(i + 1) + child.items = append(child.items, mergeItem) + child.items = append(child.items, mergeChild.items...) + child.children = append(child.children, mergeChild.children...) + n.cow.freeNode(mergeChild) + } + return n.remove(item, minItems, typ, ctx) +} + +type direction int + +const ( + descend = direction(-1) + ascend = direction(+1) +) + +// iterate provides a simple method for iterating over elements in the tree. +// +// When ascending, the 'start' should be less than 'stop' and when descending, +// the 'start' should be greater than 'stop'. Setting 'includeStart' to true +// will force the iterator to include the first item when it equals 'start', +// thus creating a "greaterOrEqual" or "lessThanEqual" rather than just a +// "greaterThan" or "lessThan" queries. +func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator, ctx interface{}) (bool, bool) { + var ok bool + switch dir { + case ascend: + for i := 0; i < len(n.items); i++ { + if start != nil && n.items[i].Less(start, ctx) { + continue + } + if len(n.children) > 0 { + if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter, ctx); !ok { + return hit, false + } + } + if !includeStart && !hit && start != nil && !start.Less(n.items[i], ctx) { + hit = true + continue + } + hit = true + if stop != nil && !n.items[i].Less(stop, ctx) { + return hit, false + } + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter, ctx); !ok { + return hit, false + } + } + case descend: + for i := len(n.items) - 1; i >= 0; i-- { + if start != nil && !n.items[i].Less(start, ctx) { + if !includeStart || hit || start.Less(n.items[i], ctx) { + continue + } + } + if len(n.children) > 0 { + if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter, ctx); !ok { + return hit, false + } + } + if stop != nil && !stop.Less(n.items[i], ctx) { + return hit, false // continue + } + hit = true + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter, ctx); !ok { + return hit, false + } + } + } + return hit, true +} + +// Used for testing/debugging purposes. 
+func (n *node) print(w io.Writer, level int) { + fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items) + for _, c := range n.children { + c.print(w, level+1) + } +} + +// BTree is an implementation of a B-Tree. +// +// BTree stores Item instances in an ordered structure, allowing easy insertion, +// removal, and iteration. +// +// Write operations are not safe for concurrent mutation by multiple +// goroutines, but Read operations are. +type BTree struct { + degree int + length int + root *node + ctx interface{} + cow *copyOnWriteContext +} + +// copyOnWriteContext pointers determine node ownership... a tree with a write +// context equivalent to a node's write context is allowed to modify that node. +// A tree whose write context does not match a node's is not allowed to modify +// it, and must create a new, writable copy (IE: it's a Clone). +// +// When doing any write operation, we maintain the invariant that the current +// node's context is equal to the context of the tree that requested the write. +// We do this by, before we descend into any node, creating a copy with the +// correct context if the contexts don't match. +// +// Since the node we're currently visiting on any write has the requesting +// tree's context, that node is modifiable in place. Children of that node may +// not share context, but before we descend into them, we'll make a mutable +// copy. +type copyOnWriteContext struct { + freelist *FreeList +} + +// Clone clones the btree, lazily. Clone should not be called concurrently, +// but the original tree (t) and the new tree (t2) can be used concurrently +// once the Clone call completes. +// +// The internal tree structure of b is marked read-only and shared between t and +// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes +// whenever one of b's original nodes would have been modified. Read operations +// should have no performance degredation. Write operations for both t and t2 +// will initially experience minor slow-downs caused by additional allocs and +// copies due to the aforementioned copy-on-write logic, but should converge to +// the original performance characteristics of the original tree. +func (t *BTree) Clone() (t2 *BTree) { + // Create two entirely new copy-on-write contexts. + // This operation effectively creates three trees: + // the original, shared nodes (old b.cow) + // the new b.cow nodes + // the new out.cow nodes + cow1, cow2 := *t.cow, *t.cow + out := *t + t.cow = &cow1 + out.cow = &cow2 + return &out +} + +// maxItems returns the max number of items to allow per node. +func (t *BTree) maxItems() int { + return t.degree*2 - 1 +} + +// minItems returns the min number of items to allow per node (ignored for the +// root node). +func (t *BTree) minItems() int { + return t.degree - 1 +} + +func (c *copyOnWriteContext) newNode() (n *node) { + n = c.freelist.newNode() + n.cow = c + return +} + +func (c *copyOnWriteContext) freeNode(n *node) { + if n.cow == c { + // clear to allow GC + n.items.truncate(0) + n.children.truncate(0) + n.cow = nil + c.freelist.freeNode(n) + } +} + +// ReplaceOrInsert adds the given item to the tree. If an item in the tree +// already equals the given one, it is removed from the tree and returned. +// Otherwise, nil is returned. +// +// nil cannot be added to the tree (will panic). 
+func (t *BTree) ReplaceOrInsert(item Item) Item { + if item == nil { + panic("nil item being added to BTree") + } + if t.root == nil { + t.root = t.cow.newNode() + t.root.items = append(t.root.items, item) + t.length++ + return nil + } else { + t.root = t.root.mutableFor(t.cow) + if len(t.root.items) >= t.maxItems() { + item2, second := t.root.split(t.maxItems() / 2) + oldroot := t.root + t.root = t.cow.newNode() + t.root.items = append(t.root.items, item2) + t.root.children = append(t.root.children, oldroot, second) + } + } + out := t.root.insert(item, t.maxItems(), t.ctx) + if out == nil { + t.length++ + } + return out +} + +// Delete removes an item equal to the passed in item from the tree, returning +// it. If no such item exists, returns nil. +func (t *BTree) Delete(item Item) Item { + return t.deleteItem(item, removeItem, t.ctx) +} + +// DeleteMin removes the smallest item in the tree and returns it. +// If no such item exists, returns nil. +func (t *BTree) DeleteMin() Item { + return t.deleteItem(nil, removeMin, t.ctx) +} + +// DeleteMax removes the largest item in the tree and returns it. +// If no such item exists, returns nil. +func (t *BTree) DeleteMax() Item { + return t.deleteItem(nil, removeMax, t.ctx) +} + +func (t *BTree) deleteItem(item Item, typ toRemove, ctx interface{}) Item { + if t.root == nil || len(t.root.items) == 0 { + return nil + } + t.root = t.root.mutableFor(t.cow) + out := t.root.remove(item, t.minItems(), typ, ctx) + if len(t.root.items) == 0 && len(t.root.children) > 0 { + oldroot := t.root + t.root = t.root.children[0] + t.cow.freeNode(oldroot) + } + if out != nil { + t.length-- + } + return out +} + +// AscendRange calls the iterator for every value in the tree within the range +// [greaterOrEqual, lessThan), until iterator returns false. +func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator, t.ctx) +} + +// AscendLessThan calls the iterator for every value in the tree within the range +// [first, pivot), until iterator returns false. +func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, nil, pivot, false, false, iterator, t.ctx) +} + +// AscendGreaterOrEqual calls the iterator for every value in the tree within +// the range [pivot, last], until iterator returns false. +func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, pivot, nil, true, false, iterator, t.ctx) +} + +// Ascend calls the iterator for every value in the tree within the range +// [first, last], until iterator returns false. +func (t *BTree) Ascend(iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, nil, nil, false, false, iterator, t.ctx) +} + +// DescendRange calls the iterator for every value in the tree within the range +// [lessOrEqual, greaterThan), until iterator returns false. +func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator, t.ctx) +} + +// DescendLessOrEqual calls the iterator for every value in the tree within the range +// [pivot, first], until iterator returns false. 
+func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, pivot, nil, true, false, iterator, t.ctx) +} + +// DescendGreaterThan calls the iterator for every value in the tree within +// the range (pivot, last], until iterator returns false. +func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, nil, pivot, false, false, iterator, t.ctx) +} + +// Descend calls the iterator for every value in the tree within the range +// [last, first], until iterator returns false. +func (t *BTree) Descend(iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, nil, nil, false, false, iterator, t.ctx) +} + +// Get looks for the key item in the tree, returning it. It returns nil if +// unable to find that item. +func (t *BTree) Get(key Item) Item { + if t.root == nil { + return nil + } + return t.root.get(key, t.ctx) +} + +// Min returns the smallest item in the tree, or nil if the tree is empty. +func (t *BTree) Min() Item { + return min(t.root) +} + +// Max returns the largest item in the tree, or nil if the tree is empty. +func (t *BTree) Max() Item { + return max(t.root) +} + +// Has returns true if the given key is in the tree. +func (t *BTree) Has(key Item) bool { + return t.Get(key) != nil +} + +// Len returns the number of items currently in the tree. +func (t *BTree) Len() int { + return t.length +} + +// Int implements the Item interface for integers. +type Int int + +// Less returns true if int(a) < int(b). +func (a Int) Less(b Item, ctx interface{}) bool { + return a < b.(Int) +} + +type stackItem struct { + n *node // current node + i int // index of the next child/item. +} + +// Cursor represents an iterator that can traverse over all items in the tree +// in sorted order. +// +// Changing data while traversing a cursor may result in unexpected items to +// be returned. You must reposition your cursor after mutating data. +type Cursor struct { + t *BTree + stack []stackItem +} + +// Cursor returns a new cursor used to traverse over items in the tree. +func (t *BTree) Cursor() *Cursor { + return &Cursor{t: t} +} + +// First moves the cursor to the first item in the tree and returns that item. +func (c *Cursor) First() Item { + c.stack = c.stack[:0] + n := c.t.root + if n == nil { + return nil + } + c.stack = append(c.stack, stackItem{n: n}) + for len(n.children) > 0 { + n = n.children[0] + c.stack = append(c.stack, stackItem{n: n}) + } + if len(n.items) == 0 { + return nil + } + return n.items[0] +} + +// Next moves the cursor to the next item and returns that item. +func (c *Cursor) Next() Item { + if len(c.stack) == 0 { + return nil + } + si := len(c.stack) - 1 + c.stack[si].i++ + n := c.stack[si].n + i := c.stack[si].i + if i == len(n.children)+len(n.items) { + c.stack = c.stack[:len(c.stack)-1] + return c.Next() + } + if len(n.children) == 0 { + if i >= len(n.items) { + c.stack = c.stack[:len(c.stack)-1] + return c.Next() + } + return n.items[i] + } else if i%2 == 1 { + return n.items[i/2] + } + c.stack = append(c.stack, stackItem{n: n.children[i/2], i: -1}) + return c.Next() + +} + +// Last moves the cursor to the last item in the tree and returns that item. 
+func (c *Cursor) Last() Item { + c.stack = c.stack[:0] + n := c.t.root + if n == nil { + return nil + } + c.stack = append(c.stack, stackItem{n: n, i: len(n.children) + len(n.items) - 1}) + for len(n.children) > 0 { + n = n.children[len(n.children)-1] + c.stack = append(c.stack, stackItem{n: n, i: len(n.children) + len(n.items) - 1}) + } + if len(n.items) == 0 { + return nil + } + return n.items[len(n.items)-1] +} + +// Prev moves the cursor to the previous item and returns that item. +func (c *Cursor) Prev() Item { + if len(c.stack) == 0 { + return nil + } + si := len(c.stack) - 1 + c.stack[si].i-- + n := c.stack[si].n + i := c.stack[si].i + if i == -1 { + c.stack = c.stack[:len(c.stack)-1] + return c.Prev() + } + if len(n.children) == 0 { + return n.items[i] + } else if i%2 == 1 { + return n.items[i/2] + } + child := n.children[i/2] + c.stack = append(c.stack, stackItem{n: child, + i: len(child.children) + len(child.items)}) + return c.Prev() +} + +// Seek moves the cursor to provided item and returns that item. +// If the item does not exist then the next item is returned. +func (c *Cursor) Seek(pivot Item) Item { + c.stack = c.stack[:0] + n := c.t.root + for n != nil { + i, found := n.items.find(pivot, c.t.ctx) + c.stack = append(c.stack, stackItem{n: n}) + if found { + if len(n.children) == 0 { + c.stack[len(c.stack)-1].i = i + } else { + c.stack[len(c.stack)-1].i = i*2 + 1 + } + return n.items[i] + } + if len(n.children) == 0 { + if i == len(n.items) { + c.stack[len(c.stack)-1].i = i + 1 + return c.Next() + } + c.stack[len(c.stack)-1].i = i + return n.items[i] + } + c.stack[len(c.stack)-1].i = i * 2 + n = n.children[i] + } + return nil +} diff --git a/vendor/github.com/tidwall/buntdb/LICENSE b/vendor/github.com/tidwall/buntdb/LICENSE new file mode 100644 index 0000000..58f5819 --- /dev/null +++ b/vendor/github.com/tidwall/buntdb/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2016 Josh Baker + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/tidwall/buntdb/README.md b/vendor/github.com/tidwall/buntdb/README.md new file mode 100644 index 0000000..e595b57 --- /dev/null +++ b/vendor/github.com/tidwall/buntdb/README.md @@ -0,0 +1,606 @@ +

+BuntDB +
+[Build Status] [Code Coverage] [Go Report Card] [GoDoc]
+

+ +==== + +BuntDB is a low-level, in-memory, key/value store in pure Go. +It persists to disk, is ACID compliant, and uses locking for multiple +readers and a single writer. It supports custom indexes and geospatial +data. It's ideal for projects that need a dependable database and favor +speed over data size. + +The desire to create BuntDB stems from the need for a new embeddable +database for [Tile38](https://github.com/tidwall/tile38) and [SummitDB](https://github.com/tidwall/summitdb). + +Features +======== + +- In-memory database for [fast reads and writes](#performance) +- Embeddable with a [simple API](https://godoc.org/github.com/tidwall/buntdb) +- [Spatial indexing](#spatial-indexes) for up to 20 dimensions; Useful for Geospatial data +- Index fields inside [JSON](#json-indexes) documents +- [Collate i18n Indexes](#collate-i18n-indexes) using the optional [collate package](https://github.com/tidwall/collate) +- Create [custom indexes](#custom-indexes) for any data type +- Support for [multi value indexes](#multi-value-index); Similar to a SQL multi column index +- [Built-in types](#built-in-types) that are easy to get up & running; String, Uint, Int, Float +- Flexible [iteration](#iterating) of data; ascending, descending, and ranges +- [Durable append-only file](#append-only-file) format for persistence +- Option to evict old items with an [expiration](#data-expiration) TTL +- Tight codebase, under 2K loc using the `cloc` command +- ACID semantics with locking [transactions](#transactions) that support rollbacks + + +Getting Started +=============== + +## Installing + +To start using BuntDB, install Go and run `go get`: + +```sh +$ go get -u github.com/tidwall/buntdb +``` + +This will retrieve the library. + + +## Opening a database + +The primary object in BuntDB is a `DB`. To open or create your +database, use the `buntdb.Open()` function: + +```go +package main + +import ( + "log" + + "github.com/tidwall/buntdb" +) + +func main() { + // Open the data.db file. It will be created if it doesn't exist. + db, err := buntdb.Open("data.db") + if err != nil { + log.Fatal(err) + } + defer db.Close() + + ... +} +``` + +It's also possible to open a database that does not persist to disk by using `:memory:` as the path of the file. + +```go +buntdb.Open(":memory:") // Open a file that does not persist to disk. +``` + +## Transactions +All reads and writes must be performed from inside a transaction. BuntDB can have one write transaction opened at a time, but can have many concurrent read transactions. Each transaction maintains a stable view of the database. In other words, once a transaction has begun, the data for that transaction cannot be changed by other transactions. + +Transactions run in a function that exposes a `Tx` object, which represents the transaction state. While inside a transaction, all database operations should be performed using this object. You should never access the origin `DB` object while inside a transaction. Doing so may have side-effects, such as blocking your application. + +When a transaction fails, it will roll back, and revert all changes that occurred to the database during that transaction. There's a single return value that you can use to close the transaction. For read/write transactions, returning an error this way will force the transaction to roll back. When a read/write transaction succeeds all changes are persisted to disk. + +### Read-only Transactions +A read-only transaction should be used when you don't need to make changes to the data. 
The advantage of a read-only transaction is that there can be many running concurrently. + +```go +err := db.View(func(tx *buntdb.Tx) error { + ... + return nil +}) +``` + +### Read/write Transactions +A read/write transaction is used when you need to make changes to your data. There can only be one read/write transaction running at a time. So make sure you close it as soon as you are done with it. + +```go +err := db.Update(func(tx *buntdb.Tx) error { + ... + return nil +}) +``` + +## Setting and getting key/values + +To set a value you must open a read/write transaction: + +```go +err := db.Update(func(tx *buntdb.Tx) error { + _, _, err := tx.Set("mykey", "myvalue", nil) + return err +}) +``` + + +To get the value: + +```go +err := db.View(func(tx *buntdb.Tx) error { + val, err := tx.Get("mykey") + if err != nil{ + return err + } + fmt.Printf("value is %s\n", val) + return nil +}) +``` + +Getting non-existent values will case an `ErrNotFound` error. + +### Iterating +All keys/value pairs are ordered in the database by the key. To iterate over the keys: + +```go +err := db.View(func(tx *buntdb.Tx) error { +err := tx.Ascend("", func(key, value string) bool{ + fmt.Printf("key: %s, value: %s\n", key, value) + }) + return err +}) +``` + +There is also `AscendGreaterOrEqual`, `AscendLessThan`, `AscendRange`, `Descend`, `DescendLessOrEqual`, `DescendGreaterThan`, and `DescendRange`. Please see the [documentation](https://godoc.org/github.com/tidwall/buntdb) for more information on these functions. + + +## Custom Indexes +Initially all data is stored in a single [B-tree](https://en.wikipedia.org/wiki/B-tree) with each item having one key and one value. All of these items are ordered by the key. This is great for quickly getting a value from a key or [iterating](#iterating) over the keys. Feel free to peruse the [B-tree implementation](https://github.com/tidwall/btree). + +You can also create custom indexes that allow for ordering and [iterating](#iterating) over values. A custom index also uses a B-tree, but it's more flexible because it allows for custom ordering. + +For example, let's say you want to create an index for ordering names: + +```go +db.CreateIndex("names", "*", buntdb.IndexString) +``` + +This will create an index named `names` which stores and sorts all values. The second parameter is a pattern that is used to filter on keys. A `*` wildcard argument means that we want to accept all keys. `IndexString` is a built-in function that performs case-insensitive ordering on the values + +Now you can add various names: + +```go +db.Update(func(tx *buntdb.Tx) error { + tx.Set("user:0:name", "tom", nil) + tx.Set("user:1:name", "Randi", nil) + tx.Set("user:2:name", "jane", nil) + tx.Set("user:4:name", "Janet", nil) + tx.Set("user:5:name", "Paula", nil) + tx.Set("user:6:name", "peter", nil) + tx.Set("user:7:name", "Terri", nil) + return nil +}) +``` + +Finally you can iterate over the index: + +```go +db.View(func(tx *buntdb.Tx) error { + tx.Ascend("names", func(key, val string) bool { + fmt.Printf(buf, "%s %s\n", key, val) + return true + }) + return nil +}) +``` +The output should be: +``` +user:2:name jane +user:4:name Janet +user:5:name Paula +user:6:name peter +user:1:name Randi +user:7:name Terri +user:0:name tom +``` + +The pattern parameter can be used to filter on keys like this: + +```go +db.CreateIndex("names", "user:*", buntdb.IndexString) +``` + +Now only items with keys that have the prefix `user:` will be added to the `names` index. 
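+
+The comparator passed to `CreateIndex` can be any `func(a, b string) bool`; it
+does not have to be one of the built-ins. As a minimal sketch (the `by_length`
+index name is made up for this illustration), values could instead be ordered
+by their length:
+
+```go
+db.CreateIndex("by_length", "user:*", func(a, b string) bool {
+	return len(a) < len(b)
+})
+```
+
+Iterating with `tx.Ascend("by_length", ...)` would then visit the shortest
+values first.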
+ + +### Built-in types +Along with `IndexString`, there is also `IndexInt`, `IndexUint`, and `IndexFloat`. +These are built-in types for indexing. You can choose to use these or create your own. + +So to create an index that is numerically ordered on an age key, we could use: + +```go +db.CreateIndex("ages", "user:*:age", buntdb.IndexInt) +``` + +And then add values: + +```go +db.Update(func(tx *buntdb.Tx) error { + tx.Set("user:0:age", "35", nil) + tx.Set("user:1:age", "49", nil) + tx.Set("user:2:age", "13", nil) + tx.Set("user:4:age", "63", nil) + tx.Set("user:5:age", "8", nil) + tx.Set("user:6:age", "3", nil) + tx.Set("user:7:age", "16", nil) + return nil +}) +``` + +```go +db.View(func(tx *buntdb.Tx) error { + tx.Ascend("ages", func(key, val string) bool { + fmt.Printf(buf, "%s %s\n", key, val) + return true + }) + return nil +}) +``` + +The output should be: +``` +user:6:name 3 +user:5:name 8 +user:2:name 13 +user:7:name 16 +user:0:name 35 +user:1:name 49 +user:4:name 63 +``` + +## Spatial Indexes +BuntDB has support for spatial indexes by storing rectangles in an [R-tree](https://en.wikipedia.org/wiki/R-tree). An R-tree is organized in a similar manner as a [B-tree](https://en.wikipedia.org/wiki/B-tree), and both are balanced trees. But, an R-tree is special because it can operate on data that is in multiple dimensions. This is super handy for Geospatial applications. + +To create a spatial index use the `CreateSpatialIndex` function: + +```go +db.CreateSpatialIndex("fleet", "fleet:*:pos", buntdb.IndexRect) +``` + +Then `IndexRect` is a built-in function that converts rect strings to a format that the R-tree can use. It's easy to use this function out of the box, but you might find it better to create a custom one that renders from a different format, such as [Well-known text](https://en.wikipedia.org/wiki/Well-known_text) or [GeoJSON](http://geojson.org/). + +To add some lon,lat points to the `fleet` index: + +```go +db.Update(func(tx *buntdb.Tx) error { + tx.Set("fleet:0:pos", "[-115.567 33.532]", nil) + tx.Set("fleet:1:pos", "[-116.671 35.735]", nil) + tx.Set("fleet:2:pos", "[-113.902 31.234]", nil) + return nil +}) +``` + +And then you can run the `Intersects` function on the index: + +```go +db.View(func(tx *buntdb.Tx) error { + tx.Intersects("fleet", "[-117 30],[-112 36]", func(key, val string) bool { + ... + return true + }) + return nil +}) +``` + +This will get all three positions. + +### Spatial bracket syntax + +The bracket syntax `[-117 30],[-112 36]` is unique to BuntDB, and it's how the built-in rectangles are processed. But, you are not limited to this syntax. Whatever Rect function you choose to use during `CreateSpatialIndex` will be used to process the parameter, in this case it's `IndexRect`. + +- **2D rectangle:** `[10 15],[20 25]` +*Min XY: "10x15", Max XY: "20x25"* + +- **3D rectangle:** `[10 15 12],[20 25 18]` +*Min XYZ: "10x15x12", Max XYZ: "20x25x18"* + +- **2D point:** `[10 15]` +*XY: "10x15"* + +- **LatLon point:** `[-112.2693 33.5123]` +*LatLon: "33.5123 -112.2693"* + +- **LatLon bounding box:** `[-112.26 33.51],[-112.18 33.67]` +*Min LatLon: "33.51 -112.26", Max LatLon: "33.67 -112.18"* + +**Notice:** The longitude is the Y axis and is on the left, and latitude is the X axis and is on the right. + +You can also represent `Infinity` by using `-inf` and `+inf`. 
+For example, you might have the following points (`[X Y M]` where XY is a point and M is a timestamp): +``` +[3 9 1] +[3 8 2] +[4 8 3] +[4 7 4] +[5 7 5] +[5 6 6] +``` + +You can then do a search for all points with `M` between 2-4 by calling `Intersects`. + +```go +tx.Intersects("points", "[-inf -inf 2],[+inf +inf 4]", func(key, val string) bool { + println(val) + return true +}) +``` + +Which will return: + +``` +[3 8 2] +[4 8 3] +[4 7 4] +``` + +## JSON Indexes +Indexes can be created on individual fields inside JSON documents. BuntDB uses [GJSON](https://github.com/tidwall/gjson) under the hood. + +For example: + +```go +package main + +import ( + "fmt" + + "github.com/tidwall/buntdb" +) + +func main() { + db, _ := buntdb.Open(":memory:") + db.CreateIndex("last_name", "*", buntdb.IndexJSON("name.last")) + db.CreateIndex("age", "*", buntdb.IndexJSON("age")) + db.Update(func(tx *buntdb.Tx) error { + tx.Set("1", `{"name":{"first":"Tom","last":"Johnson"},"age":38}`, nil) + tx.Set("2", `{"name":{"first":"Janet","last":"Prichard"},"age":47}`, nil) + tx.Set("3", `{"name":{"first":"Carol","last":"Anderson"},"age":52}`, nil) + tx.Set("4", `{"name":{"first":"Alan","last":"Cooper"},"age":28}`, nil) + return nil + }) + db.View(func(tx *buntdb.Tx) error { + fmt.Println("Order by last name") + tx.Ascend("last_name", func(key, value string) bool { + fmt.Printf("%s: %s\n", key, value) + return true + }) + fmt.Println("Order by age") + tx.Ascend("age", func(key, value string) bool { + fmt.Printf("%s: %s\n", key, value) + return true + }) + fmt.Println("Order by age range 30-50") + tx.AscendRange("age", `{"age":30}`, `{"age":50}`, func(key, value string) bool { + fmt.Printf("%s: %s\n", key, value) + return true + }) + return nil + }) +} +``` + +Results: + +``` +Order by last name +3: {"name":{"first":"Carol","last":"Anderson"},"age":52} +4: {"name":{"first":"Alan","last":"Cooper"},"age":28} +1: {"name":{"first":"Tom","last":"Johnson"},"age":38} +2: {"name":{"first":"Janet","last":"Prichard"},"age":47} + +Order by age +4: {"name":{"first":"Alan","last":"Cooper"},"age":28} +1: {"name":{"first":"Tom","last":"Johnson"},"age":38} +2: {"name":{"first":"Janet","last":"Prichard"},"age":47} +3: {"name":{"first":"Carol","last":"Anderson"},"age":52} + +Order by age range 30-50 +1: {"name":{"first":"Tom","last":"Johnson"},"age":38} +2: {"name":{"first":"Janet","last":"Prichard"},"age":47} +``` + +## Multi Value Index +With BuntDB it's possible to join multiple values on a single index. +This is similar to a [multi column index](http://dev.mysql.com/doc/refman/5.7/en/multiple-column-indexes.html) in a traditional SQL database. 
+ +In this example we are creating a multi value index on "name.last" and "age": + +```go +db, _ := buntdb.Open(":memory:") +db.CreateIndex("last_name_age", "*", buntdb.IndexJSON("name.last"), buntdb.IndexJSON("age")) +db.Update(func(tx *buntdb.Tx) error { + tx.Set("1", `{"name":{"first":"Tom","last":"Johnson"},"age":38}`, nil) + tx.Set("2", `{"name":{"first":"Janet","last":"Prichard"},"age":47}`, nil) + tx.Set("3", `{"name":{"first":"Carol","last":"Anderson"},"age":52}`, nil) + tx.Set("4", `{"name":{"first":"Alan","last":"Cooper"},"age":28}`, nil) + tx.Set("5", `{"name":{"first":"Sam","last":"Anderson"},"age":51}`, nil) + tx.Set("6", `{"name":{"first":"Melinda","last":"Prichard"},"age":44}`, nil) + return nil +}) +db.View(func(tx *buntdb.Tx) error { + tx.Ascend("last_name_age", func(key, value string) bool { + fmt.Printf("%s: %s\n", key, value) + return true + }) + return nil +}) + +// Output: +// 5: {"name":{"first":"Sam","last":"Anderson"},"age":51} +// 3: {"name":{"first":"Carol","last":"Anderson"},"age":52} +// 4: {"name":{"first":"Alan","last":"Cooper"},"age":28} +// 1: {"name":{"first":"Tom","last":"Johnson"},"age":38} +// 6: {"name":{"first":"Melinda","last":"Prichard"},"age":44} +// 2: {"name":{"first":"Janet","last":"Prichard"},"age":47} +``` + +## Descending Ordered Index +Any index can be put in descending order by wrapping it's less function with `buntdb.Desc`. + +```go +db.CreateIndex("last_name_age", "*", + buntdb.IndexJSON("name.last"), + buntdb.Desc(buntdb.IndexJSON("age"))) +``` + +This will create a multi value index where the last name is ascending and the age is descending. + +## Collate i18n Indexes + +Using the external [collate package](https://github.com/tidwall/collate) it's possible to create +indexes that are sorted by the specified language. This is similar to the [SQL COLLATE keyword](https://msdn.microsoft.com/en-us/library/ms174596.aspx) found in traditional databases. + +To install: + +``` +go get -u github.com/tidwall/collate +``` + +For example: + +```go +import "github.com/tidwall/collate" + +// To sort case-insensitive in French. +db.CreateIndex("name", "*", collate.IndexString("FRENCH_CI")) + +// To specify that numbers should sort numerically ("2" < "12") +// and use a comma to represent a decimal point. +db.CreateIndex("amount", "*", collate.IndexString("FRENCH_NUM")) +``` + +There's also support for Collation on JSON indexes: + +```go +db.CreateIndex("last_name", "*", collate.IndexJSON("CHINESE_CI", "name.last")) +``` + +Check out the [collate project](https://github.com/tidwall/collate) for more information. + +## Data Expiration +Items can be automatically evicted by using the `SetOptions` object in the `Set` function to set a `TTL`. + +```go +db.Update(func(tx *buntdb.Tx) error { + tx.Set("mykey", "myval", &buntdb.SetOptions{Expires:true, TTL:time.Second}) + return nil +}) +``` + +Now `mykey` will automatically be deleted after one second. You can remove the TTL by setting the value again with the same key/value, but with the options parameter set to nil. + +## Append-only File + +BuntDB uses an AOF (append-only file) which is a log of all database changes that occur from operations like `Set()` and `Delete()`. + +The format of this file looks like: +``` +set key:1 value1 +set key:2 value2 +set key:1 value3 +del key:2 +... +``` + +When the database opens again, it will read back the aof file and process each command in exact order. +This read process happens one time when the database opens. +From there on the file is only appended. 
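+
+As a toy sketch only (this mirrors the simplified listing above, not BuntDB's
+actual on-disk encoding), replaying such a log amounts to applying each
+command, in order, to an in-memory map:
+
+```go
+import (
+	"bufio"
+	"io"
+	"strings"
+)
+
+// replay applies "set key value" and "del key" lines, in order, to a map.
+func replay(r io.Reader) (map[string]string, error) {
+	data := make(map[string]string)
+	sc := bufio.NewScanner(r)
+	for sc.Scan() {
+		f := strings.Fields(sc.Text())
+		switch {
+		case len(f) >= 3 && f[0] == "set":
+			data[f[1]] = strings.Join(f[2:], " ")
+		case len(f) == 2 && f[0] == "del":
+			delete(data, f[1])
+		}
+	}
+	return data, sc.Err()
+}
+```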
+
+As you may guess, this log file can grow large over time.
+There's a background routine that automatically shrinks the log file when it gets too large.
+There is also a `Shrink()` function which will rewrite the aof file so that it contains only the items in the database.
+The shrink operation does not lock up the database, so read and write transactions can continue while shrinking is in process.
+
+### Durability and fsync
+
+By default BuntDB executes an `fsync` once every second on the [aof file](#append-only-file), which means there's a chance that up to one second of data might be lost. If you need higher durability then there's an optional database config setting `Config.SyncPolicy` which can be set to `Always`.
+
+The `Config.SyncPolicy` has the following options:
+
+- `Never` - fsync is managed by the operating system, less safe
+- `EverySecond` - fsync every second, fast and safer, this is the default
+- `Always` - fsync after every write, very durable, slower
+
+## Config
+
+Here are some configuration options that can be used to change various behaviors of the database.
+
+- **SyncPolicy** adjusts how often the data is synced to disk. This value can be Never, EverySecond, or Always. Default is EverySecond.
+- **AutoShrinkPercentage** is used by the background process to trigger a shrink of the aof file when the size of the file is larger than the percentage of the result of the previous shrunk file. For example, if this value is 100, and the last shrink process resulted in a 100MB file, then the new aof file must be 200MB before a shrink is triggered. Default is 100.
+- **AutoShrinkMinSize** defines the minimum size of the aof file before an automatic shrink can occur. Default is 32MB.
+- **AutoShrinkDisabled** turns off automatic background shrinking. Default is false.
+
+To update the configuration you should call `ReadConfig` followed by `SetConfig`. For example:
+
+```go
+var config buntdb.Config
+if err := db.ReadConfig(&config); err != nil {
+	log.Fatal(err)
+}
+// Adjust any fields as needed before writing the configuration back.
+config.SyncPolicy = buntdb.Always
+if err := db.SetConfig(config); err != nil {
+	log.Fatal(err)
+}
+```
+
+## Performance
+
+How fast is BuntDB?
+
+Here are some example [benchmarks](https://github.com/tidwall/raft-buntdb#raftstore-performance-comparison) when using BuntDB in a Raft Store implementation.
+
+You can also run the standard Go benchmark tool from the project root directory:
+
+```
+go test --bench=.
+```
+
+### BuntDB-Benchmark
+
+There's a [custom utility](https://github.com/tidwall/buntdb-benchmark) that was created specifically for benchmarking BuntDB.
+ +*These are the results from running the benchmarks on a MacBook Pro 15" 2.8 GHz Intel Core i7:* + +``` +$ buntdb-benchmark -q +GET: 4609604.74 operations per second +SET: 248500.33 operations per second +ASCEND_100: 2268998.79 operations per second +ASCEND_200: 1178388.14 operations per second +ASCEND_400: 679134.20 operations per second +ASCEND_800: 348445.55 operations per second +DESCEND_100: 2313821.69 operations per second +DESCEND_200: 1292738.38 operations per second +DESCEND_400: 675258.76 operations per second +DESCEND_800: 337481.67 operations per second +SPATIAL_SET: 134824.60 operations per second +SPATIAL_INTERSECTS_100: 939491.47 operations per second +SPATIAL_INTERSECTS_200: 561590.40 operations per second +SPATIAL_INTERSECTS_400: 306951.15 operations per second +SPATIAL_INTERSECTS_800: 159673.91 operations per second +``` + +To install this utility: + +``` +go get github.com/tidwall/buntdb-benchmark +``` + + + +## Contact +Josh Baker [@tidwall](http://twitter.com/tidwall) + +## License + +BuntDB source code is available under the MIT [License](/LICENSE). diff --git a/vendor/github.com/tidwall/buntdb/buntdb.go b/vendor/github.com/tidwall/buntdb/buntdb.go new file mode 100644 index 0000000..3a08935 --- /dev/null +++ b/vendor/github.com/tidwall/buntdb/buntdb.go @@ -0,0 +1,2062 @@ +// Package buntdb implements a low-level in-memory key/value store in pure Go. +// It persists to disk, is ACID compliant, and uses locking for multiple +// readers and a single writer. Bunt is ideal for projects that need +// a dependable database, and favor speed over data size. +package buntdb + +import ( + "bufio" + "errors" + "io" + "os" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/tidwall/btree" + "github.com/tidwall/gjson" + "github.com/tidwall/grect" + "github.com/tidwall/match" + "github.com/tidwall/rtree" +) + +var ( + // ErrTxNotWritable is returned when performing a write operation on a + // read-only transaction. + ErrTxNotWritable = errors.New("tx not writable") + + // ErrTxClosed is returned when committing or rolling back a transaction + // that has already been committed or rolled back. + ErrTxClosed = errors.New("tx closed") + + // ErrNotFound is returned when an item or index is not in the database. + ErrNotFound = errors.New("not found") + + // ErrInvalid is returned when the database file is an invalid format. + ErrInvalid = errors.New("invalid database") + + // ErrDatabaseClosed is returned when the database is closed. + ErrDatabaseClosed = errors.New("database closed") + + // ErrIndexExists is returned when an index already exists in the database. + ErrIndexExists = errors.New("index exists") + + // ErrInvalidOperation is returned when an operation cannot be completed. + ErrInvalidOperation = errors.New("invalid operation") + + // ErrInvalidSyncPolicy is returned for an invalid SyncPolicy value. + ErrInvalidSyncPolicy = errors.New("invalid sync policy") + + // ErrShrinkInProcess is returned when a shrink operation is in-process. + ErrShrinkInProcess = errors.New("shrink is in-process") + + // ErrPersistenceActive is returned when post-loading data from an database + // not opened with Open(":memory:"). + ErrPersistenceActive = errors.New("persistence active") + + // ErrTxIterating is returned when Set or Delete are called while iterating. + ErrTxIterating = errors.New("tx is iterating") +) + +// DB represents a collection of key-value pairs that persist on disk. +// Transactions are used for all forms of data access to the DB. 
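+//
+// A minimal, illustrative usage sketch (the key and value below are
+// placeholders, not part of the API):
+//
+//	db, err := buntdb.Open(":memory:")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer db.Close()
+//	err = db.Update(func(tx *buntdb.Tx) error {
+//		_, _, err := tx.Set("mykey", "myvalue", nil)
+//		return err
+//	})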
+type DB struct { + mu sync.RWMutex // the gatekeeper for all fields + file *os.File // the underlying file + buf []byte // a buffer to write to + keys *btree.BTree // a tree of all item ordered by key + exps *btree.BTree // a tree of items ordered by expiration + idxs map[string]*index // the index trees. + exmgr bool // indicates that expires manager is running. + flushes int // a count of the number of disk flushes + closed bool // set when the database has been closed + config Config // the database configuration + persist bool // do we write to disk + shrinking bool // when an aof shrink is in-process. + lastaofsz int // the size of the last shrink aof size +} + +// SyncPolicy represents how often data is synced to disk. +type SyncPolicy int + +const ( + // Never is used to disable syncing data to disk. + // The faster and less safe method. + Never SyncPolicy = 0 + // EverySecond is used to sync data to disk every second. + // It's pretty fast and you can lose 1 second of data if there + // is a disaster. + // This is the recommended setting. + EverySecond = 1 + // Always is used to sync data after every write to disk. + // Slow. Very safe. + Always = 2 +) + +// Config represents database configuration options. These +// options are used to change various behaviors of the database. +type Config struct { + // SyncPolicy adjusts how often the data is synced to disk. + // This value can be Never, EverySecond, or Always. + // The default is EverySecond. + SyncPolicy SyncPolicy + + // AutoShrinkPercentage is used by the background process to trigger + // a shrink of the aof file when the size of the file is larger than the + // percentage of the result of the previous shrunk file. + // For example, if this value is 100, and the last shrink process + // resulted in a 100mb file, then the new aof file must be 200mb before + // a shrink is triggered. + AutoShrinkPercentage int + + // AutoShrinkMinSize defines the minimum size of the aof file before + // an automatic shrink can occur. + AutoShrinkMinSize int + + // AutoShrinkDisabled turns off automatic background shrinking + AutoShrinkDisabled bool + + // OnExpired is used to custom handle the deletion option when a key + // has been expired. + OnExpired func(keys []string) +} + +// exctx is a simple b-tree context for ordering by expiration. +type exctx struct { + db *DB +} + +// Default number of btree degrees +const btreeDegrees = 64 + +// Open opens a database at the provided path. +// If the file does not exist then it will be created automatically. +func Open(path string) (*DB, error) { + db := &DB{} + // initialize trees and indexes + db.keys = btree.New(btreeDegrees, nil) + db.exps = btree.New(btreeDegrees, &exctx{db}) + db.idxs = make(map[string]*index) + // initialize default configuration + db.config = Config{ + SyncPolicy: EverySecond, + AutoShrinkPercentage: 100, + AutoShrinkMinSize: 32 * 1024 * 1024, + } + // turn off persistence for pure in-memory + db.persist = path != ":memory:" + if db.persist { + var err error + // hardcoding 0666 as the default mode. + db.file, err = os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0666) + if err != nil { + return nil, err + } + // load the database from disk + if err := db.load(); err != nil { + // close on error, ignore close error + _ = db.file.Close() + return nil, err + } + } + // start the background manager. + go db.backgroundManager() + return db, nil +} + +// Close releases all database resources. +// All transactions must be closed before closing the database. 
+func (db *DB) Close() error { + db.mu.Lock() + defer db.mu.Unlock() + if db.closed { + return ErrDatabaseClosed + } + db.closed = true + if db.persist { + db.file.Sync() // do a sync but ignore the error + if err := db.file.Close(); err != nil { + return err + } + } + // Let's release all references to nil. This will help both with debugging + // late usage panics and it provides a hint to the garbage collector + db.keys, db.exps, db.idxs, db.file = nil, nil, nil, nil + return nil +} + +// Save writes a snapshot of the database to a writer. This operation blocks all +// writes, but not reads. This can be used for snapshots and backups for pure +// in-memory databases using the ":memory:". Database that persist to disk +// can be snapshotted by simply copying the database file. +func (db *DB) Save(wr io.Writer) error { + var err error + db.mu.RLock() + defer db.mu.RUnlock() + // use a buffered writer and flush every 4MB + var buf []byte + // iterated through every item in the database and write to the buffer + db.keys.Ascend(func(item btree.Item) bool { + dbi := item.(*dbItem) + buf = dbi.writeSetTo(buf) + if len(buf) > 1024*1024*4 { + // flush when buffer is over 4MB + _, err = wr.Write(buf) + if err != nil { + return false + } + buf = buf[:0] + } + return true + }) + if err != nil { + return err + } + // one final flush + if len(buf) > 0 { + _, err = wr.Write(buf) + if err != nil { + return err + } + } + return nil +} + +// Load loads commands from reader. This operation blocks all reads and writes. +// Note that this can only work for fully in-memory databases opened with +// Open(":memory:"). +func (db *DB) Load(rd io.Reader) error { + db.mu.Lock() + defer db.mu.Unlock() + if db.persist { + // cannot load into databases that persist to disk + return ErrPersistenceActive + } + return db.readLoad(rd, time.Now()) +} + +// index represents a b-tree or r-tree index and also acts as the +// b-tree/r-tree context for itself. +type index struct { + btr *btree.BTree // contains the items + rtr *rtree.RTree // contains the items + name string // name of the index + pattern string // a required key pattern + less func(a, b string) bool // less comparison function + rect func(item string) (min, max []float64) // rect from string function + db *DB // the origin database + opts IndexOptions // index options +} + +// match matches the pattern to the key +func (idx *index) match(key string) bool { + if idx.pattern == "*" { + return true + } + if idx.opts.CaseInsensitiveKeyMatching { + for i := 0; i < len(key); i++ { + if key[i] >= 'A' && key[i] <= 'Z' { + key = strings.ToLower(key) + break + } + } + } + return match.Match(key, idx.pattern) +} + +// clearCopy creates a copy of the index, but with an empty dataset. 
+func (idx *index) clearCopy() *index { + // copy the index meta information + nidx := &index{ + name: idx.name, + pattern: idx.pattern, + db: idx.db, + less: idx.less, + rect: idx.rect, + opts: idx.opts, + } + // initialize with empty trees + if nidx.less != nil { + nidx.btr = btree.New(btreeDegrees, nidx) + } + if nidx.rect != nil { + nidx.rtr = rtree.New(nidx) + } + return nidx +} + +// rebuild rebuilds the index +func (idx *index) rebuild() { + // initialize trees + if idx.less != nil { + idx.btr = btree.New(btreeDegrees, idx) + } + if idx.rect != nil { + idx.rtr = rtree.New(idx) + } + // iterate through all keys and fill the index + idx.db.keys.Ascend(func(item btree.Item) bool { + dbi := item.(*dbItem) + if !idx.match(dbi.key) { + // does not match the pattern, conintue + return true + } + if idx.less != nil { + idx.btr.ReplaceOrInsert(dbi) + } + if idx.rect != nil { + idx.rtr.Insert(dbi) + } + return true + }) +} + +// CreateIndex builds a new index and populates it with items. +// The items are ordered in an b-tree and can be retrieved using the +// Ascend* and Descend* methods. +// An error will occur if an index with the same name already exists. +// +// When a pattern is provided, the index will be populated with +// keys that match the specified pattern. This is a very simple pattern +// match where '*' matches on any number characters and '?' matches on +// any one character. +// The less function compares if string 'a' is less than string 'b'. +// It allows for indexes to create custom ordering. It's possible +// that the strings may be textual or binary. It's up to the provided +// less function to handle the content format and comparison. +// There are some default less function that can be used such as +// IndexString, IndexBinary, etc. +// +// Deprecated: Use Transactions +func (db *DB) CreateIndex(name, pattern string, + less ...func(a, b string) bool) error { + return db.Update(func(tx *Tx) error { + return tx.CreateIndex(name, pattern, less...) + }) +} + +// ReplaceIndex builds a new index and populates it with items. +// The items are ordered in an b-tree and can be retrieved using the +// Ascend* and Descend* methods. +// If a previous index with the same name exists, that index will be deleted. +// +// Deprecated: Use Transactions +func (db *DB) ReplaceIndex(name, pattern string, + less ...func(a, b string) bool) error { + return db.Update(func(tx *Tx) error { + err := tx.CreateIndex(name, pattern, less...) + if err != nil { + if err == ErrIndexExists { + err := tx.DropIndex(name) + if err != nil { + return err + } + return tx.CreateIndex(name, pattern, less...) + } + return err + } + return nil + }) +} + +// CreateSpatialIndex builds a new index and populates it with items. +// The items are organized in an r-tree and can be retrieved using the +// Intersects method. +// An error will occur if an index with the same name already exists. +// +// The rect function converts a string to a rectangle. The rectangle is +// represented by two arrays, min and max. Both arrays may have a length +// between 1 and 20, and both arrays must match in length. A length of 1 is a +// one dimensional rectangle, and a length of 4 is a four dimension rectangle. +// There is support for up to 20 dimensions. +// The values of min must be less than the values of max at the same dimension. +// Thus min[0] must be less-than-or-equal-to max[0]. +// The IndexRect is a default function that can be used for the rect +// parameter. 
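+//
+// An illustrative call (the index name and key pattern are placeholders):
+//
+//	db.CreateSpatialIndex("fleet", "fleet:*:pos", buntdb.IndexRect)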
+// +// Deprecated: Use Transactions +func (db *DB) CreateSpatialIndex(name, pattern string, + rect func(item string) (min, max []float64)) error { + return db.Update(func(tx *Tx) error { + return tx.CreateSpatialIndex(name, pattern, rect) + }) +} + +// ReplaceSpatialIndex builds a new index and populates it with items. +// The items are organized in an r-tree and can be retrieved using the +// Intersects method. +// If a previous index with the same name exists, that index will be deleted. +// +// Deprecated: Use Transactions +func (db *DB) ReplaceSpatialIndex(name, pattern string, + rect func(item string) (min, max []float64)) error { + return db.Update(func(tx *Tx) error { + err := tx.CreateSpatialIndex(name, pattern, rect) + if err != nil { + if err == ErrIndexExists { + err := tx.DropIndex(name) + if err != nil { + return err + } + return tx.CreateSpatialIndex(name, pattern, rect) + } + return err + } + return nil + }) +} + +// DropIndex removes an index. +// +// Deprecated: Use Transactions +func (db *DB) DropIndex(name string) error { + return db.Update(func(tx *Tx) error { + return tx.DropIndex(name) + }) +} + +// Indexes returns a list of index names. +// +// Deprecated: Use Transactions +func (db *DB) Indexes() ([]string, error) { + var names []string + var err = db.View(func(tx *Tx) error { + var err error + names, err = tx.Indexes() + return err + }) + return names, err +} + +// ReadConfig returns the database configuration. +func (db *DB) ReadConfig(config *Config) error { + db.mu.RLock() + defer db.mu.RUnlock() + if db.closed { + return ErrDatabaseClosed + } + *config = db.config + return nil +} + +// SetConfig updates the database configuration. +func (db *DB) SetConfig(config Config) error { + db.mu.Lock() + defer db.mu.Unlock() + if db.closed { + return ErrDatabaseClosed + } + switch config.SyncPolicy { + default: + return ErrInvalidSyncPolicy + case Never, EverySecond, Always: + } + db.config = config + return nil +} + +// insertIntoDatabase performs inserts an item in to the database and updates +// all indexes. If a previous item with the same key already exists, that item +// will be replaced with the new one, and return the previous item. +func (db *DB) insertIntoDatabase(item *dbItem) *dbItem { + var pdbi *dbItem + prev := db.keys.ReplaceOrInsert(item) + if prev != nil { + // A previous item was removed from the keys tree. Let's + // fully delete this item from all indexes. + pdbi = prev.(*dbItem) + if pdbi.opts != nil && pdbi.opts.ex { + // Remove it from the exipres tree. + db.exps.Delete(pdbi) + } + for _, idx := range db.idxs { + if idx.btr != nil { + // Remove it from the btree index. + idx.btr.Delete(pdbi) + } + if idx.rtr != nil { + // Remove it from the rtree index. + idx.rtr.Remove(pdbi) + } + } + } + if item.opts != nil && item.opts.ex { + // The new item has eviction options. Add it to the + // expires tree + db.exps.ReplaceOrInsert(item) + } + for _, idx := range db.idxs { + if !idx.match(item.key) { + continue + } + if idx.btr != nil { + // Add new item to btree index. + idx.btr.ReplaceOrInsert(item) + } + if idx.rtr != nil { + // Add new item to rtree index. + idx.rtr.Insert(item) + } + } + // we must return the previous item to the caller. + return pdbi +} + +// deleteFromDatabase removes and item from the database and indexes. The input +// item must only have the key field specified thus "&dbItem{key: key}" is all +// that is needed to fully remove the item with the matching key. 
If an item +// with the matching key was found in the database, it will be removed and +// returned to the caller. A nil return value means that the item was not +// found in the database +func (db *DB) deleteFromDatabase(item *dbItem) *dbItem { + var pdbi *dbItem + prev := db.keys.Delete(item) + if prev != nil { + pdbi = prev.(*dbItem) + if pdbi.opts != nil && pdbi.opts.ex { + // Remove it from the exipres tree. + db.exps.Delete(pdbi) + } + for _, idx := range db.idxs { + if idx.btr != nil { + // Remove it from the btree index. + idx.btr.Delete(pdbi) + } + if idx.rtr != nil { + // Remove it from the rtree index. + idx.rtr.Remove(pdbi) + } + } + } + return pdbi +} + +// backgroundManager runs continuously in the background and performs various +// operations such as removing expired items and syncing to disk. +func (db *DB) backgroundManager() { + flushes := 0 + t := time.NewTicker(time.Second) + defer t.Stop() + for range t.C { + var shrink bool + // Open a standard view. This will take a full lock of the + // database thus allowing for access to anything we need. + var onExpired func([]string) + var expired []string + err := db.Update(func(tx *Tx) error { + onExpired = db.config.OnExpired + if db.persist && !db.config.AutoShrinkDisabled { + pos, err := db.file.Seek(0, 1) + if err != nil { + return err + } + aofsz := int(pos) + if aofsz > db.config.AutoShrinkMinSize { + prc := float64(db.config.AutoShrinkPercentage) / 100.0 + shrink = aofsz > db.lastaofsz+int(float64(db.lastaofsz)*prc) + } + } + // produce a list of expired items that need removing + db.exps.AscendLessThan(&dbItem{ + opts: &dbItemOpts{ex: true, exat: time.Now()}, + }, func(item btree.Item) bool { + expired = append(expired, item.(*dbItem).key) + return true + }) + if onExpired == nil { + for _, key := range expired { + if _, err := tx.Delete(key); err != nil { + // it's ok to get a "not found" because the + // 'Delete' method reports "not found" for + // expired items. + if err != ErrNotFound { + return err + } + } + } + } + return nil + }) + if err == ErrDatabaseClosed { + break + } + + // send expired event, if needed + if onExpired != nil && len(expired) > 0 { + onExpired(expired) + } + + // execute a disk sync, if needed + func() { + db.mu.Lock() + defer db.mu.Unlock() + if db.persist && db.config.SyncPolicy == EverySecond && + flushes != db.flushes { + _ = db.file.Sync() + flushes = db.flushes + } + }() + if shrink { + if err = db.Shrink(); err != nil { + if err == ErrDatabaseClosed { + break + } + } + } + } +} + +// Shrink will make the database file smaller by removing redundant +// log entries. This operation does not block the database. +func (db *DB) Shrink() error { + db.mu.Lock() + if db.closed { + db.mu.Unlock() + return ErrDatabaseClosed + } + if !db.persist { + // The database was opened with ":memory:" as the path. + // There is no persistence, and no need to do anything here. + db.mu.Unlock() + return nil + } + if db.shrinking { + // The database is already in the process of shrinking. + db.mu.Unlock() + return ErrShrinkInProcess + } + db.shrinking = true + defer func() { + db.mu.Lock() + db.shrinking = false + db.mu.Unlock() + }() + fname := db.file.Name() + tmpname := fname + ".tmp" + // the endpos is used to return to the end of the file when we are + // finished writing all of the current items. 
+ endpos, err := db.file.Seek(0, 2) + if err != nil { + return err + } + db.mu.Unlock() + time.Sleep(time.Second / 4) // wait just a bit before starting + f, err := os.Create(tmpname) + if err != nil { + return err + } + defer func() { + _ = f.Close() + _ = os.RemoveAll(tmpname) + }() + + // we are going to read items in as chunks as to not hold up the database + // for too long. + var buf []byte + pivot := "" + done := false + for !done { + err := func() error { + db.mu.RLock() + defer db.mu.RUnlock() + if db.closed { + return ErrDatabaseClosed + } + done = true + var n int + db.keys.AscendGreaterOrEqual(&dbItem{key: pivot}, + func(item btree.Item) bool { + dbi := item.(*dbItem) + // 1000 items or 64MB buffer + if n > 1000 || len(buf) > 64*1024*1024 { + pivot = dbi.key + done = false + return false + } + buf = dbi.writeSetTo(buf) + n++ + return true + }, + ) + if len(buf) > 0 { + if _, err := f.Write(buf); err != nil { + return err + } + buf = buf[:0] + } + return nil + }() + if err != nil { + return err + } + } + // We reached this far so all of the items have been written to a new tmp + // There's some more work to do by appending the new line from the aof + // to the tmp file and finally swap the files out. + return func() error { + // We're wrapping this in a function to get the benefit of a defered + // lock/unlock. + db.mu.Lock() + defer db.mu.Unlock() + if db.closed { + return ErrDatabaseClosed + } + // We are going to open a new version of the aof file so that we do + // not change the seek position of the previous. This may cause a + // problem in the future if we choose to use syscall file locking. + aof, err := os.Open(fname) + if err != nil { + return err + } + defer func() { _ = aof.Close() }() + if _, err := aof.Seek(endpos, 0); err != nil { + return err + } + // Just copy all of the new commands that have occurred since we + // started the shrink process. + if _, err := io.Copy(f, aof); err != nil { + return err + } + // Close all files + if err := aof.Close(); err != nil { + return err + } + if err := f.Close(); err != nil { + return err + } + if err := db.file.Close(); err != nil { + return err + } + // Any failures below here is really bad. So just panic. + if err := os.Rename(tmpname, fname); err != nil { + panic(err) + } + db.file, err = os.OpenFile(fname, os.O_CREATE|os.O_RDWR, 0666) + if err != nil { + panic(err) + } + pos, err := db.file.Seek(0, 2) + if err != nil { + return err + } + db.lastaofsz = int(pos) + return nil + }() +} + +var errValidEOF = errors.New("valid eof") + +// readLoad reads from the reader and loads commands into the database. +// modTime is the modified time of the reader, should be no greater than +// the current time.Now(). +func (db *DB) readLoad(rd io.Reader, modTime time.Time) error { + data := make([]byte, 4096) + parts := make([]string, 0, 8) + r := bufio.NewReader(rd) + for { + // read a single command. + // first we should read the number of parts that the of the command + line, err := r.ReadBytes('\n') + if err != nil { + if len(line) > 0 { + // got an eof but also data. this should be an unexpected eof. 
+ return io.ErrUnexpectedEOF + } + if err == io.EOF { + break + } + return err + } + if line[0] != '*' { + return ErrInvalid + } + // convert the string number to and int + var n int + if len(line) == 4 && line[len(line)-2] == '\r' { + if line[1] < '0' || line[1] > '9' { + return ErrInvalid + } + n = int(line[1] - '0') + } else { + if len(line) < 5 || line[len(line)-2] != '\r' { + return ErrInvalid + } + for i := 1; i < len(line)-2; i++ { + if line[i] < '0' || line[i] > '9' { + return ErrInvalid + } + n = n*10 + int(line[i]-'0') + } + } + // read each part of the command. + parts = parts[:0] + for i := 0; i < n; i++ { + // read the number of bytes of the part. + line, err := r.ReadBytes('\n') + if err != nil { + return err + } + if line[0] != '$' { + return ErrInvalid + } + // convert the string number to and int + var n int + if len(line) == 4 && line[len(line)-2] == '\r' { + if line[1] < '0' || line[1] > '9' { + return ErrInvalid + } + n = int(line[1] - '0') + } else { + if len(line) < 5 || line[len(line)-2] != '\r' { + return ErrInvalid + } + for i := 1; i < len(line)-2; i++ { + if line[i] < '0' || line[i] > '9' { + return ErrInvalid + } + n = n*10 + int(line[i]-'0') + } + } + // resize the read buffer + if len(data) < n+2 { + dataln := len(data) + for dataln < n+2 { + dataln *= 2 + } + data = make([]byte, dataln) + } + if _, err = io.ReadFull(r, data[:n+2]); err != nil { + return err + } + if data[n] != '\r' || data[n+1] != '\n' { + return ErrInvalid + } + // copy string + parts = append(parts, string(data[:n])) + } + // finished reading the command + + if len(parts) == 0 { + continue + } + if (parts[0][0] == 's' || parts[0][1] == 'S') && + (parts[0][1] == 'e' || parts[0][1] == 'E') && + (parts[0][2] == 't' || parts[0][2] == 'T') { + // SET + if len(parts) < 3 || len(parts) == 4 || len(parts) > 5 { + return ErrInvalid + } + if len(parts) == 5 { + if strings.ToLower(parts[3]) != "ex" { + return ErrInvalid + } + ex, err := strconv.ParseInt(parts[4], 10, 64) + if err != nil { + return err + } + now := time.Now() + dur := (time.Duration(ex) * time.Second) - now.Sub(modTime) + if dur > 0 { + db.insertIntoDatabase(&dbItem{ + key: parts[1], + val: parts[2], + opts: &dbItemOpts{ + ex: true, + exat: now.Add(dur), + }, + }) + } + } else { + db.insertIntoDatabase(&dbItem{key: parts[1], val: parts[2]}) + } + } else if (parts[0][0] == 'd' || parts[0][1] == 'D') && + (parts[0][1] == 'e' || parts[0][1] == 'E') && + (parts[0][2] == 'l' || parts[0][2] == 'L') { + // DEL + if len(parts) != 2 { + return ErrInvalid + } + db.deleteFromDatabase(&dbItem{key: parts[1]}) + } else if (parts[0][0] == 'f' || parts[0][1] == 'F') && + strings.ToLower(parts[0]) == "flushdb" { + db.keys = btree.New(btreeDegrees, nil) + db.exps = btree.New(btreeDegrees, &exctx{db}) + db.idxs = make(map[string]*index) + } else { + return ErrInvalid + } + } + return nil +} + +// load reads entries from the append only database file and fills the database. +// The file format uses the Redis append only file format, which is and a series +// of RESP commands. For more information on RESP please read +// http://redis.io/topics/protocol. The only supported RESP commands are DEL and +// SET. 
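+//
+// For example, a `set key:1 value1` entry is stored on disk as:
+//
+//	*3\r\n$3\r\nset\r\n$5\r\nkey:1\r\n$6\r\nvalue1\r\n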
+func (db *DB) load() error { + fi, err := db.file.Stat() + if err != nil { + return err + } + if err := db.readLoad(db.file, fi.ModTime()); err != nil { + return err + } + pos, err := db.file.Seek(0, 2) + if err != nil { + return err + } + db.lastaofsz = int(pos) + return nil +} + +// managed calls a block of code that is fully contained in a transaction. +// This method is intended to be wrapped by Update and View +func (db *DB) managed(writable bool, fn func(tx *Tx) error) (err error) { + var tx *Tx + tx, err = db.Begin(writable) + if err != nil { + return + } + defer func() { + if err != nil { + // The caller returned an error. We must rollback. + _ = tx.Rollback() + return + } + if writable { + // Everything went well. Lets Commit() + err = tx.Commit() + } else { + // read-only transaction can only roll back. + err = tx.Rollback() + } + }() + tx.funcd = true + defer func() { + tx.funcd = false + }() + err = fn(tx) + return +} + +// View executes a function within a managed read-only transaction. +// When a non-nil error is returned from the function that error will be return +// to the caller of View(). +// +// Executing a manual commit or rollback from inside the function will result +// in a panic. +func (db *DB) View(fn func(tx *Tx) error) error { + return db.managed(false, fn) +} + +// Update executes a function within a managed read/write transaction. +// The transaction has been committed when no error is returned. +// In the event that an error is returned, the transaction will be rolled back. +// When a non-nil error is returned from the function, the transaction will be +// rolled back and the that error will be return to the caller of Update(). +// +// Executing a manual commit or rollback from inside the function will result +// in a panic. +func (db *DB) Update(fn func(tx *Tx) error) error { + return db.managed(true, fn) +} + +// get return an item or nil if not found. +func (db *DB) get(key string) *dbItem { + item := db.keys.Get(&dbItem{key: key}) + if item != nil { + return item.(*dbItem) + } + return nil +} + +// Tx represents a transaction on the database. This transaction can either be +// read-only or read/write. Read-only transactions can be used for retrieving +// values for keys and iterating through keys and values. Read/write +// transactions can set and delete keys. +// +// All transactions must be committed or rolled-back when done. +type Tx struct { + db *DB // the underlying database. + writable bool // when false mutable operations fail. + funcd bool // when true Commit and Rollback panic. + wc *txWriteContext // context for writable transactions. +} + +type txWriteContext struct { + // rollback when deleteAll is called + rbkeys *btree.BTree // a tree of all item ordered by key + rbexps *btree.BTree // a tree of items ordered by expiration + rbidxs map[string]*index // the index trees. + + rollbackItems map[string]*dbItem // details for rolling back tx. + commitItems map[string]*dbItem // details for committing tx. + itercount int // stack of iterators + rollbackIndexes map[string]*index // details for dropped indexes. +} + +// DeleteAll deletes all items from the database. +func (tx *Tx) DeleteAll() error { + if tx.db == nil { + return ErrTxClosed + } else if !tx.writable { + return ErrTxNotWritable + } else if tx.wc.itercount > 0 { + return ErrTxIterating + } + + // check to see if we've already deleted everything + if tx.wc.rbkeys == nil { + // we need to backup the live data in case of a rollback. 
+ tx.wc.rbkeys = tx.db.keys + tx.wc.rbexps = tx.db.exps + tx.wc.rbidxs = tx.db.idxs + } + + // now reset the live database trees + tx.db.keys = btree.New(btreeDegrees, nil) + tx.db.exps = btree.New(btreeDegrees, &exctx{tx.db}) + tx.db.idxs = make(map[string]*index) + + // finally re-create the indexes + for name, idx := range tx.wc.rbidxs { + tx.db.idxs[name] = idx.clearCopy() + } + + // always clear out the commits + tx.wc.commitItems = make(map[string]*dbItem) + + return nil +} + +// Begin opens a new transaction. +// Multiple read-only transactions can be opened at the same time but there can +// only be one read/write transaction at a time. Attempting to open a read/write +// transactions while another one is in progress will result in blocking until +// the current read/write transaction is completed. +// +// All transactions must be closed by calling Commit() or Rollback() when done. +func (db *DB) Begin(writable bool) (*Tx, error) { + tx := &Tx{ + db: db, + writable: writable, + } + tx.lock() + if db.closed { + tx.unlock() + return nil, ErrDatabaseClosed + } + if writable { + // writable transactions have a writeContext object that + // contains information about changes to the database. + tx.wc = &txWriteContext{} + tx.wc.rollbackItems = make(map[string]*dbItem) + tx.wc.rollbackIndexes = make(map[string]*index) + if db.persist { + tx.wc.commitItems = make(map[string]*dbItem) + } + } + return tx, nil +} + +// lock locks the database based on the transaction type. +func (tx *Tx) lock() { + if tx.writable { + tx.db.mu.Lock() + } else { + tx.db.mu.RLock() + } +} + +// unlock unlocks the database based on the transaction type. +func (tx *Tx) unlock() { + if tx.writable { + tx.db.mu.Unlock() + } else { + tx.db.mu.RUnlock() + } +} + +// rollbackInner handles the underlying rollback logic. +// Intended to be called from Commit() and Rollback(). +func (tx *Tx) rollbackInner() { + // rollback the deleteAll if needed + if tx.wc.rbkeys != nil { + tx.db.keys = tx.wc.rbkeys + tx.db.idxs = tx.wc.rbidxs + tx.db.exps = tx.wc.rbexps + } + for key, item := range tx.wc.rollbackItems { + tx.db.deleteFromDatabase(&dbItem{key: key}) + if item != nil { + // When an item is not nil, we will need to reinsert that item + // into the database overwriting the current one. + tx.db.insertIntoDatabase(item) + } + } + for name, idx := range tx.wc.rollbackIndexes { + delete(tx.db.idxs, name) + if idx != nil { + // When an index is not nil, we will need to rebuilt that index + // this could be an expensive process if the database has many + // items or the index is complex. + tx.db.idxs[name] = idx + idx.rebuild() + } + } +} + +// Commit writes all changes to disk. +// An error is returned when a write error occurs, or when a Commit() is called +// from a read-only transaction. +func (tx *Tx) Commit() error { + if tx.funcd { + panic("managed tx commit not allowed") + } + if tx.db == nil { + return ErrTxClosed + } else if !tx.writable { + return ErrTxNotWritable + } + var err error + if tx.db.persist && (len(tx.wc.commitItems) > 0 || tx.wc.rbkeys != nil) { + tx.db.buf = tx.db.buf[:0] + // write a flushdb if a deleteAll was called. + if tx.wc.rbkeys != nil { + tx.db.buf = append(tx.db.buf, "*1\r\n$7\r\nflushdb\r\n"...) + } + // Each committed record is written to disk + for key, item := range tx.wc.commitItems { + if item == nil { + tx.db.buf = (&dbItem{key: key}).writeDeleteTo(tx.db.buf) + } else { + tx.db.buf = item.writeSetTo(tx.db.buf) + } + } + // Flushing the buffer only once per transaction. 
+ // If this operation fails then the write did failed and we must + // rollback. + if _, err = tx.db.file.Write(tx.db.buf); err != nil { + tx.rollbackInner() + } + if tx.db.config.SyncPolicy == Always { + _ = tx.db.file.Sync() + } + // Increment the number of flushes. The background syncing uses this. + tx.db.flushes++ + } + // Unlock the database and allow for another writable transaction. + tx.unlock() + // Clear the db field to disable this transaction from future use. + tx.db = nil + return err +} + +// Rollback closes the transaction and reverts all mutable operations that +// were performed on the transaction such as Set() and Delete(). +// +// Read-only transactions can only be rolled back, not committed. +func (tx *Tx) Rollback() error { + if tx.funcd { + panic("managed tx rollback not allowed") + } + if tx.db == nil { + return ErrTxClosed + } + // The rollback func does the heavy lifting. + if tx.writable { + tx.rollbackInner() + } + // unlock the database for more transactions. + tx.unlock() + // Clear the db field to disable this transaction from future use. + tx.db = nil + return nil +} + +// dbItemOpts holds various meta information about an item. +type dbItemOpts struct { + ex bool // does this item expire? + exat time.Time // when does this item expire? +} +type dbItem struct { + key, val string // the binary key and value + opts *dbItemOpts // optional meta information +} + +func appendArray(buf []byte, count int) []byte { + buf = append(buf, '*') + buf = append(buf, strconv.FormatInt(int64(count), 10)...) + buf = append(buf, '\r', '\n') + return buf +} + +func appendBulkString(buf []byte, s string) []byte { + buf = append(buf, '$') + buf = append(buf, strconv.FormatInt(int64(len(s)), 10)...) + buf = append(buf, '\r', '\n') + buf = append(buf, s...) + buf = append(buf, '\r', '\n') + return buf +} + +// writeSetTo writes an item as a single SET record to the a bufio Writer. +func (dbi *dbItem) writeSetTo(buf []byte) []byte { + if dbi.opts != nil && dbi.opts.ex { + ex := dbi.opts.exat.Sub(time.Now()) / time.Second + buf = appendArray(buf, 5) + buf = appendBulkString(buf, "set") + buf = appendBulkString(buf, dbi.key) + buf = appendBulkString(buf, dbi.val) + buf = appendBulkString(buf, "ex") + buf = appendBulkString(buf, strconv.FormatUint(uint64(ex), 10)) + } else { + buf = appendArray(buf, 3) + buf = appendBulkString(buf, "set") + buf = appendBulkString(buf, dbi.key) + buf = appendBulkString(buf, dbi.val) + } + return buf +} + +// writeSetTo writes an item as a single DEL record to the a bufio Writer. +func (dbi *dbItem) writeDeleteTo(buf []byte) []byte { + buf = appendArray(buf, 2) + buf = appendBulkString(buf, "del") + buf = appendBulkString(buf, dbi.key) + return buf +} + +// expired evaluates id the item has expired. This will always return false when +// the item does not have `opts.ex` set to true. +func (dbi *dbItem) expired() bool { + return dbi.opts != nil && dbi.opts.ex && time.Now().After(dbi.opts.exat) +} + +// MaxTime from http://stackoverflow.com/questions/25065055#32620397 +// This is a long time in the future. It's an imaginary number that is +// used for b-tree ordering. +var maxTime = time.Unix(1<<63-62135596801, 999999999) + +// expiresAt will return the time when the item will expire. When an item does +// not expire `maxTime` is used. +func (dbi *dbItem) expiresAt() time.Time { + if dbi.opts == nil || !dbi.opts.ex { + return maxTime + } + return dbi.opts.exat +} + +// Less determines if a b-tree item is less than another. 
This is required +// for ordering, inserting, and deleting items from a b-tree. It's important +// to note that the ctx parameter is used to help with determine which +// formula to use on an item. Each b-tree should use a different ctx when +// sharing the same item. +func (dbi *dbItem) Less(item btree.Item, ctx interface{}) bool { + dbi2 := item.(*dbItem) + switch ctx := ctx.(type) { + case *exctx: + // The expires b-tree formula + if dbi2.expiresAt().After(dbi.expiresAt()) { + return true + } + if dbi.expiresAt().After(dbi2.expiresAt()) { + return false + } + case *index: + if ctx.less != nil { + // Using an index + if ctx.less(dbi.val, dbi2.val) { + return true + } + if ctx.less(dbi2.val, dbi.val) { + return false + } + } + } + // Always fall back to the key comparison. This creates absolute uniqueness. + return dbi.key < dbi2.key +} + +// Rect converts a string to a rectangle. +// An invalid rectangle will cause a panic. +func (dbi *dbItem) Rect(ctx interface{}) (min, max []float64) { + switch ctx := ctx.(type) { + case *index: + return ctx.rect(dbi.val) + } + return nil, nil +} + +// SetOptions represents options that may be included with the Set() command. +type SetOptions struct { + // Expires indicates that the Set() key-value will expire + Expires bool + // TTL is how much time the key-value will exist in the database + // before being evicted. The Expires field must also be set to true. + // TTL stands for Time-To-Live. + TTL time.Duration +} + +// GetLess returns the less function for an index. This is handy for +// doing ad-hoc compares inside a transaction. +// Returns ErrNotFound if the index is not found or there is no less +// function bound to the index +func (tx *Tx) GetLess(index string) (func(a, b string) bool, error) { + if tx.db == nil { + return nil, ErrTxClosed + } + idx, ok := tx.db.idxs[index] + if !ok || idx.less == nil { + return nil, ErrNotFound + } + return idx.less, nil +} + +// GetRect returns the rect function for an index. This is handy for +// doing ad-hoc searches inside a transaction. +// Returns ErrNotFound if the index is not found or there is no rect +// function bound to the index +func (tx *Tx) GetRect(index string) (func(s string) (min, max []float64), + error) { + if tx.db == nil { + return nil, ErrTxClosed + } + idx, ok := tx.db.idxs[index] + if !ok || idx.rect == nil { + return nil, ErrNotFound + } + return idx.rect, nil +} + +// Set inserts or replaces an item in the database based on the key. +// The opt params may be used for additional functionality such as forcing +// the item to be evicted at a specified time. When the return value +// for err is nil the operation succeeded. When the return value of +// replaced is true, then the operaton replaced an existing item whose +// value will be returned through the previousValue variable. +// The results of this operation will not be available to other +// transactions until the current transaction has successfully committed. +// +// Only a writable transaction can be used with this operation. +// This operation is not allowed during iterations such as Ascend* & Descend*. +func (tx *Tx) Set(key, value string, opts *SetOptions) (previousValue string, + replaced bool, err error) { + if tx.db == nil { + return "", false, ErrTxClosed + } else if !tx.writable { + return "", false, ErrTxNotWritable + } else if tx.wc.itercount > 0 { + return "", false, ErrTxIterating + } + item := &dbItem{key: key, val: value} + if opts != nil { + if opts.Expires { + // The caller is requesting that this item expires. 
Convert the + // TTL to an absolute time and bind it to the item. + item.opts = &dbItemOpts{ex: true, exat: time.Now().Add(opts.TTL)} + } + } + // Insert the item into the keys tree. + prev := tx.db.insertIntoDatabase(item) + + // insert into the rollback map if there has not been a deleteAll. + if tx.wc.rbkeys == nil { + if prev == nil { + // An item with the same key did not previously exist. Let's + // create a rollback entry with a nil value. A nil value indicates + // that the entry should be deleted on rollback. When the value is + // *not* nil, that means the entry should be reverted. + tx.wc.rollbackItems[key] = nil + } else { + // A previous item already exists in the database. Let's create a + // rollback entry with the item as the value. We need to check the + // map to see if there isn't already an item that matches the + // same key. + if _, ok := tx.wc.rollbackItems[key]; !ok { + tx.wc.rollbackItems[key] = prev + } + if !prev.expired() { + previousValue, replaced = prev.val, true + } + } + } + // For commits we simply assign the item to the map. We use this map to + // write the entry to disk. + if tx.db.persist { + tx.wc.commitItems[key] = item + } + return previousValue, replaced, nil +} + +// Get returns a value for a key. If the item does not exist or if the item +// has expired then ErrNotFound is returned. +func (tx *Tx) Get(key string) (val string, err error) { + if tx.db == nil { + return "", ErrTxClosed + } + item := tx.db.get(key) + if item == nil || item.expired() { + // The item does not exists or has expired. Let's assume that + // the caller is only interested in items that have not expired. + return "", ErrNotFound + } + return item.val, nil +} + +// Delete removes an item from the database based on the item's key. If the item +// does not exist or if the item has expired then ErrNotFound is returned. +// +// Only a writable transaction can be used for this operation. +// This operation is not allowed during iterations such as Ascend* & Descend*. +func (tx *Tx) Delete(key string) (val string, err error) { + if tx.db == nil { + return "", ErrTxClosed + } else if !tx.writable { + return "", ErrTxNotWritable + } else if tx.wc.itercount > 0 { + return "", ErrTxIterating + } + item := tx.db.deleteFromDatabase(&dbItem{key: key}) + if item == nil { + return "", ErrNotFound + } + // create a rollback entry if there has not been a deleteAll call. + if tx.wc.rbkeys == nil { + if _, ok := tx.wc.rollbackItems[key]; !ok { + tx.wc.rollbackItems[key] = item + } + } + if tx.db.persist { + tx.wc.commitItems[key] = nil + } + // Even though the item has been deleted, we still want to check + // if it has expired. An expired item should not be returned. + if item.expired() { + // The item exists in the tree, but has expired. Let's assume that + // the caller is only interested in items that have not expired. + return "", ErrNotFound + } + return item.val, nil +} + +// TTL returns the remaining time-to-live for an item. +// A negative duration will be returned for items that do not have an +// expiration. +func (tx *Tx) TTL(key string) (time.Duration, error) { + if tx.db == nil { + return 0, ErrTxClosed + } + item := tx.db.get(key) + if item == nil { + return 0, ErrNotFound + } else if item.opts == nil || !item.opts.ex { + return -1, nil + } + dur := item.opts.exat.Sub(time.Now()) + if dur < 0 { + return 0, ErrNotFound + } + return dur, nil +} + +// scan iterates through a specified index and calls user-defined iterator +// function for each item encountered. 
+// The desc param indicates that the iterator should descend. +// The gt param indicates that there is a greaterThan limit. +// The lt param indicates that there is a lessThan limit. +// The index param tells the scanner to use the specified index tree. An +// empty string for the index means to scan the keys, not the values. +// The start and stop params are the greaterThan, lessThan limits. For +// descending order, these will be lessThan, greaterThan. +// An error will be returned if the tx is closed or the index is not found. +func (tx *Tx) scan(desc, gt, lt bool, index, start, stop string, + iterator func(key, value string) bool) error { + if tx.db == nil { + return ErrTxClosed + } + // wrap a btree specific iterator around the user-defined iterator. + iter := func(item btree.Item) bool { + dbi := item.(*dbItem) + return iterator(dbi.key, dbi.val) + } + var tr *btree.BTree + if index == "" { + // empty index means we will use the keys tree. + tr = tx.db.keys + } else { + idx := tx.db.idxs[index] + if idx == nil { + // index was not found. return error + return ErrNotFound + } + tr = idx.btr + if tr == nil { + return nil + } + } + // create some limit items + var itemA, itemB *dbItem + if gt || lt { + if index == "" { + itemA = &dbItem{key: start} + itemB = &dbItem{key: stop} + } else { + itemA = &dbItem{val: start} + itemB = &dbItem{val: stop} + } + } + // execute the scan on the underlying tree. + if tx.wc != nil { + tx.wc.itercount++ + defer func() { + tx.wc.itercount-- + }() + } + if desc { + if gt { + if lt { + tr.DescendRange(itemA, itemB, iter) + } else { + tr.DescendGreaterThan(itemA, iter) + } + } else if lt { + tr.DescendLessOrEqual(itemA, iter) + } else { + tr.Descend(iter) + } + } else { + if gt { + if lt { + tr.AscendRange(itemA, itemB, iter) + } else { + tr.AscendGreaterOrEqual(itemA, iter) + } + } else if lt { + tr.AscendLessThan(itemA, iter) + } else { + tr.Ascend(iter) + } + } + return nil +} + +// Match returns true if the specified key matches the pattern. This is a very +// simple pattern matcher where '*' matches on any number characters and '?' +// matches on any one character. +func Match(key, pattern string) bool { + return match.Match(key, pattern) +} + +// AscendKeys allows for iterating through keys based on the specified pattern. +func (tx *Tx) AscendKeys(pattern string, + iterator func(key, value string) bool) error { + if pattern == "" { + return nil + } + if pattern[0] == '*' { + if pattern == "*" { + return tx.Ascend("", iterator) + } + return tx.Ascend("", func(key, value string) bool { + if match.Match(key, pattern) { + if !iterator(key, value) { + return false + } + } + return true + }) + } + min, max := match.Allowable(pattern) + return tx.AscendGreaterOrEqual("", min, func(key, value string) bool { + if key > max { + return false + } + if match.Match(key, pattern) { + if !iterator(key, value) { + return false + } + } + return true + }) +} + +// DescendKeys allows for iterating through keys based on the specified pattern. 
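+//
+// For example (an illustrative sketch; "user:*" is a placeholder pattern):
+//
+//	tx.DescendKeys("user:*", func(key, value string) bool {
+//		fmt.Printf("%s: %s\n", key, value)
+//		return true
+//	})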
+func (tx *Tx) DescendKeys(pattern string, + iterator func(key, value string) bool) error { + if pattern == "" { + return nil + } + if pattern[0] == '*' { + if pattern == "*" { + return tx.Descend("", iterator) + } + return tx.Descend("", func(key, value string) bool { + if match.Match(key, pattern) { + if !iterator(key, value) { + return false + } + } + return true + }) + } + min, max := match.Allowable(pattern) + return tx.DescendLessOrEqual("", max, func(key, value string) bool { + if key < min { + return false + } + if match.Match(key, pattern) { + if !iterator(key, value) { + return false + } + } + return true + }) +} + +// Ascend calls the iterator for every item in the database within the range +// [first, last], until iterator returns false. +// When an index is provided, the results will be ordered by the item values +// as specified by the less() function of the defined index. +// When an index is not provided, the results will be ordered by the item key. +// An invalid index will return an error. +func (tx *Tx) Ascend(index string, + iterator func(key, value string) bool) error { + return tx.scan(false, false, false, index, "", "", iterator) +} + +// AscendGreaterOrEqual calls the iterator for every item in the database within +// the range [pivot, last], until iterator returns false. +// When an index is provided, the results will be ordered by the item values +// as specified by the less() function of the defined index. +// When an index is not provided, the results will be ordered by the item key. +// An invalid index will return an error. +func (tx *Tx) AscendGreaterOrEqual(index, pivot string, + iterator func(key, value string) bool) error { + return tx.scan(false, true, false, index, pivot, "", iterator) +} + +// AscendLessThan calls the iterator for every item in the database within the +// range [first, pivot), until iterator returns false. +// When an index is provided, the results will be ordered by the item values +// as specified by the less() function of the defined index. +// When an index is not provided, the results will be ordered by the item key. +// An invalid index will return an error. +func (tx *Tx) AscendLessThan(index, pivot string, + iterator func(key, value string) bool) error { + return tx.scan(false, false, true, index, pivot, "", iterator) +} + +// AscendRange calls the iterator for every item in the database within +// the range [greaterOrEqual, lessThan), until iterator returns false. +// When an index is provided, the results will be ordered by the item values +// as specified by the less() function of the defined index. +// When an index is not provided, the results will be ordered by the item key. +// An invalid index will return an error. +func (tx *Tx) AscendRange(index, greaterOrEqual, lessThan string, + iterator func(key, value string) bool) error { + return tx.scan( + false, true, true, index, greaterOrEqual, lessThan, iterator, + ) +} + +// Descend calls the iterator for every item in the database within the range +// [last, first], until iterator returns false. +// When an index is provided, the results will be ordered by the item values +// as specified by the less() function of the defined index. +// When an index is not provided, the results will be ordered by the item key. +// An invalid index will return an error. 
+func (tx *Tx) Descend(index string, + iterator func(key, value string) bool) error { + return tx.scan(true, false, false, index, "", "", iterator) +} + +// DescendGreaterThan calls the iterator for every item in the database within +// the range [last, pivot), until iterator returns false. +// When an index is provided, the results will be ordered by the item values +// as specified by the less() function of the defined index. +// When an index is not provided, the results will be ordered by the item key. +// An invalid index will return an error. +func (tx *Tx) DescendGreaterThan(index, pivot string, + iterator func(key, value string) bool) error { + return tx.scan(true, true, false, index, pivot, "", iterator) +} + +// DescendLessOrEqual calls the iterator for every item in the database within +// the range [pivot, first], until iterator returns false. +// When an index is provided, the results will be ordered by the item values +// as specified by the less() function of the defined index. +// When an index is not provided, the results will be ordered by the item key. +// An invalid index will return an error. +func (tx *Tx) DescendLessOrEqual(index, pivot string, + iterator func(key, value string) bool) error { + return tx.scan(true, false, true, index, pivot, "", iterator) +} + +// DescendRange calls the iterator for every item in the database within +// the range [lessOrEqual, greaterThan), until iterator returns false. +// When an index is provided, the results will be ordered by the item values +// as specified by the less() function of the defined index. +// When an index is not provided, the results will be ordered by the item key. +// An invalid index will return an error. +func (tx *Tx) DescendRange(index, lessOrEqual, greaterThan string, + iterator func(key, value string) bool) error { + return tx.scan( + true, true, true, index, lessOrEqual, greaterThan, iterator, + ) +} + +// rect is used by Intersects +type rect struct { + min, max []float64 +} + +func (r *rect) Rect(ctx interface{}) (min, max []float64) { + return r.min, r.max +} + +// Intersects searches for rectangle items that intersect a target rect. +// The specified index must have been created by AddIndex() and the target +// is represented by the rect string. This string will be processed by the +// same bounds function that was passed to the CreateSpatialIndex() function. +// An invalid index will return an error. +func (tx *Tx) Intersects(index, bounds string, + iterator func(key, value string) bool) error { + if tx.db == nil { + return ErrTxClosed + } + if index == "" { + // cannot search on keys tree. just return nil. + return nil + } + // wrap a rtree specific iterator around the user-defined iterator. + iter := func(item rtree.Item) bool { + dbi := item.(*dbItem) + return iterator(dbi.key, dbi.val) + } + idx := tx.db.idxs[index] + if idx == nil { + // index was not found. return error + return ErrNotFound + } + if idx.rtr == nil { + // not an r-tree index. just return nil + return nil + } + // execute the search + var min, max []float64 + if idx.rect != nil { + min, max = idx.rect(bounds) + } + idx.rtr.Search(&rect{min, max}, iter) + return nil +} + +// Len returns the number of items in the database +func (tx *Tx) Len() (int, error) { + if tx.db == nil { + return 0, ErrTxClosed + } + return tx.db.keys.Len(), nil +} + +// IndexOptions provides an index with additional features or +// alternate functionality. 
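+//
+// For example, case-insensitive key matching can be requested when creating
+// an index (the index name and pattern below are placeholders):
+//
+//	tx.CreateIndexOptions("names", "user:*:name",
+//		&buntdb.IndexOptions{CaseInsensitiveKeyMatching: true},
+//		buntdb.IndexString)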
+type IndexOptions struct { + // CaseInsensitiveKeyMatching allow for case-insensitive + // matching on keys when setting key/values. + CaseInsensitiveKeyMatching bool +} + +// CreateIndex builds a new index and populates it with items. +// The items are ordered in an b-tree and can be retrieved using the +// Ascend* and Descend* methods. +// An error will occur if an index with the same name already exists. +// +// When a pattern is provided, the index will be populated with +// keys that match the specified pattern. This is a very simple pattern +// match where '*' matches on any number characters and '?' matches on +// any one character. +// The less function compares if string 'a' is less than string 'b'. +// It allows for indexes to create custom ordering. It's possible +// that the strings may be textual or binary. It's up to the provided +// less function to handle the content format and comparison. +// There are some default less function that can be used such as +// IndexString, IndexBinary, etc. +func (tx *Tx) CreateIndex(name, pattern string, + less ...func(a, b string) bool) error { + return tx.createIndex(name, pattern, less, nil, nil) +} + +// CreateIndexOptions is the same as CreateIndex except that it allows +// for additional options. +func (tx *Tx) CreateIndexOptions(name, pattern string, + opts *IndexOptions, + less ...func(a, b string) bool) error { + return tx.createIndex(name, pattern, less, nil, opts) +} + +// CreateSpatialIndex builds a new index and populates it with items. +// The items are organized in an r-tree and can be retrieved using the +// Intersects method. +// An error will occur if an index with the same name already exists. +// +// The rect function converts a string to a rectangle. The rectangle is +// represented by two arrays, min and max. Both arrays may have a length +// between 1 and 20, and both arrays must match in length. A length of 1 is a +// one dimensional rectangle, and a length of 4 is a four dimension rectangle. +// There is support for up to 20 dimensions. +// The values of min must be less than the values of max at the same dimension. +// Thus min[0] must be less-than-or-equal-to max[0]. +// The IndexRect is a default function that can be used for the rect +// parameter. +func (tx *Tx) CreateSpatialIndex(name, pattern string, + rect func(item string) (min, max []float64)) error { + return tx.createIndex(name, pattern, nil, rect, nil) +} + +// CreateSpatialIndexOptions is the same as CreateSpatialIndex except that +// it allows for additional options. +func (tx *Tx) CreateSpatialIndexOptions(name, pattern string, + opts *IndexOptions, + rect func(item string) (min, max []float64)) error { + return tx.createIndex(name, pattern, nil, rect, nil) +} + +// createIndex is called by CreateIndex() and CreateSpatialIndex() +func (tx *Tx) createIndex(name string, pattern string, + lessers []func(a, b string) bool, + rect func(item string) (min, max []float64), + opts *IndexOptions, +) error { + if tx.db == nil { + return ErrTxClosed + } else if !tx.writable { + return ErrTxNotWritable + } else if tx.wc.itercount > 0 { + return ErrTxIterating + } + if name == "" { + // cannot create an index without a name. + // an empty name index is designated for the main "keys" tree. + return ErrIndexExists + } + // check if an index with that name already exists. + if _, ok := tx.db.idxs[name]; ok { + // index with name already exists. error. 
+ return ErrIndexExists + } + // genreate a less function + var less func(a, b string) bool + switch len(lessers) { + default: + // multiple less functions specified. + // create a compound less function. + less = func(a, b string) bool { + for i := 0; i < len(lessers)-1; i++ { + if lessers[i](a, b) { + return true + } + if lessers[i](b, a) { + return false + } + } + return lessers[len(lessers)-1](a, b) + } + case 0: + // no less function + case 1: + less = lessers[0] + } + var sopts IndexOptions + if opts != nil { + sopts = *opts + } + if sopts.CaseInsensitiveKeyMatching { + pattern = strings.ToLower(pattern) + } + // intialize new index + idx := &index{ + name: name, + pattern: pattern, + less: less, + rect: rect, + db: tx.db, + opts: sopts, + } + idx.rebuild() + // save the index + tx.db.idxs[name] = idx + if tx.wc.rbkeys == nil { + // store the index in the rollback map. + if _, ok := tx.wc.rollbackIndexes[name]; !ok { + // we use nil to indicate that the index should be removed upon rollback. + tx.wc.rollbackIndexes[name] = nil + } + } + return nil +} + +// DropIndex removes an index. +func (tx *Tx) DropIndex(name string) error { + if tx.db == nil { + return ErrTxClosed + } else if !tx.writable { + return ErrTxNotWritable + } else if tx.wc.itercount > 0 { + return ErrTxIterating + } + if name == "" { + // cannot drop the default "keys" index + return ErrInvalidOperation + } + idx, ok := tx.db.idxs[name] + if !ok { + return ErrNotFound + } + // delete from the map. + // this is all that is needed to delete an index. + delete(tx.db.idxs, name) + if tx.wc.rbkeys == nil { + // store the index in the rollback map. + if _, ok := tx.wc.rollbackIndexes[name]; !ok { + // we use a non-nil copy of the index without the data to indicate that the + // index should be rebuilt upon rollback. + tx.wc.rollbackIndexes[name] = idx.clearCopy() + } + } + return nil +} + +// Indexes returns a list of index names. +func (tx *Tx) Indexes() ([]string, error) { + if tx.db == nil { + return nil, ErrTxClosed + } + names := make([]string, 0, len(tx.db.idxs)) + for name := range tx.db.idxs { + names = append(names, name) + } + sort.Strings(names) + return names, nil +} + +// Rect is helper function that returns a string representation +// of a rect. IndexRect() is the reverse function and can be used +// to generate a rect from a string. +func Rect(min, max []float64) string { + r := grect.Rect{Min: min, Max: max} + return r.String() +} + +// Point is a helper function that converts a series of float64s +// to a rectangle for a spatial index. +func Point(coords ...float64) string { + return Rect(coords, coords) +} + +// IndexRect is a helper function that converts string to a rect. +// Rect() is the reverse function and can be used to generate a string +// from a rect. +func IndexRect(a string) (min, max []float64) { + r := grect.Get(a) + return r.Min, r.Max +} + +// IndexString is a helper function that return true if 'a' is less than 'b'. +// This is a case-insensitive comparison. Use the IndexBinary() for comparing +// case-sensitive strings. 
+func IndexString(a, b string) bool { + for i := 0; i < len(a) && i < len(b); i++ { + if a[i] >= 'A' && a[i] <= 'Z' { + if b[i] >= 'A' && b[i] <= 'Z' { + // both are uppercase, do nothing + if a[i] < b[i] { + return true + } else if a[i] > b[i] { + return false + } + } else { + // a is uppercase, convert a to lowercase + if a[i]+32 < b[i] { + return true + } else if a[i]+32 > b[i] { + return false + } + } + } else if b[i] >= 'A' && b[i] <= 'Z' { + // b is uppercase, convert b to lowercase + if a[i] < b[i]+32 { + return true + } else if a[i] > b[i]+32 { + return false + } + } else { + // neither are uppercase + if a[i] < b[i] { + return true + } else if a[i] > b[i] { + return false + } + } + } + return len(a) < len(b) +} + +// IndexBinary is a helper function that returns true if 'a' is less than 'b'. +// This compares the raw binary of the string. +func IndexBinary(a, b string) bool { + return a < b +} + +// IndexInt is a helper function that returns true if 'a' is less than 'b'. +func IndexInt(a, b string) bool { + ia, _ := strconv.ParseInt(a, 10, 64) + ib, _ := strconv.ParseInt(b, 10, 64) + return ia < ib +} + +// IndexUint is a helper function that returns true if 'a' is less than 'b'. +// This compares uint64s that are added to the database using the +// Uint() conversion function. +func IndexUint(a, b string) bool { + ia, _ := strconv.ParseUint(a, 10, 64) + ib, _ := strconv.ParseUint(b, 10, 64) + return ia < ib +} + +// IndexFloat is a helper function that returns true if 'a' is less than 'b'. +// This compares float64s that are added to the database using the +// Float() conversion function. +func IndexFloat(a, b string) bool { + ia, _ := strconv.ParseFloat(a, 64) + ib, _ := strconv.ParseFloat(b, 64) + return ia < ib +} + +// IndexJSON provides for the ability to create an index on any JSON field. +// When the field is a string, the comparison will be case-insensitive. +// It returns a helper function used by CreateIndex. +func IndexJSON(path string) func(a, b string) bool { + return func(a, b string) bool { + return gjson.Get(a, path).Less(gjson.Get(b, path), false) + } +} + +// IndexJSONCaseSensitive provides for the ability to create an index on +// any JSON field. +// When the field is a string, the comparison will be case-sensitive. +// It returns a helper function used by CreateIndex. +func IndexJSONCaseSensitive(path string) func(a, b string) bool { + return func(a, b string) bool { + return gjson.Get(a, path).Less(gjson.Get(b, path), true) + } +} + +// Desc is a helper function that changes the order of an index. 
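These `Index*` helpers, together with `Desc` below, are meant to be passed to `CreateIndex`. A minimal usage sketch, with an illustrative key pattern and JSON shape (the keys, values and index name are invented for the example):

```go
package main

import (
	"fmt"

	"github.com/tidwall/buntdb"
)

func main() {
	db, err := buntdb.Open(":memory:") // illustrative: in-memory database
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Order user:* values by their JSON "age" field; wrapping the less
	// function in Desc makes the index run from oldest to youngest.
	if err := db.CreateIndex("ages", "user:*", buntdb.Desc(buntdb.IndexJSON("age"))); err != nil {
		panic(err)
	}

	db.Update(func(tx *buntdb.Tx) error {
		tx.Set("user:1", `{"name":"Ann","age":31}`, nil)
		tx.Set("user:2", `{"name":"Bob","age":52}`, nil)
		return nil
	})

	db.View(func(tx *buntdb.Tx) error {
		// Ascend walks the index in the order defined by its less function,
		// so user:2 (age 52) is visited before user:1 (age 31).
		return tx.Ascend("ages", func(key, value string) bool {
			fmt.Println(key, value)
			return true // keep iterating
		})
	})
}
```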
+func Desc(less func(a, b string) bool) func(a, b string) bool { + return func(a, b string) bool { return less(b, a) } +} diff --git a/vendor/github.com/tidwall/buntdb/logo.png b/vendor/github.com/tidwall/buntdb/logo.png new file mode 100644 index 0000000..01c6d75 Binary files /dev/null and b/vendor/github.com/tidwall/buntdb/logo.png differ diff --git a/vendor/github.com/tidwall/gjson/LICENSE b/vendor/github.com/tidwall/gjson/LICENSE new file mode 100644 index 0000000..58f5819 --- /dev/null +++ b/vendor/github.com/tidwall/gjson/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2016 Josh Baker + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/tidwall/gjson/README.md b/vendor/github.com/tidwall/gjson/README.md new file mode 100644 index 0000000..b38f920 --- /dev/null +++ b/vendor/github.com/tidwall/gjson/README.md @@ -0,0 +1,369 @@ +

+GJSON
+=====
+get a json value quickly
+ +GJSON is a Go package that provides a [very fast](#performance) and simple way to get a value from a json document. The purpose for this library it to give efficient json indexing for the [BuntDB](https://github.com/tidwall/buntdb) project. + +Getting Started +=============== + +## Installing + +To start using GJSON, install Go and run `go get`: + +```sh +$ go get -u github.com/tidwall/gjson +``` + +This will retrieve the library. + +## Get a value +Get searches json for the specified path. A path is in dot syntax, such as "name.last" or "age". This function expects that the json is well-formed and validates. Invalid json will not panic, but it may return back unexpected results. When the value is found it's returned immediately. + +```go +package main + +import "github.com/tidwall/gjson" + +const json = `{"name":{"first":"Janet","last":"Prichard"},"age":47}` + +func main() { + value := gjson.Get(json, "name.last") + println(value.String()) +} +``` + +This will print: + +``` +Prichard +``` +*There's also the [GetMany](#get-multiple-values-at-once) function to get multiple values at once, and [GetBytes](#working-with-bytes) for working with JSON byte slices.* + +## Path Syntax + +A path is a series of keys separated by a dot. +A key may contain special wildcard characters '\*' and '?'. +To access an array value use the index as the key. +To get the number of elements in an array or to access a child path, use the '#' character. +The dot and wildcard characters can be escaped with '\'. + +```json +{ + "name": {"first": "Tom", "last": "Anderson"}, + "age":37, + "children": ["Sara","Alex","Jack"], + "fav.movie": "Deer Hunter", + "friends": [ + {"first": "Dale", "last": "Murphy", "age": 44}, + {"first": "Roger", "last": "Craig", "age": 68}, + {"first": "Jane", "last": "Murphy", "age": 47} + ] +} +``` +``` +"name.last" >> "Anderson" +"age" >> 37 +"children" >> ["Sara","Alex","Jack"] +"children.#" >> 3 +"children.1" >> "Alex" +"child*.2" >> "Jack" +"c?ildren.0" >> "Sara" +"fav\.movie" >> "Deer Hunter" +"friends.#.first" >> ["Dale","Roger","Jane"] +"friends.1.last" >> "Craig" +``` + +You can also query an array for the first match by using `#[...]`, or find all matches with `#[...]#`. +Queries support the `==`, `!=`, `<`, `<=`, `>`, `>=` comparison operators and the simple pattern matching `%` operator. + +``` +friends.#[last=="Murphy"].first >> "Dale" +friends.#[last=="Murphy"]#.first >> ["Dale","Jane"] +friends.#[age>45]#.last >> ["Craig","Murphy"] +friends.#[first%"D*"].last >> "Murphy" +``` + +## Result Type + +GJSON supports the json types `string`, `number`, `bool`, and `null`. +Arrays and Objects are returned as their raw json types. 
+ +The `Result` type holds one of these: + +``` +bool, for JSON booleans +float64, for JSON numbers +string, for JSON string literals +nil, for JSON null +``` + +To directly access the value: + +```go +result.Type // can be String, Number, True, False, Null, or JSON +result.Str // holds the string +result.Num // holds the float64 number +result.Raw // holds the raw json +result.Index // index of raw value in original json, zero means index unknown +``` + +There are a variety of handy functions that work on a result: + +```go +result.Value() interface{} +result.Int() int64 +result.Uint() uint64 +result.Float() float64 +result.String() string +result.Bool() bool +result.Array() []gjson.Result +result.Map() map[string]gjson.Result +result.Get(path string) Result +result.ForEach(iterator func(key, value Result) bool) +result.Less(token Result, caseSensitive bool) bool +``` + +The `result.Value()` function returns an `interface{}` which requires type assertion and is one of the following Go types: + + + +The `result.Array()` function returns back an array of values. +If the result represents a non-existent value, then an empty array will be returned. +If the result is not a JSON array, the return value will be an array containing one result. + +```go +boolean >> bool +number >> float64 +string >> string +null >> nil +array >> []interface{} +object >> map[string]interface{} +``` + +## Get nested array values + +Suppose you want all the last names from the following json: + +```json +{ + "programmers": [ + { + "firstName": "Janet", + "lastName": "McLaughlin", + }, { + "firstName": "Elliotte", + "lastName": "Hunter", + }, { + "firstName": "Jason", + "lastName": "Harold", + } + ] +}` +``` + +You would use the path "programmers.#.lastName" like such: + +```go +result := gjson.Get(json, "programmers.#.lastName") +for _,name := range result.Array() { + println(name.String()) +} +``` + +You can also query an object inside an array: + +```go +name := gjson.Get(json, `programmers.#[lastName="Hunter"].firstName`) +println(name.String()) // prints "Elliotte" +``` + +## Iterate through an object or array + +The `ForEach` function allows for quickly iterating through an object or array. +The key and value are passed to the iterator function for objects. +Only the value is passed for arrays. +Returning `false` from an iterator will stop iteration. + +```go +result := gjson.Get(json, "programmers") +result.ForEach(func(key, value gjson.Result) bool{ + println(value.String()) + return true // keep iterating +}) +``` + +## Simple Parse and Get + +There's a `Parse(json)` function that will do a simple parse, and `result.Get(path)` that will search a result. + +For example, all of these will return the same result: + +```go +gjson.Parse(json).Get("name").Get("last") +gjson.Get(json, "name").Get("last") +gjson.Get(json, "name.last") +``` + +## Check for the existence of a value + +Sometimes you just want to know if a value exists. + +```go +value := gjson.Get(json, "name.last") +if !value.Exists() { + println("no last name") +} else { + println(value.String()) +} + +// Or as one step +if gjson.Get(json, "name.last").Exists(){ + println("has a last name") +} +``` + +## Unmarshal to a map + +To unmarshal to a `map[string]interface{}`: + +```go +m, ok := gjson.Parse(json).Value().(map[string]interface{}) +if !ok{ + // not a map +} +``` + +## Working with Bytes + +If your JSON is contained in a `[]byte` slice, there's the [GetBytes](https://godoc.org/github.com/tidwall/gjson#GetBytes) function. 
This is preferred over `Get(string(data), path)`. + +```go +var json []byte = ... +result := gjson.GetBytes(json, path) +``` + +If you are using the `gjson.GetBytes(json, path)` function and you want to avoid converting `result.Raw` to a `[]byte`, then you can use this pattern: + +```go +var json []byte = ... +result := gjson.GetBytes(json, path) +var raw []byte +if result.Index > 0 { + raw = json[result.Index:result.Index+len(result.Raw)] +} else { + raw = []byte(result.Raw) +} +``` + +This is a best-effort no allocation sub slice of the original json. This method utilizes the `result.Index` field, which is the position of the raw data in the original json. It's possible that the value of `result.Index` equals zero, in which case the `result.Raw` is converted to a `[]byte`. + +## Get multiple values at once + +The `GetMany` function can be used to get multiple values at the same time, and is optimized to scan over a JSON payload once. + +```go +results := gjson.GetMany(json, "name.first", "name.last", "age") +``` + +The return value is a `[]Result`, which will always contain exactly the same number of items as the input paths. + +## Performance + +Benchmarks of GJSON alongside [encoding/json](https://golang.org/pkg/encoding/json/), +[ffjson](https://github.com/pquerna/ffjson), +[EasyJSON](https://github.com/mailru/easyjson), +and [jsonparser](https://github.com/buger/jsonparser) + +``` +BenchmarkGJSONGet-8 15000000 333 ns/op 0 B/op 0 allocs/op +BenchmarkGJSONUnmarshalMap-8 900000 4188 ns/op 1920 B/op 26 allocs/op +BenchmarkJSONUnmarshalMap-8 600000 8908 ns/op 3048 B/op 69 allocs/op +BenchmarkJSONUnmarshalStruct-8 600000 9026 ns/op 1832 B/op 69 allocs/op +BenchmarkJSONDecoder-8 300000 14339 ns/op 4224 B/op 184 allocs/op +BenchmarkFFJSONLexer-8 1500000 3156 ns/op 896 B/op 8 allocs/op +BenchmarkEasyJSONLexer-8 3000000 938 ns/op 613 B/op 6 allocs/op +BenchmarkJSONParserGet-8 3000000 442 ns/op 21 B/op 0 allocs/op +``` + +Benchmarks for the `GetMany` function: + +``` +BenchmarkGJSONGetMany4Paths-8 4000000 319 ns/op 112 B/op 0 allocs/op +BenchmarkGJSONGetMany8Paths-8 8000000 218 ns/op 56 B/op 0 allocs/op +BenchmarkGJSONGetMany16Paths-8 16000000 160 ns/op 56 B/op 0 allocs/op +BenchmarkGJSONGetMany32Paths-8 32000000 130 ns/op 64 B/op 0 allocs/op +BenchmarkGJSONGetMany64Paths-8 64000000 117 ns/op 64 B/op 0 allocs/op +BenchmarkGJSONGetMany128Paths-8 128000000 109 ns/op 64 B/op 0 allocs/op +``` + +JSON document used: + +```json +{ + "widget": { + "debug": "on", + "window": { + "title": "Sample Konfabulator Widget", + "name": "main_window", + "width": 500, + "height": 500 + }, + "image": { + "src": "Images/Sun.png", + "hOffset": 250, + "vOffset": 250, + "alignment": "center" + }, + "text": { + "data": "Click Here", + "size": 36, + "style": "bold", + "vOffset": 100, + "alignment": "center", + "onMouseUp": "sun1.opacity = (sun1.opacity / 100) * 90;" + } + } +} +``` + +Each operation was rotated though one of the following search paths: + +``` +widget.window.name +widget.image.hOffset +widget.text.onMouseUp +``` + +For the `GetMany` benchmarks these paths are used: + +``` +widget.window.name +widget.image.hOffset +widget.text.onMouseUp +widget.window.title +widget.image.alignment +widget.text.style +widget.window.height +widget.image.src +widget.text.data +widget.text.size +``` + +*These benchmarks were run on a MacBook Pro 15" 2.8 GHz Intel Core i7 using Go 1.7.* + +## Contact +Josh Baker [@tidwall](http://twitter.com/tidwall) + +## License + +GJSON source code is available under the MIT 
[License](/LICENSE). diff --git a/vendor/github.com/tidwall/gjson/gjson.go b/vendor/github.com/tidwall/gjson/gjson.go new file mode 100644 index 0000000..9b28df2 --- /dev/null +++ b/vendor/github.com/tidwall/gjson/gjson.go @@ -0,0 +1,1946 @@ +// Package gjson provides searching for json strings. +package gjson + +import ( + "reflect" + "strconv" + + // It's totally safe to use this package, but in case your + // project or organization restricts the use of 'unsafe', + // there's the "github.com/tidwall/gjson-safe" package. + "unsafe" + + "github.com/tidwall/match" +) + +// Type is Result type +type Type int + +const ( + // Null is a null json value + Null Type = iota + // False is a json false boolean + False + // Number is json number + Number + // String is a json string + String + // True is a json true boolean + True + // JSON is a raw block of JSON + JSON +) + +// String returns a string representation of the type. +func (t Type) String() string { + switch t { + default: + return "" + case Null: + return "Null" + case False: + return "False" + case Number: + return "Number" + case String: + return "String" + case True: + return "True" + case JSON: + return "JSON" + } +} + +// Result represents a json value that is returned from Get(). +type Result struct { + // Type is the json type + Type Type + // Raw is the raw json + Raw string + // Str is the json string + Str string + // Num is the json number + Num float64 + // Index of raw value in original json, zero means index unknown + Index int +} + +// String returns a string representation of the value. +func (t Result) String() string { + switch t.Type { + default: + return "null" + case False: + return "false" + case Number: + return strconv.FormatFloat(t.Num, 'f', -1, 64) + case String: + return t.Str + case JSON: + return t.Raw + case True: + return "true" + } +} + +// Bool returns an boolean representation. +func (t Result) Bool() bool { + switch t.Type { + default: + return false + case True: + return true + case String: + return t.Str != "" && t.Str != "0" + case Number: + return t.Num != 0 + } +} + +// Int returns an integer representation. +func (t Result) Int() int64 { + switch t.Type { + default: + return 0 + case True: + return 1 + case String: + n, _ := strconv.ParseInt(t.Str, 10, 64) + return n + case Number: + return int64(t.Num) + } +} + +// Uint returns an unsigned integer representation. +func (t Result) Uint() uint64 { + switch t.Type { + default: + return 0 + case True: + return 1 + case String: + n, _ := strconv.ParseUint(t.Str, 10, 64) + return n + case Number: + return uint64(t.Num) + } +} + +// Float returns an float64 representation. +func (t Result) Float() float64 { + switch t.Type { + default: + return 0 + case True: + return 1 + case String: + n, _ := strconv.ParseFloat(t.Str, 64) + return n + case Number: + return t.Num + } +} + +// Array returns back an array of values. +// If the result represents a non-existent value, then an empty array will be returned. +// If the result is not a JSON array, the return value will be an array containing one result. +func (t Result) Array() []Result { + if !t.Exists() { + return nil + } + if t.Type != JSON { + return []Result{t} + } + r := t.arrayOrMap('[', false) + return r.a +} + +// ForEach iterates through values. +// If the result represents a non-existent value, then no values will be iterated. +// If the result is an Object, the iterator will pass the key and value of each item. +// If the result is an Array, the iterator will only pass the value of each item. 
+// If the result is not a JSON array or object, the iterator will pass back one value equal to the result. +func (t Result) ForEach(iterator func(key, value Result) bool) { + if !t.Exists() { + return + } + if t.Type != JSON { + iterator(Result{}, t) + return + } + json := t.Raw + var keys bool + var i int + var key, value Result + for ; i < len(json); i++ { + if json[i] == '{' { + i++ + key.Type = String + keys = true + break + } else if json[i] == '[' { + i++ + break + } + if json[i] > ' ' { + return + } + } + var str string + var vesc bool + var ok bool + for ; i < len(json); i++ { + if keys { + if json[i] != '"' { + continue + } + s := i + i, str, vesc, ok = parseString(json, i+1) + if !ok { + return + } + if vesc { + key.Str = unescape(str[1 : len(str)-1]) + } else { + key.Str = str[1 : len(str)-1] + } + key.Raw = str + key.Index = s + } + for ; i < len(json); i++ { + if json[i] <= ' ' || json[i] == ',' || json[i] == ':' { + continue + } + break + } + s := i + i, value, ok = parseAny(json, i, true) + if !ok { + return + } + value.Index = s + if !iterator(key, value) { + return + } + } +} + +// Map returns back an map of values. The result should be a JSON array. +func (t Result) Map() map[string]Result { + if t.Type != JSON { + return map[string]Result{} + } + r := t.arrayOrMap('{', false) + return r.o +} + +// Get searches result for the specified path. +// The result should be a JSON array or object. +func (t Result) Get(path string) Result { + return Get(t.Raw, path) +} + +type arrayOrMapResult struct { + a []Result + ai []interface{} + o map[string]Result + oi map[string]interface{} + vc byte +} + +func (t Result) arrayOrMap(vc byte, valueize bool) (r arrayOrMapResult) { + var json = t.Raw + var i int + var value Result + var count int + var key Result + if vc == 0 { + for ; i < len(json); i++ { + if json[i] == '{' || json[i] == '[' { + r.vc = json[i] + i++ + break + } + if json[i] > ' ' { + goto end + } + } + } else { + for ; i < len(json); i++ { + if json[i] == vc { + i++ + break + } + if json[i] > ' ' { + goto end + } + } + r.vc = vc + } + if r.vc == '{' { + if valueize { + r.oi = make(map[string]interface{}) + } else { + r.o = make(map[string]Result) + } + } else { + if valueize { + r.ai = make([]interface{}, 0) + } else { + r.a = make([]Result, 0) + } + } + for ; i < len(json); i++ { + if json[i] <= ' ' { + continue + } + // get next value + if json[i] == ']' || json[i] == '}' { + break + } + switch json[i] { + default: + if (json[i] >= '0' && json[i] <= '9') || json[i] == '-' { + value.Type = Number + value.Raw, value.Num = tonum(json[i:]) + } else { + continue + } + case '{', '[': + value.Type = JSON + value.Raw = squash(json[i:]) + case 'n': + value.Type = Null + value.Raw = tolit(json[i:]) + case 't': + value.Type = True + value.Raw = tolit(json[i:]) + case 'f': + value.Type = False + value.Raw = tolit(json[i:]) + case '"': + value.Type = String + value.Raw, value.Str = tostr(json[i:]) + } + i += len(value.Raw) - 1 + + if r.vc == '{' { + if count%2 == 0 { + key = value + } else { + if valueize { + r.oi[key.Str] = value.Value() + } else { + r.o[key.Str] = value + } + } + count++ + } else { + if valueize { + r.ai = append(r.ai, value.Value()) + } else { + r.a = append(r.a, value) + } + } + } +end: + return +} + +// Parse parses the json and returns a result. 
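A small sketch of how the `Result` helpers implemented above behave in practice, together with `Parse`, which follows below; the document and field names are invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/tidwall/gjson"
)

func main() {
	const doc = `{"user":{"name":"Ann","langs":["go","c"]},"active":true}`

	// Parse once, then drill in with chained Get calls on the Result.
	root := gjson.Parse(doc)
	fmt.Println(root.Get("user").Get("name").String()) // Ann

	// For arrays, ForEach passes only the value to the iterator.
	root.Get("user.langs").ForEach(func(_, value gjson.Result) bool {
		fmt.Println(value.String())
		return true // keep iterating
	})

	// Map flattens a JSON object into map[string]gjson.Result.
	for k, v := range root.Get("user").Map() {
		fmt.Println(k, "=", v.String())
	}
}
```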
+func Parse(json string) Result { + var value Result + for i := 0; i < len(json); i++ { + if json[i] == '{' || json[i] == '[' { + value.Type = JSON + value.Raw = json[i:] // just take the entire raw + break + } + if json[i] <= ' ' { + continue + } + switch json[i] { + default: + if (json[i] >= '0' && json[i] <= '9') || json[i] == '-' { + value.Type = Number + value.Raw, value.Num = tonum(json[i:]) + } else { + return Result{} + } + case 'n': + value.Type = Null + value.Raw = tolit(json[i:]) + case 't': + value.Type = True + value.Raw = tolit(json[i:]) + case 'f': + value.Type = False + value.Raw = tolit(json[i:]) + case '"': + value.Type = String + value.Raw, value.Str = tostr(json[i:]) + } + break + } + return value +} + +// ParseBytes parses the json and returns a result. +// If working with bytes, this method preferred over Parse(string(data)) +func ParseBytes(json []byte) Result { + return Parse(string(json)) +} + +func squash(json string) string { + // expects that the lead character is a '[' or '{' + // squash the value, ignoring all nested arrays and objects. + // the first '[' or '{' has already been read + depth := 1 + for i := 1; i < len(json); i++ { + if json[i] >= '"' && json[i] <= '}' { + switch json[i] { + case '"': + i++ + s2 := i + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + // look for an escaped slash + if json[i-1] == '\\' { + n := 0 + for j := i - 2; j > s2-1; j-- { + if json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + break + } + } + case '{', '[': + depth++ + case '}', ']': + depth-- + if depth == 0 { + return json[:i+1] + } + } + } + } + return json +} + +func tonum(json string) (raw string, num float64) { + for i := 1; i < len(json); i++ { + // less than dash might have valid characters + if json[i] <= '-' { + if json[i] <= ' ' || json[i] == ',' { + // break on whitespace and comma + raw = json[:i] + num, _ = strconv.ParseFloat(raw, 64) + return + } + // could be a '+' or '-'. let's assume so. + continue + } + if json[i] < ']' { + // probably a valid number + continue + } + if json[i] == 'e' || json[i] == 'E' { + // allow for exponential numbers + continue + } + // likely a ']' or '}' + raw = json[:i] + num, _ = strconv.ParseFloat(raw, 64) + return + } + raw = json + num, _ = strconv.ParseFloat(raw, 64) + return +} + +func tolit(json string) (raw string) { + for i := 1; i < len(json); i++ { + if json[i] <= 'a' || json[i] >= 'z' { + return json[:i] + } + } + return json +} + +func tostr(json string) (raw string, str string) { + // expects that the lead character is a '"' + for i := 1; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + return json[:i+1], json[1:i] + } + if json[i] == '\\' { + i++ + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + // look for an escaped slash + if json[i-1] == '\\' { + n := 0 + for j := i - 2; j > 0; j-- { + if json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + break + } + } + var ret string + if i+1 < len(json) { + ret = json[:i+1] + } else { + ret = json[:i] + } + return ret, unescape(json[1:i]) + } + } + return json, json[1:] +} + +// Exists returns true if value exists. 
+// +// if gjson.Get(json, "name.last").Exists(){ +// println("value exists") +// } +func (t Result) Exists() bool { + return t.Type != Null || len(t.Raw) != 0 +} + +// Value returns one of these types: +// +// bool, for JSON booleans +// float64, for JSON numbers +// Number, for JSON numbers +// string, for JSON string literals +// nil, for JSON null +// +func (t Result) Value() interface{} { + if t.Type == String { + return t.Str + } + switch t.Type { + default: + return nil + case False: + return false + case Number: + return t.Num + case JSON: + r := t.arrayOrMap(0, true) + if r.vc == '{' { + return r.oi + } else if r.vc == '[' { + return r.ai + } + return nil + case True: + return true + } +} + +func parseString(json string, i int) (int, string, bool, bool) { + var s = i + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + return i + 1, json[s-1 : i+1], false, true + } + if json[i] == '\\' { + i++ + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + // look for an escaped slash + if json[i-1] == '\\' { + n := 0 + for j := i - 2; j > 0; j-- { + if json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + return i + 1, json[s-1 : i+1], true, true + } + } + break + } + } + return i, json[s-1:], false, false +} + +func parseNumber(json string, i int) (int, string) { + var s = i + i++ + for ; i < len(json); i++ { + if json[i] <= ' ' || json[i] == ',' || json[i] == ']' || json[i] == '}' { + return i, json[s:i] + } + } + return i, json[s:] +} + +func parseLiteral(json string, i int) (int, string) { + var s = i + i++ + for ; i < len(json); i++ { + if json[i] < 'a' || json[i] > 'z' { + return i, json[s:i] + } + } + return i, json[s:] +} + +type arrayPathResult struct { + part string + path string + more bool + alogok bool + arrch bool + alogkey string + query struct { + on bool + path string + op string + value string + all bool + } +} + +func parseArrayPath(path string) (r arrayPathResult) { + for i := 0; i < len(path); i++ { + if path[i] == '.' { + r.part = path[:i] + r.path = path[i+1:] + r.more = true + return + } + if path[i] == '#' { + r.arrch = true + if i == 0 && len(path) > 1 { + if path[1] == '.' { + r.alogok = true + r.alogkey = path[2:] + r.path = path[:1] + } else if path[1] == '[' { + r.query.on = true + // query + i += 2 + // whitespace + for ; i < len(path); i++ { + if path[i] > ' ' { + break + } + } + s := i + for ; i < len(path); i++ { + if path[i] <= ' ' || + path[i] == '!' || + path[i] == '=' || + path[i] == '<' || + path[i] == '>' || + path[i] == '%' || + path[i] == ']' { + break + } + } + r.query.path = path[s:i] + // whitespace + for ; i < len(path); i++ { + if path[i] > ' ' { + break + } + } + if i < len(path) { + s = i + if path[i] == '!' 
{ + if i < len(path)-1 && path[i+1] == '=' { + i++ + } + } else if path[i] == '<' || path[i] == '>' { + if i < len(path)-1 && path[i+1] == '=' { + i++ + } + } else if path[i] == '=' { + if i < len(path)-1 && path[i+1] == '=' { + s++ + i++ + } + } + i++ + r.query.op = path[s:i] + // whitespace + for ; i < len(path); i++ { + if path[i] > ' ' { + break + } + } + s = i + for ; i < len(path); i++ { + if path[i] == '"' { + i++ + s2 := i + for ; i < len(path); i++ { + if path[i] > '\\' { + continue + } + if path[i] == '"' { + // look for an escaped slash + if path[i-1] == '\\' { + n := 0 + for j := i - 2; j > s2-1; j-- { + if path[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + break + } + } + } else if path[i] == ']' { + if i+1 < len(path) && path[i+1] == '#' { + r.query.all = true + } + break + } + } + if i > len(path) { + i = len(path) + } + v := path[s:i] + for len(v) > 0 && v[len(v)-1] <= ' ' { + v = v[:len(v)-1] + } + r.query.value = v + } + } + } + continue + } + } + r.part = path + r.path = "" + return +} + +type objectPathResult struct { + part string + path string + wild bool + more bool +} + +func parseObjectPath(path string) (r objectPathResult) { + for i := 0; i < len(path); i++ { + if path[i] == '.' { + r.part = path[:i] + r.path = path[i+1:] + r.more = true + return + } + if path[i] == '*' || path[i] == '?' { + r.wild = true + continue + } + if path[i] == '\\' { + // go into escape mode. this is a slower path that + // strips off the escape character from the part. + epart := []byte(path[:i]) + i++ + if i < len(path) { + epart = append(epart, path[i]) + i++ + for ; i < len(path); i++ { + if path[i] == '\\' { + i++ + if i < len(path) { + epart = append(epart, path[i]) + } + continue + } else if path[i] == '.' { + r.part = string(epart) + r.path = path[i+1:] + r.more = true + return + } else if path[i] == '*' || path[i] == '?' { + r.wild = true + } + epart = append(epart, path[i]) + } + } + // append the last part + r.part = string(epart) + return + } + } + r.part = path + return +} + +func parseSquash(json string, i int) (int, string) { + // expects that the lead character is a '[' or '{' + // squash the value, ignoring all nested arrays and objects. + // the first '[' or '{' has already been read + s := i + i++ + depth := 1 + for ; i < len(json); i++ { + if json[i] >= '"' && json[i] <= '}' { + switch json[i] { + case '"': + i++ + s2 := i + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + // look for an escaped slash + if json[i-1] == '\\' { + n := 0 + for j := i - 2; j > s2-1; j-- { + if json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + break + } + } + case '{', '[': + depth++ + case '}', ']': + depth-- + if depth == 0 { + i++ + return i, json[s:i] + } + } + } + } + return i, json[s:] +} + +func parseObject(c *parseContext, i int, path string) (int, bool) { + var pmatch, kesc, vesc, ok, hit bool + var key, val string + rp := parseObjectPath(path) + for i < len(c.json) { + for ; i < len(c.json); i++ { + if c.json[i] == '"' { + // parse_key_string + // this is slightly different from getting s string value + // because we don't need the outer quotes. 
+ i++ + var s = i + for ; i < len(c.json); i++ { + if c.json[i] > '\\' { + continue + } + if c.json[i] == '"' { + i, key, kesc, ok = i+1, c.json[s:i], false, true + goto parse_key_string_done + } + if c.json[i] == '\\' { + i++ + for ; i < len(c.json); i++ { + if c.json[i] > '\\' { + continue + } + if c.json[i] == '"' { + // look for an escaped slash + if c.json[i-1] == '\\' { + n := 0 + for j := i - 2; j > 0; j-- { + if c.json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + i, key, kesc, ok = i+1, c.json[s:i], true, true + goto parse_key_string_done + } + } + break + } + } + i, key, kesc, ok = i, c.json[s:], false, false + parse_key_string_done: + break + } + if c.json[i] == '}' { + return i + 1, false + } + } + if !ok { + return i, false + } + if rp.wild { + if kesc { + pmatch = match.Match(unescape(key), rp.part) + } else { + pmatch = match.Match(key, rp.part) + } + } else { + if kesc { + pmatch = rp.part == unescape(key) + } else { + pmatch = rp.part == key + } + } + hit = pmatch && !rp.more + for ; i < len(c.json); i++ { + switch c.json[i] { + default: + continue + case '"': + i++ + i, val, vesc, ok = parseString(c.json, i) + if !ok { + return i, false + } + if hit { + if vesc { + c.value.Str = unescape(val[1 : len(val)-1]) + } else { + c.value.Str = val[1 : len(val)-1] + } + c.value.Raw = val + c.value.Type = String + return i, true + } + case '{': + if pmatch && !hit { + i, hit = parseObject(c, i+1, rp.path) + if hit { + return i, true + } + } else { + i, val = parseSquash(c.json, i) + if hit { + c.value.Raw = val + c.value.Type = JSON + return i, true + } + } + case '[': + if pmatch && !hit { + i, hit = parseArray(c, i+1, rp.path) + if hit { + return i, true + } + } else { + i, val = parseSquash(c.json, i) + if hit { + c.value.Raw = val + c.value.Type = JSON + return i, true + } + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + i, val = parseNumber(c.json, i) + if hit { + c.value.Raw = val + c.value.Type = Number + c.value.Num, _ = strconv.ParseFloat(val, 64) + return i, true + } + case 't', 'f', 'n': + vc := c.json[i] + i, val = parseLiteral(c.json, i) + if hit { + c.value.Raw = val + switch vc { + case 't': + c.value.Type = True + case 'f': + c.value.Type = False + } + return i, true + } + } + break + } + } + return i, false +} +func queryMatches(rp *arrayPathResult, value Result) bool { + rpv := rp.query.value + if len(rpv) > 2 && rpv[0] == '"' && rpv[len(rpv)-1] == '"' { + rpv = rpv[1 : len(rpv)-1] + } + switch value.Type { + case String: + switch rp.query.op { + case "=": + return value.Str == rpv + case "!=": + return value.Str != rpv + case "<": + return value.Str < rpv + case "<=": + return value.Str <= rpv + case ">": + return value.Str > rpv + case ">=": + return value.Str >= rpv + case "%": + return match.Match(value.Str, rpv) + } + case Number: + rpvn, _ := strconv.ParseFloat(rpv, 64) + switch rp.query.op { + case "=": + return value.Num == rpvn + case "!=": + return value.Num == rpvn + case "<": + return value.Num < rpvn + case "<=": + return value.Num <= rpvn + case ">": + return value.Num > rpvn + case ">=": + return value.Num >= rpvn + } + case True: + switch rp.query.op { + case "=": + return rpv == "true" + case "!=": + return rpv != "true" + case ">": + return rpv == "false" + case ">=": + return true + } + case False: + switch rp.query.op { + case "=": + return rpv == "false" + case "!=": + return rpv != "false" + case "<": + return rpv == "true" + case "<=": + return true + } + } + return false +} +func parseArray(c 
*parseContext, i int, path string) (int, bool) { + var pmatch, vesc, ok, hit bool + var val string + var h int + var alog []int + var partidx int + var multires []byte + rp := parseArrayPath(path) + if !rp.arrch { + n, err := strconv.ParseUint(rp.part, 10, 64) + if err != nil { + partidx = -1 + } else { + partidx = int(n) + } + } + for i < len(c.json) { + if !rp.arrch { + pmatch = partidx == h + hit = pmatch && !rp.more + } + h++ + if rp.alogok { + alog = append(alog, i) + } + for ; i < len(c.json); i++ { + switch c.json[i] { + default: + continue + case '"': + i++ + i, val, vesc, ok = parseString(c.json, i) + if !ok { + return i, false + } + if hit { + if rp.alogok { + break + } + if vesc { + c.value.Str = unescape(val[1 : len(val)-1]) + } else { + c.value.Str = val[1 : len(val)-1] + } + c.value.Raw = val + c.value.Type = String + return i, true + } + case '{': + if pmatch && !hit { + i, hit = parseObject(c, i+1, rp.path) + if hit { + if rp.alogok { + break + } + return i, true + } + } else { + i, val = parseSquash(c.json, i) + if rp.query.on { + res := Get(val, rp.query.path) + if queryMatches(&rp, res) { + if rp.more { + res = Get(val, rp.path) + } else { + res = Result{Raw: val, Type: JSON} + } + if rp.query.all { + if len(multires) == 0 { + multires = append(multires, '[') + } else { + multires = append(multires, ',') + } + multires = append(multires, res.Raw...) + } else { + c.value = res + return i, true + } + } + } else if hit { + if rp.alogok { + break + } + c.value.Raw = val + c.value.Type = JSON + return i, true + } + } + case '[': + if pmatch && !hit { + i, hit = parseArray(c, i+1, rp.path) + if hit { + if rp.alogok { + break + } + return i, true + } + } else { + i, val = parseSquash(c.json, i) + if hit { + if rp.alogok { + break + } + c.value.Raw = val + c.value.Type = JSON + return i, true + } + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + i, val = parseNumber(c.json, i) + if hit { + if rp.alogok { + break + } + c.value.Raw = val + c.value.Type = Number + c.value.Num, _ = strconv.ParseFloat(val, 64) + return i, true + } + case 't', 'f', 'n': + vc := c.json[i] + i, val = parseLiteral(c.json, i) + if hit { + if rp.alogok { + break + } + c.value.Raw = val + switch vc { + case 't': + c.value.Type = True + case 'f': + c.value.Type = False + } + return i, true + } + case ']': + if rp.arrch && rp.part == "#" { + if rp.alogok { + var jsons = make([]byte, 0, 64) + jsons = append(jsons, '[') + for j, k := 0, 0; j < len(alog); j++ { + res := Get(c.json[alog[j]:], rp.alogkey) + if res.Exists() { + if k > 0 { + jsons = append(jsons, ',') + } + jsons = append(jsons, []byte(res.Raw)...) + k++ + } + } + jsons = append(jsons, ']') + c.value.Type = JSON + c.value.Raw = string(jsons) + return i + 1, true + } else { + if rp.alogok { + break + } + c.value.Raw = val + c.value.Type = Number + c.value.Num = float64(h - 1) + c.calcd = true + return i + 1, true + } + } + if len(multires) > 0 && !c.value.Exists() { + c.value = Result{ + Raw: string(append(multires, ']')), + Type: JSON, + } + } + return i + 1, false + } + break + } + } + return i, false +} + +type parseContext struct { + json string + value Result + calcd bool +} + +// Get searches json for the specified path. +// A path is in dot syntax, such as "name.last" or "age". +// This function expects that the json is well-formed, and does not validate. +// Invalid json will not panic, but it may return back unexpected results. +// When the value is found it's returned immediately. 
+// +// A path is a series of keys searated by a dot. +// A key may contain special wildcard characters '*' and '?'. +// To access an array value use the index as the key. +// To get the number of elements in an array or to access a child path, use the '#' character. +// The dot and wildcard character can be escaped with '\'. +// +// { +// "name": {"first": "Tom", "last": "Anderson"}, +// "age":37, +// "children": ["Sara","Alex","Jack"], +// "friends": [ +// {"first": "James", "last": "Murphy"}, +// {"first": "Roger", "last": "Craig"} +// ] +// } +// "name.last" >> "Anderson" +// "age" >> 37 +// "children" >> ["Sara","Alex","Jack"] +// "children.#" >> 3 +// "children.1" >> "Alex" +// "child*.2" >> "Jack" +// "c?ildren.0" >> "Sara" +// "friends.#.first" >> ["James","Roger"] +// +func Get(json, path string) Result { + var i int + var c = &parseContext{json: json} + for ; i < len(c.json); i++ { + if c.json[i] == '{' { + i++ + parseObject(c, i, path) + break + } + if c.json[i] == '[' { + i++ + parseArray(c, i, path) + break + } + } + if len(c.value.Raw) > 0 && !c.calcd { + jhdr := *(*reflect.StringHeader)(unsafe.Pointer(&json)) + rhdr := *(*reflect.StringHeader)(unsafe.Pointer(&(c.value.Raw))) + c.value.Index = int(rhdr.Data - jhdr.Data) + if c.value.Index < 0 || c.value.Index >= len(json) { + c.value.Index = 0 + } + } + return c.value +} +func fromBytesGet(result Result) Result { + // safely get the string headers + rawhi := *(*reflect.StringHeader)(unsafe.Pointer(&result.Raw)) + strhi := *(*reflect.StringHeader)(unsafe.Pointer(&result.Str)) + // create byte slice headers + rawh := reflect.SliceHeader{Data: rawhi.Data, Len: rawhi.Len} + strh := reflect.SliceHeader{Data: strhi.Data, Len: strhi.Len} + if strh.Data == 0 { + // str is nil + if rawh.Data == 0 { + // raw is nil + result.Raw = "" + } else { + // raw has data, safely copy the slice header to a string + result.Raw = string(*(*[]byte)(unsafe.Pointer(&rawh))) + } + result.Str = "" + } else if rawh.Data == 0 { + // raw is nil + result.Raw = "" + // str has data, safely copy the slice header to a string + result.Str = string(*(*[]byte)(unsafe.Pointer(&strh))) + } else if strh.Data >= rawh.Data && + int(strh.Data)+strh.Len <= int(rawh.Data)+rawh.Len { + // Str is a substring of Raw. + start := int(strh.Data - rawh.Data) + // safely copy the raw slice header + result.Raw = string(*(*[]byte)(unsafe.Pointer(&rawh))) + // substring the raw + result.Str = result.Raw[start : start+strh.Len] + } else { + // safely copy both the raw and str slice headers to strings + result.Raw = string(*(*[]byte)(unsafe.Pointer(&rawh))) + result.Str = string(*(*[]byte)(unsafe.Pointer(&strh))) + } + return result +} + +// GetBytes searches json for the specified path. 
+// If working with bytes, this method preferred over Get(string(data), path) +func GetBytes(json []byte, path string) Result { + var result Result + if json != nil { + // unsafe cast to string + result = Get(*(*string)(unsafe.Pointer(&json)), path) + result = fromBytesGet(result) + } + return result +} + +// unescape unescapes a string +func unescape(json string) string { //, error) { + var str = make([]byte, 0, len(json)) + for i := 0; i < len(json); i++ { + switch { + default: + str = append(str, json[i]) + case json[i] < ' ': + return "" //, errors.New("invalid character in string") + case json[i] == '\\': + i++ + if i >= len(json) { + return "" //, errors.New("invalid escape sequence") + } + switch json[i] { + default: + return "" //, errors.New("invalid escape sequence") + case '\\': + str = append(str, '\\') + case '/': + str = append(str, '/') + case 'b': + str = append(str, '\b') + case 'f': + str = append(str, '\f') + case 'n': + str = append(str, '\n') + case 'r': + str = append(str, '\r') + case 't': + str = append(str, '\t') + case '"': + str = append(str, '"') + case 'u': + if i+5 > len(json) { + return "" //, errors.New("invalid escape sequence") + } + i++ + // extract the codepoint + var code int + for j := i; j < i+4; j++ { + switch { + default: + return "" //, errors.New("invalid escape sequence") + case json[j] >= '0' && json[j] <= '9': + code += (int(json[j]) - '0') << uint(12-(j-i)*4) + case json[j] >= 'a' && json[j] <= 'f': + code += (int(json[j]) - 'a' + 10) << uint(12-(j-i)*4) + case json[j] >= 'a' && json[j] <= 'f': + code += (int(json[j]) - 'a' + 10) << uint(12-(j-i)*4) + } + } + str = append(str, []byte(string(code))...) + i += 3 // only 3 because we will increment on the for-loop + } + } + } + return string(str) //, nil +} + +// Less return true if a token is less than another token. +// The caseSensitive paramater is used when the tokens are Strings. +// The order when comparing two different type is: +// +// Null < False < Number < String < True < JSON +// +func (t Result) Less(token Result, caseSensitive bool) bool { + if t.Type < token.Type { + return true + } + if t.Type > token.Type { + return false + } + if t.Type == String { + if caseSensitive { + return t.Str < token.Str + } + return stringLessInsensitive(t.Str, token.Str) + } + if t.Type == Number { + return t.Num < token.Num + } + return t.Raw < token.Raw +} + +func stringLessInsensitive(a, b string) bool { + for i := 0; i < len(a) && i < len(b); i++ { + if a[i] >= 'A' && a[i] <= 'Z' { + if b[i] >= 'A' && b[i] <= 'Z' { + // both are uppercase, do nothing + if a[i] < b[i] { + return true + } else if a[i] > b[i] { + return false + } + } else { + // a is uppercase, convert a to lowercase + if a[i]+32 < b[i] { + return true + } else if a[i]+32 > b[i] { + return false + } + } + } else if b[i] >= 'A' && b[i] <= 'Z' { + // b is uppercase, convert b to lowercase + if a[i] < b[i]+32 { + return true + } else if a[i] > b[i]+32 { + return false + } + } else { + // neither are uppercase + if a[i] < b[i] { + return true + } else if a[i] > b[i] { + return false + } + } + } + return len(a) < len(b) +} + +// parseAny parses the next value from a json string. +// A Result is returned when the hit param is set. 
+// The return values are (i int, res Result, ok bool) +func parseAny(json string, i int, hit bool) (int, Result, bool) { + var res Result + var val string + for ; i < len(json); i++ { + if json[i] == '{' || json[i] == '[' { + i, val = parseSquash(json, i) + if hit { + res.Raw = val + res.Type = JSON + } + return i, res, true + } + if json[i] <= ' ' { + continue + } + switch json[i] { + case '"': + i++ + var vesc bool + var ok bool + i, val, vesc, ok = parseString(json, i) + if !ok { + return i, res, false + } + if hit { + res.Type = String + res.Raw = val + if vesc { + res.Str = unescape(val[1 : len(val)-1]) + } else { + res.Str = val[1 : len(val)-1] + } + } + return i, res, true + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + i, val = parseNumber(json, i) + if hit { + res.Raw = val + res.Type = Number + res.Num, _ = strconv.ParseFloat(val, 64) + } + return i, res, true + case 't', 'f', 'n': + vc := json[i] + i, val = parseLiteral(json, i) + if hit { + res.Raw = val + switch vc { + case 't': + res.Type = True + case 'f': + res.Type = False + } + return i, res, true + } + } + } + return i, res, false +} + +var ( // used for testing + testWatchForFallback bool + testLastWasFallback bool +) + +// areSimplePaths returns true if all the paths are simple enough +// to parse quickly for GetMany(). Allows alpha-numeric, dots, +// underscores, and the dollar sign. It does not allow non-alnum, +// escape characters, or keys which start with a numbers. +// For example: +// "name.last" == OK +// "user.id0" == OK +// "user.ID" == OK +// "user.first_name" == OK +// "user.firstName" == OK +// "user.0item" == BAD +// "user.#id" == BAD +// "user\.name" == BAD +func areSimplePaths(paths []string) bool { + for _, path := range paths { + var fi int // first key index, for keys with numeric prefix + for i := 0; i < len(path); i++ { + if path[i] >= 'a' && path[i] <= 'z' { + // a-z is likely to be the highest frequency charater. + continue + } + if path[i] == '.' { + fi = i + 1 + continue + } + if path[i] >= 'A' && path[i] <= 'Z' { + continue + } + if path[i] == '_' || path[i] == '$' { + continue + } + if i > fi && path[i] >= '0' && path[i] <= '9' { + continue + } + return false + } + } + return true +} + +// GetMany searches json for the multiple paths. +// The return value is a Result array where the number of items +// will be equal to the number of input paths. +func GetMany(json string, paths ...string) []Result { + if len(paths) < 4 { + if testWatchForFallback { + testLastWasFallback = false + } + switch len(paths) { + case 0: + // return nil when no paths are specified. + return nil + case 1: + return []Result{Get(json, paths[0])} + case 2: + return []Result{Get(json, paths[0]), Get(json, paths[1])} + case 3: + return []Result{Get(json, paths[0]), Get(json, paths[1]), Get(json, paths[2])} + } + } + var results []Result + var ok bool + var i int + if len(paths) > 512 { + // we can only support up to 512 paths. Is that too many? + goto fallback + } + if !areSimplePaths(paths) { + // If there is even one path that is not considered "simple" then + // we need to use the fallback method. + goto fallback + } + // locate the object token. + for ; i < len(json); i++ { + if json[i] == '{' { + i++ + break + } + if json[i] <= ' ' { + continue + } + goto fallback + } + // use the call function table. 
+ if len(paths) <= 8 { + results, ok = getMany8(json, i, paths) + } else if len(paths) <= 16 { + results, ok = getMany16(json, i, paths) + } else if len(paths) <= 32 { + results, ok = getMany32(json, i, paths) + } else if len(paths) <= 64 { + results, ok = getMany64(json, i, paths) + } else if len(paths) <= 128 { + results, ok = getMany128(json, i, paths) + } else if len(paths) <= 256 { + results, ok = getMany256(json, i, paths) + } else if len(paths) <= 512 { + results, ok = getMany512(json, i, paths) + } + if !ok { + // there was some fault while parsing. we should try the + // fallback method. This could result in performance + // degregation in some cases. + goto fallback + } + if testWatchForFallback { + testLastWasFallback = false + } + return results +fallback: + results = results[:0] + for i := 0; i < len(paths); i++ { + results = append(results, Get(json, paths[i])) + } + if testWatchForFallback { + testLastWasFallback = true + } + return results +} + +// GetManyBytes searches json for the specified path. +// If working with bytes, this method preferred over +// GetMany(string(data), paths...) +func GetManyBytes(json []byte, paths ...string) []Result { + if json == nil { + return GetMany("", paths...) + } + results := GetMany(*(*string)(unsafe.Pointer(&json)), paths...) + for i := range results { + results[i] = fromBytesGet(results[i]) + } + return results +} + +// parseGetMany parses a json object for keys that match against the callers +// paths. It's a best-effort attempt and quickly locating and assigning the +// values to the []Result array. If there are failures such as bad json, or +// invalid input paths, or too much recursion, the function will exit with a +// return value of 'false'. +func parseGetMany( + json string, i int, + level uint, kplen int, + paths []string, completed []bool, matches []uint64, results []Result, +) (int, bool) { + if level > 62 { + // The recursion level is limited because the matches []uint64 + // array cannot handle more the 64-bits. + return i, false + } + // At this point the last character read was a '{'. + // Read all object keys and try to match against the paths. + var key string + var val string + var vesc, ok bool +next_key: + for ; i < len(json); i++ { + if json[i] == '"' { + // read the key + i, val, vesc, ok = parseString(json, i+1) + if !ok { + return i, false + } + if vesc { + // the value is escaped + key = unescape(val[1 : len(val)-1]) + } else { + // just a plain old ascii key + key = val[1 : len(val)-1] + } + var hasMatch bool + var parsedVal bool + var valOrgIndex int + var valPathIndex int + for j := 0; j < len(key); j++ { + if key[j] == '.' { + // we need to look for keys with dot and ignore them. + if i, _, ok = parseAny(json, i, false); !ok { + return i, false + } + continue next_key + } + } + var usedPaths int + // loop through paths and look for matches + for j := 0; j < len(paths); j++ { + if completed[j] { + usedPaths++ + // ignore completed paths + continue + } + if level > 0 && (matches[j]>>(level-1))&1 == 0 { + // ignore unmatched paths + usedPaths++ + continue + } + + // try to match the key to the path + // this is spaghetti code but the idea is to minimize + // calls and variable assignments when comparing the + // key to paths + if len(paths[j])-kplen >= len(key) { + i, k := kplen, 0 + for ; k < len(key); k, i = k+1, i+1 { + if key[k] != paths[j][i] { + // no match + goto nomatch + } + } + if i < len(paths[j]) { + if paths[j][i] == '.' 
{ + // matched, but there still more keys in the path + goto match_not_atend + } + } + // matched and at the end of the path + goto match_atend + } + // no match, jump to the nomatch label + goto nomatch + match_atend: + // found a match + // at the end of the path. we must take the value. + usedPaths++ + if !parsedVal { + // the value has not been parsed yet. let's do so. + valOrgIndex = i // keep track of the current position. + i, results[j], ok = parseAny(json, i, true) + if !ok { + return i, false + } + parsedVal = true + valPathIndex = j + } else { + results[j] = results[valPathIndex] + } + // mark as complete + completed[j] = true + // jump over the match_not_atend label + goto nomatch + match_not_atend: + // found a match + // still in the middle of the path. + usedPaths++ + // mark the path as matched + matches[j] |= 1 << level + if !hasMatch { + hasMatch = true + } + nomatch: // noop label + } + + if !parsedVal { + if hasMatch { + // we found a match and the value has not been parsed yet. + // let's find out if the next value type is an object. + for ; i < len(json); i++ { + if json[i] <= ' ' || json[i] == ':' { + continue + } + break + } + if i < len(json) { + if json[i] == '{' { + // it's an object. let's go deeper + i, ok = parseGetMany(json, i+1, level+1, kplen+len(key)+1, paths, completed, matches, results) + if !ok { + return i, false + } + } else { + // not an object. just parse and ignore. + if i, _, ok = parseAny(json, i, false); !ok { + return i, false + } + } + } + } else { + // Since there was no matches we can just parse the value and + // ignore the result. + if i, _, ok = parseAny(json, i, false); !ok { + return i, false + } + } + } else if hasMatch && len(results[valPathIndex].Raw) > 0 && results[valPathIndex].Raw[0] == '{' { + // The value was already parsed and the value type is an object. + // Rewind the json index and let's parse deeper. + i = valOrgIndex + for ; i < len(json); i++ { + if json[i] == '{' { + break + } + } + i, ok = parseGetMany(json, i+1, level+1, kplen+len(key)+1, paths, completed, matches, results) + if !ok { + return i, false + } + } + if usedPaths == len(paths) { + // all paths have been used, either completed or matched. + // we should stop parsing this object to save CPU cycles. + if level > 0 && i < len(json) { + i, _ = parseSquash(json, i) + } + return i, true + } + } else if json[i] == '}' { + // reached the end of the object. end it here. + return i + 1, true + } + } + return i, true +} + +// Call table for GetMany. Using an isolated function allows for allocating +// arrays with know capacities on the stack, as opposed to dynamically +// allocating on the heap. This can provide a tremendous performance boost +// by avoiding the GC. 
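The `getMany*` helpers that follow rely on Go's full slice expressions to fix both the length and the capacity of their scratch slices. A standalone sketch of just that mechanic, with arbitrary sizes and paths:

```go
package main

import "fmt"

func main() {
	// Scratch slices are made with a constant capacity and then re-sliced
	// with a full slice expression s[low:high:max], which sets the length
	// to high-low while pinning the capacity at max.
	const max = 8
	paths := []string{"a.b", "a.c", "d"}

	completed := make([]bool, 0, max)
	completed = completed[0:len(paths):max]
	fmt.Println(len(completed), cap(completed)) // 3 8

	// Appends stay inside the pinned capacity until it is exhausted;
	// growing past max would allocate a fresh backing array instead of
	// touching memory beyond the original one.
	completed = append(completed, true)
	fmt.Println(len(completed), cap(completed)) // 4 8
}
```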
+func getMany8(json string, i int, paths []string) ([]Result, bool) { + const max = 8 + var completed = make([]bool, 0, max) + var matches = make([]uint64, 0, max) + var results = make([]Result, 0, max) + completed = completed[0:len(paths):max] + matches = matches[0:len(paths):max] + results = results[0:len(paths):max] + _, ok := parseGetMany(json, i, 0, 0, paths, completed, matches, results) + return results, ok +} +func getMany16(json string, i int, paths []string) ([]Result, bool) { + const max = 16 + var completed = make([]bool, 0, max) + var matches = make([]uint64, 0, max) + var results = make([]Result, 0, max) + completed = completed[0:len(paths):max] + matches = matches[0:len(paths):max] + results = results[0:len(paths):max] + _, ok := parseGetMany(json, i, 0, 0, paths, completed, matches, results) + return results, ok +} +func getMany32(json string, i int, paths []string) ([]Result, bool) { + const max = 32 + var completed = make([]bool, 0, max) + var matches = make([]uint64, 0, max) + var results = make([]Result, 0, max) + completed = completed[0:len(paths):max] + matches = matches[0:len(paths):max] + results = results[0:len(paths):max] + _, ok := parseGetMany(json, i, 0, 0, paths, completed, matches, results) + return results, ok +} +func getMany64(json string, i int, paths []string) ([]Result, bool) { + const max = 64 + var completed = make([]bool, 0, max) + var matches = make([]uint64, 0, max) + var results = make([]Result, 0, max) + completed = completed[0:len(paths):max] + matches = matches[0:len(paths):max] + results = results[0:len(paths):max] + _, ok := parseGetMany(json, i, 0, 0, paths, completed, matches, results) + return results, ok +} +func getMany128(json string, i int, paths []string) ([]Result, bool) { + const max = 128 + var completed = make([]bool, 0, max) + var matches = make([]uint64, 0, max) + var results = make([]Result, 0, max) + completed = completed[0:len(paths):max] + matches = matches[0:len(paths):max] + results = results[0:len(paths):max] + _, ok := parseGetMany(json, i, 0, 0, paths, completed, matches, results) + return results, ok +} +func getMany256(json string, i int, paths []string) ([]Result, bool) { + const max = 256 + var completed = make([]bool, 0, max) + var matches = make([]uint64, 0, max) + var results = make([]Result, 0, max) + completed = completed[0:len(paths):max] + matches = matches[0:len(paths):max] + results = results[0:len(paths):max] + _, ok := parseGetMany(json, i, 0, 0, paths, completed, matches, results) + return results, ok +} +func getMany512(json string, i int, paths []string) ([]Result, bool) { + const max = 512 + var completed = make([]bool, 0, max) + var matches = make([]uint64, 0, max) + var results = make([]Result, 0, max) + completed = completed[0:len(paths):max] + matches = matches[0:len(paths):max] + results = results[0:len(paths):max] + _, ok := parseGetMany(json, i, 0, 0, paths, completed, matches, results) + return results, ok +} diff --git a/vendor/github.com/tidwall/gjson/logo.png b/vendor/github.com/tidwall/gjson/logo.png new file mode 100644 index 0000000..17a8bbe Binary files /dev/null and b/vendor/github.com/tidwall/gjson/logo.png differ diff --git a/vendor/github.com/tidwall/grect/LICENSE.md b/vendor/github.com/tidwall/grect/LICENSE.md new file mode 100644 index 0000000..58f5819 --- /dev/null +++ b/vendor/github.com/tidwall/grect/LICENSE.md @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2016 Josh Baker + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software 
and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/tidwall/grect/README.md b/vendor/github.com/tidwall/grect/README.md new file mode 100644 index 0000000..04a8bf0 --- /dev/null +++ b/vendor/github.com/tidwall/grect/README.md @@ -0,0 +1,25 @@ +GRECT +==== + +Quickly get the outer rectangle for GeoJSON, WKT, WKB. + +```go + r := grect.Get(`{ + "type": "Polygon", + "coordinates": [ + [ [100.0, 0.0], [101.0, 0.0], [101.0, 1.0], + [100.0, 1.0], [100.0, 0.0] ] + ] + }`) + fmt.Printf("%v %v\n", r.Min, r.Max) + // Output: + // [100 0] [101 1] +``` + +## Contact +Josh Baker [@tidwall](http://twitter.com/tidwall) + +## License + +GRECT source code is available under the MIT [License](/LICENSE). + diff --git a/vendor/github.com/tidwall/grect/grect.go b/vendor/github.com/tidwall/grect/grect.go new file mode 100644 index 0000000..13eb761 --- /dev/null +++ b/vendor/github.com/tidwall/grect/grect.go @@ -0,0 +1,337 @@ +package grect + +import ( + "strconv" + "strings" + + "github.com/tidwall/gjson" +) + +type Rect struct { + Min, Max []float64 +} + +func (r Rect) String() string { + diff := len(r.Min) != len(r.Max) + if !diff { + for i := 0; i < len(r.Min); i++ { + if r.Min[i] != r.Max[i] { + diff = true + break + } + } + } + var buf []byte + buf = append(buf, '[') + for i, v := range r.Min { + if i > 0 { + buf = append(buf, ' ') + } + buf = append(buf, strconv.FormatFloat(v, 'f', -1, 64)...) + } + if diff { + buf = append(buf, ']', ',', '[') + for i, v := range r.Max { + if i > 0 { + buf = append(buf, ' ') + } + buf = append(buf, strconv.FormatFloat(v, 'f', -1, 64)...) + } + } + buf = append(buf, ']') + return string(buf) +} + +func normalize(min, max []float64) (nmin, nmax []float64) { + if len(max) == 0 { + return min, min + } else if len(max) != len(min) { + if len(max) < len(min) { + max = append(max, min[len(max):]...) + } else if len(min) < len(max) { + min = append(min, max[len(min):]...) 
+ } + } + match := true + for i := 0; i < len(min); i++ { + if min[i] != max[i] { + if match { + match = false + } + if min[i] > max[i] { + min[i], max[i] = max[i], min[i] + } + } + } + if match { + return min, min + } + return min, max +} + +func Get(s string) Rect { + var i int + var ws bool + var min, max []float64 + for ; i < len(s); i++ { + switch s[i] { + default: + continue + case ' ', '\t', '\r', '\n': + ws = true + continue + case '[': + min, max, i = getRect(s, i) + case '{': + min, max, i = getGeoJSON(s, i) + case 0x00, 0x01: + if !ws { + // return parseWKB(s, i) + } + case 'p', 'P', 'l', 'L', 'm', 'M', 'g', 'G': + min, max, i = getWKT(s, i) + } + break + } + min, max = normalize(min, max) + return Rect{Min: min, Max: max} +} + +func getRect(s string, i int) (min, max []float64, ri int) { + a := s[i:] + parts := strings.Split(a, ",") + for i := 0; i < len(parts) && i < 2; i++ { + part := parts[i] + if len(part) > 0 && (part[0] <= ' ' || part[len(part)-1] <= ' ') { + part = strings.TrimSpace(part) + } + if len(part) >= 2 && part[0] == '[' && part[len(part)-1] == ']' { + pieces := strings.Split(part[1:len(part)-1], " ") + if i == 0 { + min = make([]float64, 0, len(pieces)) + } else { + max = make([]float64, 0, len(pieces)) + } + for j := 0; j < len(pieces); j++ { + piece := pieces[j] + if piece != "" { + n, _ := strconv.ParseFloat(piece, 64) + if i == 0 { + min = append(min, n) + } else { + max = append(max, n) + } + } + } + } + } + + // normalize + if len(parts) == 1 { + max = min + } else { + min, max = normalize(min, max) + } + + return min, max, len(s) +} + +func union(min1, max1, min2, max2 []float64) (umin, umax []float64) { + for i := 0; i < len(min1) || i < len(min2); i++ { + if i >= len(min1) { + // just copy min2 + umin = append(umin, min2[i]) + umax = append(umax, max2[i]) + } else if i >= len(min2) { + // just copy min1 + umin = append(umin, min1[i]) + umax = append(umax, max1[i]) + } else { + if min1[i] < min2[i] { + umin = append(umin, min1[i]) + } else { + umin = append(umin, min2[i]) + } + if max1[i] > max2[i] { + umax = append(umax, max1[i]) + } else { + umax = append(umax, max2[i]) + } + } + } + return umin, umax +} + +func getWKT(s string, i int) (min, max []float64, ri int) { + switch s[i] { + default: + for ; i < len(s); i++ { + if s[i] == ',' { + return nil, nil, i + } + if s[i] == '(' { + return getWKTAny(s, i) + } + } + return nil, nil, i + case 'g', 'G': + if len(s)-i < 18 { + return nil, nil, i + } + return getWKTGeometryCollection(s, i+18) + } +} + +func getWKTAny(s string, i int) (min, max []float64, ri int) { + min, max = make([]float64, 0, 4), make([]float64, 0, 4) + var depth int + var ni int + var idx int +loop: + for ; i < len(s); i++ { + switch s[i] { + default: + if ni == 0 { + ni = i + } + case '(': + depth++ + case ')', ' ', '\t', '\r', '\n', ',': + if ni != 0 { + n, _ := strconv.ParseFloat(s[ni:i], 64) + if idx >= len(min) { + min = append(min, n) + max = append(max, n) + } else { + if n < min[idx] { + min[idx] = n + } else if n > max[idx] { + max[idx] = n + } + } + idx++ + ni = 0 + } + switch s[i] { + case ')': + idx = 0 + depth-- + if depth == 0 { + i++ + break loop + } + case ',': + idx = 0 + } + } + } + return min, max, i +} + +func getWKTGeometryCollection(s string, i int) (min, max []float64, ri int) { + var depth int + for ; i < len(s); i++ { + if s[i] == ',' || s[i] == ')' { + // do not increment the index + return nil, nil, i + } + if s[i] == '(' { + depth++ + i++ + break + } + } +next: + for ; i < len(s); i++ { + switch s[i] { + case 
'p', 'P', 'l', 'L', 'm', 'M', 'g', 'G': + var min2, max2 []float64 + min2, max2, i = getWKT(s, i) + min, max = union(min, max, min2, max2) + for ; i < len(s); i++ { + if s[i] == ',' { + i++ + goto next + } + if s[i] == ')' { + i++ + goto done + } + } + case ' ', '\t', '\r', '\n': + continue + default: + goto end_early + } + } +end_early: + // just balance the parens + for ; i < len(s); i++ { + if s[i] == '(' { + depth++ + } else if s[i] == ')' { + depth-- + if depth == 0 { + i++ + break + } + } + } +done: + return min, max, i +} +func getGeoJSON(s string, i int) (min, max []float64, ri int) { + json := s[i:] + switch gjson.Get(json, "type").String() { + default: + min, max = getMinMaxBrackets(gjson.Get(json, "coordinates").Raw) + case "Feature": + min, max, _ = getGeoJSON(gjson.Get(json, "geometry").String(), 0) + case "FeatureCollection": + for _, json := range gjson.Get(json, "features").Array() { + nmin, nmax, _ := getGeoJSON(json.String(), 0) + min, max = union(min, max, nmin, nmax) + } + case "GeometryCollection": + for _, json := range gjson.Get(json, "geometries").Array() { + nmin, nmax, _ := getGeoJSON(json.String(), 0) + min, max = union(min, max, nmin, nmax) + } + } + return min, max, len(json) +} + +func getMinMaxBrackets(s string) (min, max []float64) { + var ni int + var idx int + for i := 0; i < len(s); i++ { + switch s[i] { + default: + if ni == 0 { + ni = i + } + case '[', ',', ']', ' ', '\t', '\r', '\n': + if ni > 0 { + n, _ := strconv.ParseFloat(s[ni:i], 64) + if idx >= len(min) { + min = append(min, n) + max = append(max, n) + } else { + if n < min[idx] { + min[idx] = n + } else if n > max[idx] { + max[idx] = n + } + } + ni = 0 + idx++ + } + if s[i] == ']' { + idx = 0 + } + + } + } + + return +} diff --git a/vendor/github.com/tidwall/match/LICENSE b/vendor/github.com/tidwall/match/LICENSE new file mode 100644 index 0000000..58f5819 --- /dev/null +++ b/vendor/github.com/tidwall/match/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2016 Josh Baker + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/tidwall/match/README.md b/vendor/github.com/tidwall/match/README.md new file mode 100644 index 0000000..04b0aaa --- /dev/null +++ b/vendor/github.com/tidwall/match/README.md @@ -0,0 +1,31 @@ +Match +===== +Build Status +GoDoc + +Match is a very simple pattern matcher where '*' matches on any +number characters and '?' matches on any one character. 
+Installing +---------- + +``` +go get -u github.com/tidwall/match +``` + +Example +------- + +```go +match.Match("hello", "*llo") +match.Match("jello", "?ello") +match.Match("hello", "h*o") +``` + + +Contact +------- +Josh Baker [@tidwall](http://twitter.com/tidwall) + +License +------- +Redcon source code is available under the MIT [License](/LICENSE). diff --git a/vendor/github.com/tidwall/match/match.go b/vendor/github.com/tidwall/match/match.go new file mode 100644 index 0000000..8885add --- /dev/null +++ b/vendor/github.com/tidwall/match/match.go @@ -0,0 +1,192 @@ +// Match provides a simple pattern matcher with unicode support. +package match + +import "unicode/utf8" + +// Match returns true if str matches pattern. This is a very +// simple wildcard match where '*' matches on any number characters +// and '?' matches on any one character. + +// pattern: +// { term } +// term: +// '*' matches any sequence of non-Separator characters +// '?' matches any single non-Separator character +// c matches character c (c != '*', '?', '\\') +// '\\' c matches character c +// +func Match(str, pattern string) bool { + if pattern == "*" { + return true + } + return deepMatch(str, pattern) +} +func deepMatch(str, pattern string) bool { + for len(pattern) > 0 { + if pattern[0] > 0x7f { + return deepMatchRune(str, pattern) + } + switch pattern[0] { + default: + if len(str) == 0 { + return false + } + if str[0] > 0x7f { + return deepMatchRune(str, pattern) + } + if str[0] != pattern[0] { + return false + } + case '?': + if len(str) == 0 { + return false + } + case '*': + return deepMatch(str, pattern[1:]) || + (len(str) > 0 && deepMatch(str[1:], pattern)) + } + str = str[1:] + pattern = pattern[1:] + } + return len(str) == 0 && len(pattern) == 0 +} + +func deepMatchRune(str, pattern string) bool { + var sr, pr rune + var srsz, prsz int + + // read the first rune ahead of time + if len(str) > 0 { + if str[0] > 0x7f { + sr, srsz = utf8.DecodeRuneInString(str) + } else { + sr, srsz = rune(str[0]), 1 + } + } else { + sr, srsz = utf8.RuneError, 0 + } + if len(pattern) > 0 { + if pattern[0] > 0x7f { + pr, prsz = utf8.DecodeRuneInString(pattern) + } else { + pr, prsz = rune(pattern[0]), 1 + } + } else { + pr, prsz = utf8.RuneError, 0 + } + // done reading + for pr != utf8.RuneError { + switch pr { + default: + if srsz == utf8.RuneError { + return false + } + if sr != pr { + return false + } + case '?': + if srsz == utf8.RuneError { + return false + } + case '*': + return deepMatchRune(str, pattern[prsz:]) || + (srsz > 0 && deepMatchRune(str[srsz:], pattern)) + } + str = str[srsz:] + pattern = pattern[prsz:] + // read the next runes + if len(str) > 0 { + if str[0] > 0x7f { + sr, srsz = utf8.DecodeRuneInString(str) + } else { + sr, srsz = rune(str[0]), 1 + } + } else { + sr, srsz = utf8.RuneError, 0 + } + if len(pattern) > 0 { + if pattern[0] > 0x7f { + pr, prsz = utf8.DecodeRuneInString(pattern) + } else { + pr, prsz = rune(pattern[0]), 1 + } + } else { + pr, prsz = utf8.RuneError, 0 + } + // done reading + } + + return srsz == 0 && prsz == 0 +} + +var maxRuneBytes = func() []byte { + b := make([]byte, 4) + if utf8.EncodeRune(b, '\U0010FFFF') != 4 { + panic("invalid rune encoding") + } + return b +}() + +// Allowable parses the pattern and determines the minimum and maximum allowable +// values that the pattern can represent. +// When the max cannot be determined, 'true' will be returned +// for infinite. 
+func Allowable(pattern string) (min, max string) { + if pattern == "" || pattern[0] == '*' { + return "", "" + } + + minb := make([]byte, 0, len(pattern)) + maxb := make([]byte, 0, len(pattern)) + var wild bool + for i := 0; i < len(pattern); i++ { + if pattern[i] == '*' { + wild = true + break + } + if pattern[i] == '?' { + minb = append(minb, 0) + maxb = append(maxb, maxRuneBytes...) + } else { + minb = append(minb, pattern[i]) + maxb = append(maxb, pattern[i]) + } + } + if wild { + r, n := utf8.DecodeLastRune(maxb) + if r != utf8.RuneError { + if r < utf8.MaxRune { + r++ + if r > 0x7f { + b := make([]byte, 4) + nn := utf8.EncodeRune(b, r) + maxb = append(maxb[:len(maxb)-n], b[:nn]...) + } else { + maxb = append(maxb[:len(maxb)-n], byte(r)) + } + } + } + } + return string(minb), string(maxb) + /* + return + if wild { + r, n := utf8.DecodeLastRune(maxb) + if r != utf8.RuneError { + if r < utf8.MaxRune { + infinite = true + } else { + r++ + if r > 0x7f { + b := make([]byte, 4) + nn := utf8.EncodeRune(b, r) + maxb = append(maxb[:len(maxb)-n], b[:nn]...) + } else { + maxb = append(maxb[:len(maxb)-n], byte(r)) + } + } + } + } + return string(minb), string(maxb), infinite + */ +} diff --git a/vendor/github.com/tidwall/rtree/LICENSE b/vendor/github.com/tidwall/rtree/LICENSE new file mode 100644 index 0000000..1a6cb67 --- /dev/null +++ b/vendor/github.com/tidwall/rtree/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2016 Josh Baker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/tidwall/rtree/README.md b/vendor/github.com/tidwall/rtree/README.md new file mode 100644 index 0000000..fa52dc3 --- /dev/null +++ b/vendor/github.com/tidwall/rtree/README.md @@ -0,0 +1,21 @@ +RTree implementation for Go +=========================== + +[![Build Status](https://travis-ci.org/tidwall/rtree.svg?branch=master)](https://travis-ci.org/tidwall/rtree) +[![GoDoc](https://godoc.org/github.com/tidwall/rtree?status.svg)](https://godoc.org/github.com/tidwall/rtree) + +This package provides an in-memory R-Tree implementation for Go, useful as a spatial data structure. +It has support for 1-20 dimensions, and can store and search multidimensions interchangably in the same tree. 
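+
+Example
+-------
+
+A minimal usage sketch of Insert and a windowed Search. The `point` and
+`searchRect` types below are illustrative only and are not part of this
+package; any type that implements the `Item` interface's `Rect` method can be
+stored or used as search bounds.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/tidwall/rtree"
+)
+
+// point is a 2D item; its bounding rectangle is the point itself.
+type point struct{ x, y float64 }
+
+func (p point) Rect(ctx interface{}) (min, max []float64) {
+	return []float64{p.x, p.y}, []float64{p.x, p.y}
+}
+
+// searchRect describes the window passed to Search.
+type searchRect struct{ min, max []float64 }
+
+func (r searchRect) Rect(ctx interface{}) (min, max []float64) {
+	return r.min, r.max
+}
+
+func main() {
+	tr := rtree.New(nil)
+	tr.Insert(point{10, 10})
+	tr.Insert(point{25, 30})
+
+	// Visit every item whose rectangle overlaps the search window.
+	tr.Search(searchRect{[]float64{0, 0}, []float64{20, 20}}, func(item rtree.Item) bool {
+		fmt.Println(item) // {10 10}
+		return true       // keep iterating
+	})
+}
+```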
+ +Authors +------- +* 1983 Original algorithm and test code by Antonin Guttman and Michael Stonebraker, UC Berkely +* 1994 ANCI C ported from original test code by Melinda Green +* 1995 Sphere volume fix for degeneracy problem submitted by Paul Brook +* 2004 Templated C++ port by Greg Douglas +* 2016 Go port by Josh Baker + +License +------- +RTree source code is available under the MIT License. + diff --git a/vendor/github.com/tidwall/rtree/rtree.go b/vendor/github.com/tidwall/rtree/rtree.go new file mode 100644 index 0000000..330a1f5 --- /dev/null +++ b/vendor/github.com/tidwall/rtree/rtree.go @@ -0,0 +1,14013 @@ +// generated; DO NOT EDIT! + +package rtree + +import "math" + +type Iterator func(item Item) bool +type Item interface { + Rect(ctx interface{}) (min []float64, max []float64) +} + +type RTree struct { + ctx interface{} + tr1 *d1RTree + tr2 *d2RTree + tr3 *d3RTree + tr4 *d4RTree + tr5 *d5RTree + tr6 *d6RTree + tr7 *d7RTree + tr8 *d8RTree + tr9 *d9RTree + tr10 *d10RTree + tr11 *d11RTree + tr12 *d12RTree + tr13 *d13RTree + tr14 *d14RTree + tr15 *d15RTree + tr16 *d16RTree + tr17 *d17RTree + tr18 *d18RTree + tr19 *d19RTree + tr20 *d20RTree +} + +func New(ctx interface{}) *RTree { + return &RTree{ + ctx: ctx, + tr1: d1New(), + tr2: d2New(), + tr3: d3New(), + tr4: d4New(), + tr5: d5New(), + tr6: d6New(), + tr7: d7New(), + tr8: d8New(), + tr9: d9New(), + tr10: d10New(), + tr11: d11New(), + tr12: d12New(), + tr13: d13New(), + tr14: d14New(), + tr15: d15New(), + tr16: d16New(), + tr17: d17New(), + tr18: d18New(), + tr19: d19New(), + tr20: d20New(), + } +} + +func (tr *RTree) Insert(item Item) { + if item == nil { + panic("nil item being added to RTree") + } + min, max := item.Rect(tr.ctx) + if len(min) != len(max) { + return // just return + panic("invalid item rectangle") + } + switch len(min) { + default: + return // just return + panic("invalid dimension") + case 1: + var amin, amax [1]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr1.Insert(amin, amax, item) + case 2: + var amin, amax [2]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr2.Insert(amin, amax, item) + case 3: + var amin, amax [3]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr3.Insert(amin, amax, item) + case 4: + var amin, amax [4]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr4.Insert(amin, amax, item) + case 5: + var amin, amax [5]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr5.Insert(amin, amax, item) + case 6: + var amin, amax [6]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr6.Insert(amin, amax, item) + case 7: + var amin, amax [7]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr7.Insert(amin, amax, item) + case 8: + var amin, amax [8]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr8.Insert(amin, amax, item) + case 9: + var amin, amax [9]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr9.Insert(amin, amax, item) + case 10: + var amin, amax [10]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr10.Insert(amin, amax, item) + case 11: + var amin, amax [11]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr11.Insert(amin, amax, item) + case 12: + var amin, amax [12]float64 + for 
i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr12.Insert(amin, amax, item) + case 13: + var amin, amax [13]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr13.Insert(amin, amax, item) + case 14: + var amin, amax [14]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr14.Insert(amin, amax, item) + case 15: + var amin, amax [15]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr15.Insert(amin, amax, item) + case 16: + var amin, amax [16]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr16.Insert(amin, amax, item) + case 17: + var amin, amax [17]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr17.Insert(amin, amax, item) + case 18: + var amin, amax [18]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr18.Insert(amin, amax, item) + case 19: + var amin, amax [19]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr19.Insert(amin, amax, item) + case 20: + var amin, amax [20]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr20.Insert(amin, amax, item) + } +} + +func (tr *RTree) Remove(item Item) { + if item == nil { + panic("nil item being added to RTree") + } + min, max := item.Rect(tr.ctx) + if len(min) != len(max) { + return // just return + panic("invalid item rectangle") + } + switch len(min) { + default: + return // just return + panic("invalid dimension") + case 1: + var amin, amax [1]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr1.Remove(amin, amax, item) + case 2: + var amin, amax [2]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr2.Remove(amin, amax, item) + case 3: + var amin, amax [3]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr3.Remove(amin, amax, item) + case 4: + var amin, amax [4]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr4.Remove(amin, amax, item) + case 5: + var amin, amax [5]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr5.Remove(amin, amax, item) + case 6: + var amin, amax [6]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr6.Remove(amin, amax, item) + case 7: + var amin, amax [7]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr7.Remove(amin, amax, item) + case 8: + var amin, amax [8]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr8.Remove(amin, amax, item) + case 9: + var amin, amax [9]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr9.Remove(amin, amax, item) + case 10: + var amin, amax [10]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr10.Remove(amin, amax, item) + case 11: + var amin, amax [11]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr11.Remove(amin, amax, item) + case 12: + var amin, amax [12]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr12.Remove(amin, amax, item) + case 13: + var amin, amax [13]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr13.Remove(amin, amax, item) + case 14: + var amin, amax 
[14]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr14.Remove(amin, amax, item) + case 15: + var amin, amax [15]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr15.Remove(amin, amax, item) + case 16: + var amin, amax [16]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr16.Remove(amin, amax, item) + case 17: + var amin, amax [17]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr17.Remove(amin, amax, item) + case 18: + var amin, amax [18]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr18.Remove(amin, amax, item) + case 19: + var amin, amax [19]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr19.Remove(amin, amax, item) + case 20: + var amin, amax [20]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr20.Remove(amin, amax, item) + } +} +func (tr *RTree) Reset() { + tr.tr1 = d1New() + tr.tr2 = d2New() + tr.tr3 = d3New() + tr.tr4 = d4New() + tr.tr5 = d5New() + tr.tr6 = d6New() + tr.tr7 = d7New() + tr.tr8 = d8New() + tr.tr9 = d9New() + tr.tr10 = d10New() + tr.tr11 = d11New() + tr.tr12 = d12New() + tr.tr13 = d13New() + tr.tr14 = d14New() + tr.tr15 = d15New() + tr.tr16 = d16New() + tr.tr17 = d17New() + tr.tr18 = d18New() + tr.tr19 = d19New() + tr.tr20 = d20New() +} +func (tr *RTree) Count() int { + count := 0 + count += tr.tr1.Count() + count += tr.tr2.Count() + count += tr.tr3.Count() + count += tr.tr4.Count() + count += tr.tr5.Count() + count += tr.tr6.Count() + count += tr.tr7.Count() + count += tr.tr8.Count() + count += tr.tr9.Count() + count += tr.tr10.Count() + count += tr.tr11.Count() + count += tr.tr12.Count() + count += tr.tr13.Count() + count += tr.tr14.Count() + count += tr.tr15.Count() + count += tr.tr16.Count() + count += tr.tr17.Count() + count += tr.tr18.Count() + count += tr.tr19.Count() + count += tr.tr20.Count() + return count +} +func (tr *RTree) Search(bounds Item, iter Iterator) { + if bounds == nil { + panic("nil bounds being used for search") + } + min, max := bounds.Rect(tr.ctx) + if len(min) != len(max) { + return // just return + panic("invalid item rectangle") + } + switch len(min) { + default: + return // just return + panic("invalid dimension") + case 1: + case 2: + case 3: + case 4: + case 5: + case 6: + case 7: + case 8: + case 9: + case 10: + case 11: + case 12: + case 13: + case 14: + case 15: + case 16: + case 17: + case 18: + case 19: + case 20: + } + if !tr.search1(min, max, iter) { + return + } + if !tr.search2(min, max, iter) { + return + } + if !tr.search3(min, max, iter) { + return + } + if !tr.search4(min, max, iter) { + return + } + if !tr.search5(min, max, iter) { + return + } + if !tr.search6(min, max, iter) { + return + } + if !tr.search7(min, max, iter) { + return + } + if !tr.search8(min, max, iter) { + return + } + if !tr.search9(min, max, iter) { + return + } + if !tr.search10(min, max, iter) { + return + } + if !tr.search11(min, max, iter) { + return + } + if !tr.search12(min, max, iter) { + return + } + if !tr.search13(min, max, iter) { + return + } + if !tr.search14(min, max, iter) { + return + } + if !tr.search15(min, max, iter) { + return + } + if !tr.search16(min, max, iter) { + return + } + if !tr.search17(min, max, iter) { + return + } + if !tr.search18(min, max, iter) { + return + } + if !tr.search19(min, max, iter) { + return + } + if !tr.search20(min, max, iter) { 
+ return + } +} + +func (tr *RTree) search1(min, max []float64, iter Iterator) bool { + var amin, amax [1]float64 + for i := 0; i < 1; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr1.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search2(min, max []float64, iter Iterator) bool { + var amin, amax [2]float64 + for i := 0; i < 2; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr2.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search3(min, max []float64, iter Iterator) bool { + var amin, amax [3]float64 + for i := 0; i < 3; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr3.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search4(min, max []float64, iter Iterator) bool { + var amin, amax [4]float64 + for i := 0; i < 4; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr4.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search5(min, max []float64, iter Iterator) bool { + var amin, amax [5]float64 + for i := 0; i < 5; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr5.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search6(min, max []float64, iter Iterator) bool { + var amin, amax [6]float64 + for i := 0; i < 6; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr6.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search7(min, max []float64, iter Iterator) bool { + var amin, amax [7]float64 + for i := 0; i < 7; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr7.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search8(min, max []float64, iter Iterator) bool { + var amin, amax [8]float64 + for i := 0; i < 8; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr8.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search9(min, max []float64, iter Iterator) bool { + var amin, amax [9]float64 + for i 
:= 0; i < 9; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr9.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search10(min, max []float64, iter Iterator) bool { + var amin, amax [10]float64 + for i := 0; i < 10; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr10.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search11(min, max []float64, iter Iterator) bool { + var amin, amax [11]float64 + for i := 0; i < 11; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr11.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search12(min, max []float64, iter Iterator) bool { + var amin, amax [12]float64 + for i := 0; i < 12; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr12.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search13(min, max []float64, iter Iterator) bool { + var amin, amax [13]float64 + for i := 0; i < 13; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr13.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search14(min, max []float64, iter Iterator) bool { + var amin, amax [14]float64 + for i := 0; i < 14; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr14.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search15(min, max []float64, iter Iterator) bool { + var amin, amax [15]float64 + for i := 0; i < 15; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr15.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search16(min, max []float64, iter Iterator) bool { + var amin, amax [16]float64 + for i := 0; i < 16; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr16.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search17(min, max []float64, iter Iterator) bool { + var amin, amax [17]float64 + for i := 0; i < 17; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + 
amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr17.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search18(min, max []float64, iter Iterator) bool { + var amin, amax [18]float64 + for i := 0; i < 18; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr18.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search19(min, max []float64, iter Iterator) bool { + var amin, amax [19]float64 + for i := 0; i < 19; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr19.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search20(min, max []float64, iter Iterator) bool { + var amin, amax [20]float64 + for i := 0; i < 20; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr20.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func d1fmin(a, b float64) float64 { + if a < b { + return a + } + return b +} +func d1fmax(a, b float64) float64 { + if a > b { + return a + } + return b +} + +const ( + d1numDims = 1 + d1maxNodes = 8 + d1minNodes = d1maxNodes / 2 + d1useSphericalVolume = true // Better split classification, may be slower on some systems +) + +var d1unitSphereVolume = []float64{ + 0.000000, 2.000000, 3.141593, // Dimension 0,1,2 + 4.188790, 4.934802, 5.263789, // Dimension 3,4,5 + 5.167713, 4.724766, 4.058712, // Dimension 6,7,8 + 3.298509, 2.550164, 1.884104, // Dimension 9,10,11 + 1.335263, 0.910629, 0.599265, // Dimension 12,13,14 + 0.381443, 0.235331, 0.140981, // Dimension 15,16,17 + 0.082146, 0.046622, 0.025807, // Dimension 18,19,20 +}[d1numDims] + +type d1RTree struct { + root *d1nodeT ///< Root of tree +} + +/// Minimal bounding rectangle (n-dimensional) +type d1rectT struct { + min [d1numDims]float64 ///< Min dimensions of bounding box + max [d1numDims]float64 ///< Max dimensions of bounding box +} + +/// May be data or may be another subtree +/// The parents level determines this. 
+/// If the parents level is 0, then this is data +type d1branchT struct { + rect d1rectT ///< Bounds + child *d1nodeT ///< Child node + data interface{} ///< Data Id or Ptr +} + +/// d1nodeT for each branch level +type d1nodeT struct { + count int ///< Count + level int ///< Leaf is zero, others positive + branch [d1maxNodes]d1branchT ///< Branch +} + +func (node *d1nodeT) isInternalNode() bool { + return (node.level > 0) // Not a leaf, but a internal node +} +func (node *d1nodeT) isLeaf() bool { + return (node.level == 0) // A leaf, contains data +} + +/// A link list of nodes for reinsertion after a delete operation +type d1listNodeT struct { + next *d1listNodeT ///< Next in list + node *d1nodeT ///< Node +} + +const d1notTaken = -1 // indicates that position + +/// Variables for finding a split partition +type d1partitionVarsT struct { + partition [d1maxNodes + 1]int + total int + minFill int + count [2]int + cover [2]d1rectT + area [2]float64 + + branchBuf [d1maxNodes + 1]d1branchT + branchCount int + coverSplit d1rectT + coverSplitArea float64 +} + +func d1New() *d1RTree { + // We only support machine word size simple data type eg. integer index or object pointer. + // Since we are storing as union with non data branch + return &d1RTree{ + root: &d1nodeT{}, + } +} + +/// Insert entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d1RTree) Insert(min, max [d1numDims]float64, dataId interface{}) { + var branch d1branchT + branch.data = dataId + for axis := 0; axis < d1numDims; axis++ { + branch.rect.min[axis] = min[axis] + branch.rect.max[axis] = max[axis] + } + d1insertRect(&branch, &tr.root, 0) +} + +/// Remove entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d1RTree) Remove(min, max [d1numDims]float64, dataId interface{}) { + var rect d1rectT + for axis := 0; axis < d1numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + d1removeRect(&rect, dataId, &tr.root) +} + +/// Find all within d1search rectangle +/// \param a_min Min of d1search bounding rect +/// \param a_max Max of d1search bounding rect +/// \param a_searchResult d1search result array. Caller should set grow size. Function will reset, not append to array. +/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching +/// \param a_context User context to pass as parameter to a_resultCallback +/// \return Returns the number of entries found +func (tr *d1RTree) Search(min, max [d1numDims]float64, resultCallback func(data interface{}) bool) int { + var rect d1rectT + for axis := 0; axis < d1numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + foundCount, _ := d1search(tr.root, rect, 0, resultCallback) + return foundCount +} + +/// Count the data elements in this container. This is slow as no internal counter is maintained. 
+func (tr *d1RTree) Count() int { + var count int + d1countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d1RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d1nodeT{} +} + +func d1countRec(node *d1nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d1countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +func d1insertRectRec(branch *d1branchT, node *d1nodeT, newNode **d1nodeT, level int) bool { + // recurse until we reach the correct level for the new record. data records + // will always be called with a_level == 0 (leaf) + if node.level > level { + // Still above level for insertion, go down tree recursively + var otherNode *d1nodeT + //var newBranch d1branchT + + // find the optimal branch for this record + index := d1pickBranch(&branch.rect, node) + + // recursively insert this record into the picked branch + childWasSplit := d1insertRectRec(branch, node.branch[index].child, &otherNode, level) + + if !childWasSplit { + // Child was not split. Merge the bounding box of the new record with the + // existing bounding box + node.branch[index].rect = d1combineRect(&branch.rect, &(node.branch[index].rect)) + return false + } else { + // Child was split. The old branches are now re-partitioned to two nodes + // so we have to re-calculate the bounding boxes of each node + node.branch[index].rect = d1nodeCover(node.branch[index].child) + var newBranch d1branchT + newBranch.child = otherNode + newBranch.rect = d1nodeCover(otherNode) + + // The old node is already a child of a_node. Now add the newly-created + // node to a_node as well. a_node might be split because of that. + return d1addBranch(&newBranch, node, newNode) + } + } else if node.level == level { + // We have reached level for insertion. Add rect, split if necessary + return d1addBranch(branch, node, newNode) + } else { + // Should never occur + return false + } +} + +// Insert a data rectangle into an index structure. +// d1insertRect provides for splitting the root; +// returns 1 if root was split, 0 if it was not. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +// InsertRect2 does the recursion. 
+// +func d1insertRect(branch *d1branchT, root **d1nodeT, level int) bool { + var newNode *d1nodeT + + if d1insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d1nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d1branchT + + // add old root node as a child of the new root + newBranch.rect = d1nodeCover(*root) + newBranch.child = *root + d1addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d1nodeCover(newNode) + newBranch.child = newNode + d1addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d1nodeCover(node *d1nodeT) d1rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d1combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d1addBranch(branch *d1branchT, node *d1nodeT, newNode **d1nodeT) bool { + if node.count < d1maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d1splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d1disconnectBranch(node *d1nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. Pick the one that will need the smallest increase +// in area to accomodate the new rectangle. This will result in the +// least total area for the covering rectangles in the current node. +// In case of a tie, pick the one which was smaller before, to get +// the best resolution when searching. +func d1pickBranch(rect *d1rectT, node *d1nodeT) int { + var firstTime bool = true + var increase float64 + var bestIncr float64 = -1 + var area float64 + var bestArea float64 + var best int + var tempRect d1rectT + + for index := 0; index < node.count; index++ { + curRect := &node.branch[index].rect + area = d1calcRectVolume(curRect) + tempRect = d1combineRect(rect, curRect) + increase = d1calcRectVolume(&tempRect) - area + if (increase < bestIncr) || firstTime { + best = index + bestArea = area + bestIncr = increase + firstTime = false + } else if (increase == bestIncr) && (area < bestArea) { + best = index + bestArea = area + bestIncr = increase + } + } + return best +} + +// Combine two rectangles into larger one containing both +func d1combineRect(rectA, rectB *d1rectT) d1rectT { + var newRect d1rectT + + for index := 0; index < d1numDims; index++ { + newRect.min[index] = d1fmin(rectA.min[index], rectB.min[index]) + newRect.max[index] = d1fmax(rectA.max[index], rectB.max[index]) + } + + return newRect +} + +// Split a node. +// Divides the nodes branches and the extra one between two nodes. +// Old node is one of the new ones, and one really new one is created. +// Tries more than one method for choosing a partition, uses best result. 
+func d1splitNode(node *d1nodeT, branch *d1branchT, newNode **d1nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d1partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d1getBranches(node, branch, parVars) + + // Find partition + d1choosePartition(parVars, d1minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d1nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d1loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d1rectVolume(rect *d1rectT) float64 { + var volume float64 = 1 + for index := 0; index < d1numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d1rectT +func d1rectSphericalVolume(rect *d1rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d1numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. + if d1numDims == 5 { + return (radius * radius * radius * radius * radius * d1unitSphereVolume) + } else if d1numDims == 4 { + return (radius * radius * radius * radius * d1unitSphereVolume) + } else if d1numDims == 3 { + return (radius * radius * radius * d1unitSphereVolume) + } else if d1numDims == 2 { + return (radius * radius * d1unitSphereVolume) + } else { + return (math.Pow(radius, d1numDims) * d1unitSphereVolume) + } +} + +// Use one of the methods to calculate retangle volume +func d1calcRectVolume(rect *d1rectT) float64 { + if d1useSphericalVolume { + return d1rectSphericalVolume(rect) // Slower but helps certain merge cases + } else { // RTREE_USE_SPHERICAL_VOLUME + return d1rectVolume(rect) // Faster but can cause poor merges + } // RTREE_USE_SPHERICAL_VOLUME +} + +// Load branch buffer with branches from full node plus the extra branch. +func d1getBranches(node *d1nodeT, branch *d1branchT, parVars *d1partitionVarsT) { + // Load the branch buffer + for index := 0; index < d1maxNodes; index++ { + parVars.branchBuf[index] = node.branch[index] + } + parVars.branchBuf[d1maxNodes] = *branch + parVars.branchCount = d1maxNodes + 1 + + // Calculate rect containing all in the set + parVars.coverSplit = parVars.branchBuf[0].rect + for index := 1; index < d1maxNodes+1; index++ { + parVars.coverSplit = d1combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect) + } + parVars.coverSplitArea = d1calcRectVolume(&parVars.coverSplit) +} + +// Method #0 for choosing a partition: +// As the seeds for the two groups, pick the two rects that would waste the +// most area if covered by a single rectangle, i.e. evidently the worst pair +// to have in the same group. +// Of the remaining, one at a time is chosen to be put in one of the two groups. +// The one chosen is the one with the greatest difference in area expansion +// depending on which group - the rect most strongly attracted to one group +// and repelled from the other. +// If one group gets too full (more would force other group to violate min +// fill requirement) then other group gets the rest. +// These last are the ones that can go in either group most easily. 
+func d1choosePartition(parVars *d1partitionVarsT, minFill int) { + var biggestDiff float64 + var group, chosen, betterGroup int + + d1initParVars(parVars, parVars.branchCount, minFill) + d1pickSeeds(parVars) + + for ((parVars.count[0] + parVars.count[1]) < parVars.total) && + (parVars.count[0] < (parVars.total - parVars.minFill)) && + (parVars.count[1] < (parVars.total - parVars.minFill)) { + biggestDiff = -1 + for index := 0; index < parVars.total; index++ { + if d1notTaken == parVars.partition[index] { + curRect := &parVars.branchBuf[index].rect + rect0 := d1combineRect(curRect, &parVars.cover[0]) + rect1 := d1combineRect(curRect, &parVars.cover[1]) + growth0 := d1calcRectVolume(&rect0) - parVars.area[0] + growth1 := d1calcRectVolume(&rect1) - parVars.area[1] + diff := growth1 - growth0 + if diff >= 0 { + group = 0 + } else { + group = 1 + diff = -diff + } + + if diff > biggestDiff { + biggestDiff = diff + chosen = index + betterGroup = group + } else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) { + chosen = index + betterGroup = group + } + } + } + d1classify(chosen, betterGroup, parVars) + } + + // If one group too full, put remaining rects in the other + if (parVars.count[0] + parVars.count[1]) < parVars.total { + if parVars.count[0] >= parVars.total-parVars.minFill { + group = 1 + } else { + group = 0 + } + for index := 0; index < parVars.total; index++ { + if d1notTaken == parVars.partition[index] { + d1classify(index, group, parVars) + } + } + } +} + +// Copy branches from the buffer into two nodes according to the partition. +func d1loadNodes(nodeA, nodeB *d1nodeT, parVars *d1partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d1nodeT{nodeA, nodeB} + + // It is assured that d1addBranch here will not cause a node split. + d1addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d1partitionVarsT structure. +func d1initParVars(parVars *d1partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d1notTaken + } +} + +func d1pickSeeds(parVars *d1partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d1maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d1calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d1combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d1calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d1classify(seed0, 0, parVars) + d1classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. 
+func d1classify(index, group int, parVars *d1partitionVarsT) { + parVars.partition[index] = group + + // Calculate combined rect + if parVars.count[group] == 0 { + parVars.cover[group] = parVars.branchBuf[index].rect + } else { + parVars.cover[group] = d1combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group]) + } + + // Calculate volume of combined rect + parVars.area[group] = d1calcRectVolume(&parVars.cover[group]) + + parVars.count[group]++ +} + +// Delete a data rectangle from an index structure. +// Pass in a pointer to a d1rectT, the tid of the record, ptr to ptr to root node. +// Returns 1 if record not found, 0 if success. +// d1removeRect provides for eliminating the root. +func d1removeRect(rect *d1rectT, id interface{}, root **d1nodeT) bool { + var reInsertList *d1listNodeT + + if !d1removeRectRec(rect, id, *root, &reInsertList) { + // Found and deleted a data item + // Reinsert any branches from eliminated nodes + for reInsertList != nil { + tempNode := reInsertList.node + + for index := 0; index < tempNode.count; index++ { + // TODO go over this code. should I use (tempNode->m_level - 1)? + d1insertRect(&tempNode.branch[index], root, tempNode.level) + } + reInsertList = reInsertList.next + } + + // Check for redundant root (not leaf, 1 child) and eliminate TODO replace + // if with while? In case there is a whole branch of redundant roots... + if (*root).count == 1 && (*root).isInternalNode() { + tempNode := (*root).branch[0].child + *root = tempNode + } + return false + } else { + return true + } +} + +// Delete a rectangle from non-root part of an index structure. +// Called by d1removeRect. Descends tree recursively, +// merges branches on the way back up. +// Returns 1 if record not found, 0 if success. +func d1removeRectRec(rect *d1rectT, id interface{}, node *d1nodeT, listNode **d1listNodeT) bool { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + if d1overlap(*rect, node.branch[index].rect) { + if !d1removeRectRec(rect, id, node.branch[index].child, listNode) { + if node.branch[index].child.count >= d1minNodes { + // child removed, just resize parent rect + node.branch[index].rect = d1nodeCover(node.branch[index].child) + } else { + // child removed, not enough entries in node, eliminate node + d1reInsert(node.branch[index].child, listNode) + d1disconnectBranch(node, index) // Must return after this call as count has changed + } + return false + } + } + } + return true + } else { // A leaf node + for index := 0; index < node.count; index++ { + if node.branch[index].data == id { + d1disconnectBranch(node, index) // Must return after this call as count has changed + return false + } + } + return true + } +} + +// Decide whether two rectangles d1overlap. +func d1overlap(rectA, rectB d1rectT) bool { + for index := 0; index < d1numDims; index++ { + if rectA.min[index] > rectB.max[index] || + rectB.min[index] > rectA.max[index] { + return false + } + } + return true +} + +// Add a node to the reinsertion list. All its branches will later +// be reinserted into the index structure. +func d1reInsert(node *d1nodeT, listNode **d1listNodeT) { + newListNode := &d1listNodeT{} + newListNode.node = node + newListNode.next = *listNode + *listNode = newListNode +} + +// d1search in an index tree or subtree for all data retangles that d1overlap the argument rectangle. 
+func d1search(node *d1nodeT, rect d1rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) { + if node.isInternalNode() { + // This is an internal node in the tree + for index := 0; index < node.count; index++ { + if d1overlap(rect, node.branch[index].rect) { + var ok bool + foundCount, ok = d1search(node.branch[index].child, rect, foundCount, resultCallback) + if !ok { + // The callback indicated to stop searching + return foundCount, false + } + } + } + } else { + // This is a leaf node + for index := 0; index < node.count; index++ { + if d1overlap(rect, node.branch[index].rect) { + id := node.branch[index].data + foundCount++ + if !resultCallback(id) { + return foundCount, false // Don't continue searching + } + + } + } + } + return foundCount, true // Continue searching +} + +func d2fmin(a, b float64) float64 { + if a < b { + return a + } + return b +} +func d2fmax(a, b float64) float64 { + if a > b { + return a + } + return b +} + +const ( + d2numDims = 2 + d2maxNodes = 8 + d2minNodes = d2maxNodes / 2 + d2useSphericalVolume = true // Better split classification, may be slower on some systems +) + +var d2unitSphereVolume = []float64{ + 0.000000, 2.000000, 3.141593, // Dimension 0,1,2 + 4.188790, 4.934802, 5.263789, // Dimension 3,4,5 + 5.167713, 4.724766, 4.058712, // Dimension 6,7,8 + 3.298509, 2.550164, 1.884104, // Dimension 9,10,11 + 1.335263, 0.910629, 0.599265, // Dimension 12,13,14 + 0.381443, 0.235331, 0.140981, // Dimension 15,16,17 + 0.082146, 0.046622, 0.025807, // Dimension 18,19,20 +}[d2numDims] + +type d2RTree struct { + root *d2nodeT ///< Root of tree +} + +/// Minimal bounding rectangle (n-dimensional) +type d2rectT struct { + min [d2numDims]float64 ///< Min dimensions of bounding box + max [d2numDims]float64 ///< Max dimensions of bounding box +} + +/// May be data or may be another subtree +/// The parents level determines this. +/// If the parents level is 0, then this is data +type d2branchT struct { + rect d2rectT ///< Bounds + child *d2nodeT ///< Child node + data interface{} ///< Data Id or Ptr +} + +/// d2nodeT for each branch level +type d2nodeT struct { + count int ///< Count + level int ///< Leaf is zero, others positive + branch [d2maxNodes]d2branchT ///< Branch +} + +func (node *d2nodeT) isInternalNode() bool { + return (node.level > 0) // Not a leaf, but a internal node +} +func (node *d2nodeT) isLeaf() bool { + return (node.level == 0) // A leaf, contains data +} + +/// A link list of nodes for reinsertion after a delete operation +type d2listNodeT struct { + next *d2listNodeT ///< Next in list + node *d2nodeT ///< Node +} + +const d2notTaken = -1 // indicates that position + +/// Variables for finding a split partition +type d2partitionVarsT struct { + partition [d2maxNodes + 1]int + total int + minFill int + count [2]int + cover [2]d2rectT + area [2]float64 + + branchBuf [d2maxNodes + 1]d2branchT + branchCount int + coverSplit d2rectT + coverSplitArea float64 +} + +func d2New() *d2RTree { + // We only support machine word size simple data type eg. integer index or object pointer. + // Since we are storing as union with non data branch + return &d2RTree{ + root: &d2nodeT{}, + } +} + +/// Insert entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. 
+func (tr *d2RTree) Insert(min, max [d2numDims]float64, dataId interface{}) { + var branch d2branchT + branch.data = dataId + for axis := 0; axis < d2numDims; axis++ { + branch.rect.min[axis] = min[axis] + branch.rect.max[axis] = max[axis] + } + d2insertRect(&branch, &tr.root, 0) +} + +/// Remove entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d2RTree) Remove(min, max [d2numDims]float64, dataId interface{}) { + var rect d2rectT + for axis := 0; axis < d2numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + d2removeRect(&rect, dataId, &tr.root) +} + +/// Find all within d2search rectangle +/// \param a_min Min of d2search bounding rect +/// \param a_max Max of d2search bounding rect +/// \param a_searchResult d2search result array. Caller should set grow size. Function will reset, not append to array. +/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching +/// \param a_context User context to pass as parameter to a_resultCallback +/// \return Returns the number of entries found +func (tr *d2RTree) Search(min, max [d2numDims]float64, resultCallback func(data interface{}) bool) int { + var rect d2rectT + for axis := 0; axis < d2numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + foundCount, _ := d2search(tr.root, rect, 0, resultCallback) + return foundCount +} + +/// Count the data elements in this container. This is slow as no internal counter is maintained. +func (tr *d2RTree) Count() int { + var count int + d2countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d2RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d2nodeT{} +} + +func d2countRec(node *d2nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d2countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +func d2insertRectRec(branch *d2branchT, node *d2nodeT, newNode **d2nodeT, level int) bool { + // recurse until we reach the correct level for the new record. data records + // will always be called with a_level == 0 (leaf) + if node.level > level { + // Still above level for insertion, go down tree recursively + var otherNode *d2nodeT + //var newBranch d2branchT + + // find the optimal branch for this record + index := d2pickBranch(&branch.rect, node) + + // recursively insert this record into the picked branch + childWasSplit := d2insertRectRec(branch, node.branch[index].child, &otherNode, level) + + if !childWasSplit { + // Child was not split. Merge the bounding box of the new record with the + // existing bounding box + node.branch[index].rect = d2combineRect(&branch.rect, &(node.branch[index].rect)) + return false + } else { + // Child was split. 
The old branches are now re-partitioned to two nodes + // so we have to re-calculate the bounding boxes of each node + node.branch[index].rect = d2nodeCover(node.branch[index].child) + var newBranch d2branchT + newBranch.child = otherNode + newBranch.rect = d2nodeCover(otherNode) + + // The old node is already a child of a_node. Now add the newly-created + // node to a_node as well. a_node might be split because of that. + return d2addBranch(&newBranch, node, newNode) + } + } else if node.level == level { + // We have reached level for insertion. Add rect, split if necessary + return d2addBranch(branch, node, newNode) + } else { + // Should never occur + return false + } +} + +// Insert a data rectangle into an index structure. +// d2insertRect provides for splitting the root; +// returns 1 if root was split, 0 if it was not. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +// InsertRect2 does the recursion. +// +func d2insertRect(branch *d2branchT, root **d2nodeT, level int) bool { + var newNode *d2nodeT + + if d2insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d2nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d2branchT + + // add old root node as a child of the new root + newBranch.rect = d2nodeCover(*root) + newBranch.child = *root + d2addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d2nodeCover(newNode) + newBranch.child = newNode + d2addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d2nodeCover(node *d2nodeT) d2rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d2combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d2addBranch(branch *d2branchT, node *d2nodeT, newNode **d2nodeT) bool { + if node.count < d2maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d2splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d2disconnectBranch(node *d2nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. Pick the one that will need the smallest increase +// in area to accomodate the new rectangle. This will result in the +// least total area for the covering rectangles in the current node. +// In case of a tie, pick the one which was smaller before, to get +// the best resolution when searching. 
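The function below implements that rule. Continuing the hypothetical example file from the earlier sketch (same package, "fmt" imported), here is a small numeric illustration of the selection; plain areas via d2rectVolume are used for readability, whereas d2pickBranch itself goes through d2calcRectVolume, but the ordering comes out the same:

func pickBranchIntuition() {
	newRect := d2rectT{min: [d2numDims]float64{9, 9}, max: [d2numDims]float64{10, 10}}
	a := d2rectT{min: [d2numDims]float64{0, 0}, max: [d2numDims]float64{10, 10}}   // already covers newRect
	b := d2rectT{min: [d2numDims]float64{20, 20}, max: [d2numDims]float64{21, 21}} // far away

	growthA := areaGrowth(a, newRect) // 0: no enlargement needed
	growthB := areaGrowth(b, newRect) // 143: combined [9,9]-[21,21] has area 144, b had area 1
	fmt.Println(growthA, growthB)     // d2pickBranch picks the branch holding a
}

// areaGrowth is an example helper: the plain-area enlargement needed for r to also cover x.
func areaGrowth(r, x d2rectT) float64 {
	combined := d2combineRect(&r, &x)
	return d2rectVolume(&combined) - d2rectVolume(&r)
}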
+func d2pickBranch(rect *d2rectT, node *d2nodeT) int { + var firstTime bool = true + var increase float64 + var bestIncr float64 = -1 + var area float64 + var bestArea float64 + var best int + var tempRect d2rectT + + for index := 0; index < node.count; index++ { + curRect := &node.branch[index].rect + area = d2calcRectVolume(curRect) + tempRect = d2combineRect(rect, curRect) + increase = d2calcRectVolume(&tempRect) - area + if (increase < bestIncr) || firstTime { + best = index + bestArea = area + bestIncr = increase + firstTime = false + } else if (increase == bestIncr) && (area < bestArea) { + best = index + bestArea = area + bestIncr = increase + } + } + return best +} + +// Combine two rectangles into larger one containing both +func d2combineRect(rectA, rectB *d2rectT) d2rectT { + var newRect d2rectT + + for index := 0; index < d2numDims; index++ { + newRect.min[index] = d2fmin(rectA.min[index], rectB.min[index]) + newRect.max[index] = d2fmax(rectA.max[index], rectB.max[index]) + } + + return newRect +} + +// Split a node. +// Divides the nodes branches and the extra one between two nodes. +// Old node is one of the new ones, and one really new one is created. +// Tries more than one method for choosing a partition, uses best result. +func d2splitNode(node *d2nodeT, branch *d2branchT, newNode **d2nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d2partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d2getBranches(node, branch, parVars) + + // Find partition + d2choosePartition(parVars, d2minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d2nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d2loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d2rectVolume(rect *d2rectT) float64 { + var volume float64 = 1 + for index := 0; index < d2numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d2rectT +func d2rectSphericalVolume(rect *d2rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d2numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. + if d2numDims == 5 { + return (radius * radius * radius * radius * radius * d2unitSphereVolume) + } else if d2numDims == 4 { + return (radius * radius * radius * radius * d2unitSphereVolume) + } else if d2numDims == 3 { + return (radius * radius * radius * d2unitSphereVolume) + } else if d2numDims == 2 { + return (radius * radius * d2unitSphereVolume) + } else { + return (math.Pow(radius, d2numDims) * d2unitSphereVolume) + } +} + +// Use one of the methods to calculate retangle volume +func d2calcRectVolume(rect *d2rectT) float64 { + if d2useSphericalVolume { + return d2rectSphericalVolume(rect) // Slower but helps certain merge cases + } else { // RTREE_USE_SPHERICAL_VOLUME + return d2rectVolume(rect) // Faster but can cause poor merges + } // RTREE_USE_SPHERICAL_VOLUME +} + +// Load branch buffer with branches from full node plus the extra branch. 
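Before the split machinery continues below, a worked example of the spherical metric defined above may help: for d2numDims == 2 the "volume" is π·r², where r is half the rectangle's diagonal. Continuing the hypothetical example file (same package, "fmt" imported):

func sphericalVolumeExample() {
	r := d2rectT{min: [d2numDims]float64{0, 0}, max: [d2numDims]float64{3, 4}}
	// half-extents 1.5 and 2.0, so radius = sqrt(1.5² + 2²) = 2.5
	fmt.Println(d2rectVolume(&r))          // 12 (plain area, 3*4)
	fmt.Println(d2rectSphericalVolume(&r)) // ≈19.63 (3.141593 * 2.5 * 2.5)
}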
+func d2getBranches(node *d2nodeT, branch *d2branchT, parVars *d2partitionVarsT) { + // Load the branch buffer + for index := 0; index < d2maxNodes; index++ { + parVars.branchBuf[index] = node.branch[index] + } + parVars.branchBuf[d2maxNodes] = *branch + parVars.branchCount = d2maxNodes + 1 + + // Calculate rect containing all in the set + parVars.coverSplit = parVars.branchBuf[0].rect + for index := 1; index < d2maxNodes+1; index++ { + parVars.coverSplit = d2combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect) + } + parVars.coverSplitArea = d2calcRectVolume(&parVars.coverSplit) +} + +// Method #0 for choosing a partition: +// As the seeds for the two groups, pick the two rects that would waste the +// most area if covered by a single rectangle, i.e. evidently the worst pair +// to have in the same group. +// Of the remaining, one at a time is chosen to be put in one of the two groups. +// The one chosen is the one with the greatest difference in area expansion +// depending on which group - the rect most strongly attracted to one group +// and repelled from the other. +// If one group gets too full (more would force other group to violate min +// fill requirement) then other group gets the rest. +// These last are the ones that can go in either group most easily. +func d2choosePartition(parVars *d2partitionVarsT, minFill int) { + var biggestDiff float64 + var group, chosen, betterGroup int + + d2initParVars(parVars, parVars.branchCount, minFill) + d2pickSeeds(parVars) + + for ((parVars.count[0] + parVars.count[1]) < parVars.total) && + (parVars.count[0] < (parVars.total - parVars.minFill)) && + (parVars.count[1] < (parVars.total - parVars.minFill)) { + biggestDiff = -1 + for index := 0; index < parVars.total; index++ { + if d2notTaken == parVars.partition[index] { + curRect := &parVars.branchBuf[index].rect + rect0 := d2combineRect(curRect, &parVars.cover[0]) + rect1 := d2combineRect(curRect, &parVars.cover[1]) + growth0 := d2calcRectVolume(&rect0) - parVars.area[0] + growth1 := d2calcRectVolume(&rect1) - parVars.area[1] + diff := growth1 - growth0 + if diff >= 0 { + group = 0 + } else { + group = 1 + diff = -diff + } + + if diff > biggestDiff { + biggestDiff = diff + chosen = index + betterGroup = group + } else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) { + chosen = index + betterGroup = group + } + } + } + d2classify(chosen, betterGroup, parVars) + } + + // If one group too full, put remaining rects in the other + if (parVars.count[0] + parVars.count[1]) < parVars.total { + if parVars.count[0] >= parVars.total-parVars.minFill { + group = 1 + } else { + group = 0 + } + for index := 0; index < parVars.total; index++ { + if d2notTaken == parVars.partition[index] { + d2classify(index, group, parVars) + } + } + } +} + +// Copy branches from the buffer into two nodes according to the partition. +func d2loadNodes(nodeA, nodeB *d2nodeT, parVars *d2partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d2nodeT{nodeA, nodeB} + + // It is assured that d2addBranch here will not cause a node split. + d2addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d2partitionVarsT structure. 
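The partition above is seeded by d2pickSeeds (a little further below), which maximises "waste": the dead area paid to cover two rectangles with one. The metric is easiest to see with numbers; continuing the hypothetical example file (plain areas again for readability, the real code goes through d2calcRectVolume):

func pickSeedsWasteExample() {
	left := d2rectT{min: [d2numDims]float64{0, 0}, max: [d2numDims]float64{1, 1}}
	right := d2rectT{min: [d2numDims]float64{9, 0}, max: [d2numDims]float64{10, 1}}

	combined := d2combineRect(&left, &right) // [0,0]-[10,1], area 10
	waste := d2rectVolume(&combined) - d2rectVolume(&left) - d2rectVolume(&right)
	fmt.Println(waste) // 8: two far-apart unit squares are the worst pair to keep together
}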
+func d2initParVars(parVars *d2partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d2notTaken + } +} + +func d2pickSeeds(parVars *d2partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d2maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d2calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d2combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d2calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d2classify(seed0, 0, parVars) + d2classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. +func d2classify(index, group int, parVars *d2partitionVarsT) { + parVars.partition[index] = group + + // Calculate combined rect + if parVars.count[group] == 0 { + parVars.cover[group] = parVars.branchBuf[index].rect + } else { + parVars.cover[group] = d2combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group]) + } + + // Calculate volume of combined rect + parVars.area[group] = d2calcRectVolume(&parVars.cover[group]) + + parVars.count[group]++ +} + +// Delete a data rectangle from an index structure. +// Pass in a pointer to a d2rectT, the tid of the record, ptr to ptr to root node. +// Returns 1 if record not found, 0 if success. +// d2removeRect provides for eliminating the root. +func d2removeRect(rect *d2rectT, id interface{}, root **d2nodeT) bool { + var reInsertList *d2listNodeT + + if !d2removeRectRec(rect, id, *root, &reInsertList) { + // Found and deleted a data item + // Reinsert any branches from eliminated nodes + for reInsertList != nil { + tempNode := reInsertList.node + + for index := 0; index < tempNode.count; index++ { + // TODO go over this code. should I use (tempNode->m_level - 1)? + d2insertRect(&tempNode.branch[index], root, tempNode.level) + } + reInsertList = reInsertList.next + } + + // Check for redundant root (not leaf, 1 child) and eliminate TODO replace + // if with while? In case there is a whole branch of redundant roots... + if (*root).count == 1 && (*root).isInternalNode() { + tempNode := (*root).branch[0].child + *root = tempNode + } + return false + } else { + return true + } +} + +// Delete a rectangle from non-root part of an index structure. +// Called by d2removeRect. Descends tree recursively, +// merges branches on the way back up. +// Returns 1 if record not found, 0 if success. 
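d2removeRect above and d2removeRectRec below implement deletion with underflow handling: a child left with fewer than d2minNodes entries is unlinked, queued on the d2listNodeT list, and its surviving branches are reinserted at their original level; a root reduced to a single internal child is collapsed afterwards. From the caller's side this condensation is invisible. A hedged check, continuing the hypothetical example file:

func removeAndCondenseExample() {
	tr := d2New()
	for i := 0; i < 32; i++ { // enough entries to force several node splits
		f := float64(i)
		tr.Insert([d2numDims]float64{f, f}, [d2numDims]float64{f + 1, f + 1}, i)
	}
	for i := 0; i < 24; i++ { // heavy deletion drives underflow and reinsertion
		f := float64(i)
		tr.Remove([d2numDims]float64{f, f}, [d2numDims]float64{f + 1, f + 1}, i)
	}
	fmt.Println(tr.Count()) // 8
}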
+func d2removeRectRec(rect *d2rectT, id interface{}, node *d2nodeT, listNode **d2listNodeT) bool { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + if d2overlap(*rect, node.branch[index].rect) { + if !d2removeRectRec(rect, id, node.branch[index].child, listNode) { + if node.branch[index].child.count >= d2minNodes { + // child removed, just resize parent rect + node.branch[index].rect = d2nodeCover(node.branch[index].child) + } else { + // child removed, not enough entries in node, eliminate node + d2reInsert(node.branch[index].child, listNode) + d2disconnectBranch(node, index) // Must return after this call as count has changed + } + return false + } + } + } + return true + } else { // A leaf node + for index := 0; index < node.count; index++ { + if node.branch[index].data == id { + d2disconnectBranch(node, index) // Must return after this call as count has changed + return false + } + } + return true + } +} + +// Decide whether two rectangles d2overlap. +func d2overlap(rectA, rectB d2rectT) bool { + for index := 0; index < d2numDims; index++ { + if rectA.min[index] > rectB.max[index] || + rectB.min[index] > rectA.max[index] { + return false + } + } + return true +} + +// Add a node to the reinsertion list. All its branches will later +// be reinserted into the index structure. +func d2reInsert(node *d2nodeT, listNode **d2listNodeT) { + newListNode := &d2listNodeT{} + newListNode.node = node + newListNode.next = *listNode + *listNode = newListNode +} + +// d2search in an index tree or subtree for all data retangles that d2overlap the argument rectangle. +func d2search(node *d2nodeT, rect d2rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) { + if node.isInternalNode() { + // This is an internal node in the tree + for index := 0; index < node.count; index++ { + if d2overlap(rect, node.branch[index].rect) { + var ok bool + foundCount, ok = d2search(node.branch[index].child, rect, foundCount, resultCallback) + if !ok { + // The callback indicated to stop searching + return foundCount, false + } + } + } + } else { + // This is a leaf node + for index := 0; index < node.count; index++ { + if d2overlap(rect, node.branch[index].rect) { + id := node.branch[index].data + foundCount++ + if !resultCallback(id) { + return foundCount, false // Don't continue searching + } + + } + } + } + return foundCount, true // Continue searching +} + +func d3fmin(a, b float64) float64 { + if a < b { + return a + } + return b +} +func d3fmax(a, b float64) float64 { + if a > b { + return a + } + return b +} + +const ( + d3numDims = 3 + d3maxNodes = 8 + d3minNodes = d3maxNodes / 2 + d3useSphericalVolume = true // Better split classification, may be slower on some systems +) + +var d3unitSphereVolume = []float64{ + 0.000000, 2.000000, 3.141593, // Dimension 0,1,2 + 4.188790, 4.934802, 5.263789, // Dimension 3,4,5 + 5.167713, 4.724766, 4.058712, // Dimension 6,7,8 + 3.298509, 2.550164, 1.884104, // Dimension 9,10,11 + 1.335263, 0.910629, 0.599265, // Dimension 12,13,14 + 0.381443, 0.235331, 0.140981, // Dimension 15,16,17 + 0.082146, 0.046622, 0.025807, // Dimension 18,19,20 +}[d3numDims] + +type d3RTree struct { + root *d3nodeT ///< Root of tree +} + +/// Minimal bounding rectangle (n-dimensional) +type d3rectT struct { + min [d3numDims]float64 ///< Min dimensions of bounding box + max [d3numDims]float64 ///< Max dimensions of bounding box +} + +/// May be data or may be another subtree +/// The parents level determines this. 
+/// If the parents level is 0, then this is data +type d3branchT struct { + rect d3rectT ///< Bounds + child *d3nodeT ///< Child node + data interface{} ///< Data Id or Ptr +} + +/// d3nodeT for each branch level +type d3nodeT struct { + count int ///< Count + level int ///< Leaf is zero, others positive + branch [d3maxNodes]d3branchT ///< Branch +} + +func (node *d3nodeT) isInternalNode() bool { + return (node.level > 0) // Not a leaf, but a internal node +} +func (node *d3nodeT) isLeaf() bool { + return (node.level == 0) // A leaf, contains data +} + +/// A link list of nodes for reinsertion after a delete operation +type d3listNodeT struct { + next *d3listNodeT ///< Next in list + node *d3nodeT ///< Node +} + +const d3notTaken = -1 // indicates that position + +/// Variables for finding a split partition +type d3partitionVarsT struct { + partition [d3maxNodes + 1]int + total int + minFill int + count [2]int + cover [2]d3rectT + area [2]float64 + + branchBuf [d3maxNodes + 1]d3branchT + branchCount int + coverSplit d3rectT + coverSplitArea float64 +} + +func d3New() *d3RTree { + // We only support machine word size simple data type eg. integer index or object pointer. + // Since we are storing as union with non data branch + return &d3RTree{ + root: &d3nodeT{}, + } +} + +/// Insert entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d3RTree) Insert(min, max [d3numDims]float64, dataId interface{}) { + var branch d3branchT + branch.data = dataId + for axis := 0; axis < d3numDims; axis++ { + branch.rect.min[axis] = min[axis] + branch.rect.max[axis] = max[axis] + } + d3insertRect(&branch, &tr.root, 0) +} + +/// Remove entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d3RTree) Remove(min, max [d3numDims]float64, dataId interface{}) { + var rect d3rectT + for axis := 0; axis < d3numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + d3removeRect(&rect, dataId, &tr.root) +} + +/// Find all within d3search rectangle +/// \param a_min Min of d3search bounding rect +/// \param a_max Max of d3search bounding rect +/// \param a_searchResult d3search result array. Caller should set grow size. Function will reset, not append to array. +/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching +/// \param a_context User context to pass as parameter to a_resultCallback +/// \return Returns the number of entries found +func (tr *d3RTree) Search(min, max [d3numDims]float64, resultCallback func(data interface{}) bool) int { + var rect d3rectT + for axis := 0; axis < d3numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + foundCount, _ := d3search(tr.root, rect, 0, resultCallback) + return foundCount +} + +/// Count the data elements in this container. This is slow as no internal counter is maintained. 
+func (tr *d3RTree) Count() int { + var count int + d3countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d3RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d3nodeT{} +} + +func d3countRec(node *d3nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d3countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +func d3insertRectRec(branch *d3branchT, node *d3nodeT, newNode **d3nodeT, level int) bool { + // recurse until we reach the correct level for the new record. data records + // will always be called with a_level == 0 (leaf) + if node.level > level { + // Still above level for insertion, go down tree recursively + var otherNode *d3nodeT + //var newBranch d3branchT + + // find the optimal branch for this record + index := d3pickBranch(&branch.rect, node) + + // recursively insert this record into the picked branch + childWasSplit := d3insertRectRec(branch, node.branch[index].child, &otherNode, level) + + if !childWasSplit { + // Child was not split. Merge the bounding box of the new record with the + // existing bounding box + node.branch[index].rect = d3combineRect(&branch.rect, &(node.branch[index].rect)) + return false + } else { + // Child was split. The old branches are now re-partitioned to two nodes + // so we have to re-calculate the bounding boxes of each node + node.branch[index].rect = d3nodeCover(node.branch[index].child) + var newBranch d3branchT + newBranch.child = otherNode + newBranch.rect = d3nodeCover(otherNode) + + // The old node is already a child of a_node. Now add the newly-created + // node to a_node as well. a_node might be split because of that. + return d3addBranch(&newBranch, node, newNode) + } + } else if node.level == level { + // We have reached level for insertion. Add rect, split if necessary + return d3addBranch(branch, node, newNode) + } else { + // Should never occur + return false + } +} + +// Insert a data rectangle into an index structure. +// d3insertRect provides for splitting the root; +// returns 1 if root was split, 0 if it was not. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +// InsertRect2 does the recursion. 
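Root splitting, handled by d3insertRect just below, is the only way the tree gains height, which is what keeps every leaf at the same depth. A hedged check of that behaviour, continuing the hypothetical example file (tr.root and its level field are visible inside the package):

func rootSplitExample() {
	tr := d3New()
	for i := 0; i < d3maxNodes+1; i++ { // the 9th insert forces the root to split
		f := float64(i)
		tr.Insert([d3numDims]float64{f, f, f}, [d3numDims]float64{f + 1, f + 1, f + 1}, i)
	}
	fmt.Println(tr.root.level) // 1: new root with two leaf children
	fmt.Println(tr.Count())    // 9
}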
+// +func d3insertRect(branch *d3branchT, root **d3nodeT, level int) bool { + var newNode *d3nodeT + + if d3insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d3nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d3branchT + + // add old root node as a child of the new root + newBranch.rect = d3nodeCover(*root) + newBranch.child = *root + d3addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d3nodeCover(newNode) + newBranch.child = newNode + d3addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d3nodeCover(node *d3nodeT) d3rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d3combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d3addBranch(branch *d3branchT, node *d3nodeT, newNode **d3nodeT) bool { + if node.count < d3maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d3splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d3disconnectBranch(node *d3nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. Pick the one that will need the smallest increase +// in area to accomodate the new rectangle. This will result in the +// least total area for the covering rectangles in the current node. +// In case of a tie, pick the one which was smaller before, to get +// the best resolution when searching. +func d3pickBranch(rect *d3rectT, node *d3nodeT) int { + var firstTime bool = true + var increase float64 + var bestIncr float64 = -1 + var area float64 + var bestArea float64 + var best int + var tempRect d3rectT + + for index := 0; index < node.count; index++ { + curRect := &node.branch[index].rect + area = d3calcRectVolume(curRect) + tempRect = d3combineRect(rect, curRect) + increase = d3calcRectVolume(&tempRect) - area + if (increase < bestIncr) || firstTime { + best = index + bestArea = area + bestIncr = increase + firstTime = false + } else if (increase == bestIncr) && (area < bestArea) { + best = index + bestArea = area + bestIncr = increase + } + } + return best +} + +// Combine two rectangles into larger one containing both +func d3combineRect(rectA, rectB *d3rectT) d3rectT { + var newRect d3rectT + + for index := 0; index < d3numDims; index++ { + newRect.min[index] = d3fmin(rectA.min[index], rectB.min[index]) + newRect.max[index] = d3fmax(rectA.max[index], rectB.max[index]) + } + + return newRect +} + +// Split a node. +// Divides the nodes branches and the extra one between two nodes. +// Old node is one of the new ones, and one really new one is created. +// Tries more than one method for choosing a partition, uses best result. 
+func d3splitNode(node *d3nodeT, branch *d3branchT, newNode **d3nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d3partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d3getBranches(node, branch, parVars) + + // Find partition + d3choosePartition(parVars, d3minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d3nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d3loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d3rectVolume(rect *d3rectT) float64 { + var volume float64 = 1 + for index := 0; index < d3numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d3rectT +func d3rectSphericalVolume(rect *d3rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d3numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. + if d3numDims == 5 { + return (radius * radius * radius * radius * radius * d3unitSphereVolume) + } else if d3numDims == 4 { + return (radius * radius * radius * radius * d3unitSphereVolume) + } else if d3numDims == 3 { + return (radius * radius * radius * d3unitSphereVolume) + } else if d3numDims == 2 { + return (radius * radius * d3unitSphereVolume) + } else { + return (math.Pow(radius, d3numDims) * d3unitSphereVolume) + } +} + +// Use one of the methods to calculate retangle volume +func d3calcRectVolume(rect *d3rectT) float64 { + if d3useSphericalVolume { + return d3rectSphericalVolume(rect) // Slower but helps certain merge cases + } else { // RTREE_USE_SPHERICAL_VOLUME + return d3rectVolume(rect) // Faster but can cause poor merges + } // RTREE_USE_SPHERICAL_VOLUME +} + +// Load branch buffer with branches from full node plus the extra branch. +func d3getBranches(node *d3nodeT, branch *d3branchT, parVars *d3partitionVarsT) { + // Load the branch buffer + for index := 0; index < d3maxNodes; index++ { + parVars.branchBuf[index] = node.branch[index] + } + parVars.branchBuf[d3maxNodes] = *branch + parVars.branchCount = d3maxNodes + 1 + + // Calculate rect containing all in the set + parVars.coverSplit = parVars.branchBuf[0].rect + for index := 1; index < d3maxNodes+1; index++ { + parVars.coverSplit = d3combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect) + } + parVars.coverSplitArea = d3calcRectVolume(&parVars.coverSplit) +} + +// Method #0 for choosing a partition: +// As the seeds for the two groups, pick the two rects that would waste the +// most area if covered by a single rectangle, i.e. evidently the worst pair +// to have in the same group. +// Of the remaining, one at a time is chosen to be put in one of the two groups. +// The one chosen is the one with the greatest difference in area expansion +// depending on which group - the rect most strongly attracted to one group +// and repelled from the other. +// If one group gets too full (more would force other group to violate min +// fill requirement) then other group gets the rest. +// These last are the ones that can go in either group most easily. 
+func d3choosePartition(parVars *d3partitionVarsT, minFill int) { + var biggestDiff float64 + var group, chosen, betterGroup int + + d3initParVars(parVars, parVars.branchCount, minFill) + d3pickSeeds(parVars) + + for ((parVars.count[0] + parVars.count[1]) < parVars.total) && + (parVars.count[0] < (parVars.total - parVars.minFill)) && + (parVars.count[1] < (parVars.total - parVars.minFill)) { + biggestDiff = -1 + for index := 0; index < parVars.total; index++ { + if d3notTaken == parVars.partition[index] { + curRect := &parVars.branchBuf[index].rect + rect0 := d3combineRect(curRect, &parVars.cover[0]) + rect1 := d3combineRect(curRect, &parVars.cover[1]) + growth0 := d3calcRectVolume(&rect0) - parVars.area[0] + growth1 := d3calcRectVolume(&rect1) - parVars.area[1] + diff := growth1 - growth0 + if diff >= 0 { + group = 0 + } else { + group = 1 + diff = -diff + } + + if diff > biggestDiff { + biggestDiff = diff + chosen = index + betterGroup = group + } else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) { + chosen = index + betterGroup = group + } + } + } + d3classify(chosen, betterGroup, parVars) + } + + // If one group too full, put remaining rects in the other + if (parVars.count[0] + parVars.count[1]) < parVars.total { + if parVars.count[0] >= parVars.total-parVars.minFill { + group = 1 + } else { + group = 0 + } + for index := 0; index < parVars.total; index++ { + if d3notTaken == parVars.partition[index] { + d3classify(index, group, parVars) + } + } + } +} + +// Copy branches from the buffer into two nodes according to the partition. +func d3loadNodes(nodeA, nodeB *d3nodeT, parVars *d3partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d3nodeT{nodeA, nodeB} + + // It is assured that d3addBranch here will not cause a node split. + d3addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d3partitionVarsT structure. +func d3initParVars(parVars *d3partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d3notTaken + } +} + +func d3pickSeeds(parVars *d3partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d3maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d3calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d3combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d3calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d3classify(seed0, 0, parVars) + d3classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. 
+func d3classify(index, group int, parVars *d3partitionVarsT) { + parVars.partition[index] = group + + // Calculate combined rect + if parVars.count[group] == 0 { + parVars.cover[group] = parVars.branchBuf[index].rect + } else { + parVars.cover[group] = d3combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group]) + } + + // Calculate volume of combined rect + parVars.area[group] = d3calcRectVolume(&parVars.cover[group]) + + parVars.count[group]++ +} + +// Delete a data rectangle from an index structure. +// Pass in a pointer to a d3rectT, the tid of the record, ptr to ptr to root node. +// Returns 1 if record not found, 0 if success. +// d3removeRect provides for eliminating the root. +func d3removeRect(rect *d3rectT, id interface{}, root **d3nodeT) bool { + var reInsertList *d3listNodeT + + if !d3removeRectRec(rect, id, *root, &reInsertList) { + // Found and deleted a data item + // Reinsert any branches from eliminated nodes + for reInsertList != nil { + tempNode := reInsertList.node + + for index := 0; index < tempNode.count; index++ { + // TODO go over this code. should I use (tempNode->m_level - 1)? + d3insertRect(&tempNode.branch[index], root, tempNode.level) + } + reInsertList = reInsertList.next + } + + // Check for redundant root (not leaf, 1 child) and eliminate TODO replace + // if with while? In case there is a whole branch of redundant roots... + if (*root).count == 1 && (*root).isInternalNode() { + tempNode := (*root).branch[0].child + *root = tempNode + } + return false + } else { + return true + } +} + +// Delete a rectangle from non-root part of an index structure. +// Called by d3removeRect. Descends tree recursively, +// merges branches on the way back up. +// Returns 1 if record not found, 0 if success. +func d3removeRectRec(rect *d3rectT, id interface{}, node *d3nodeT, listNode **d3listNodeT) bool { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + if d3overlap(*rect, node.branch[index].rect) { + if !d3removeRectRec(rect, id, node.branch[index].child, listNode) { + if node.branch[index].child.count >= d3minNodes { + // child removed, just resize parent rect + node.branch[index].rect = d3nodeCover(node.branch[index].child) + } else { + // child removed, not enough entries in node, eliminate node + d3reInsert(node.branch[index].child, listNode) + d3disconnectBranch(node, index) // Must return after this call as count has changed + } + return false + } + } + } + return true + } else { // A leaf node + for index := 0; index < node.count; index++ { + if node.branch[index].data == id { + d3disconnectBranch(node, index) // Must return after this call as count has changed + return false + } + } + return true + } +} + +// Decide whether two rectangles d3overlap. +func d3overlap(rectA, rectB d3rectT) bool { + for index := 0; index < d3numDims; index++ { + if rectA.min[index] > rectB.max[index] || + rectB.min[index] > rectA.max[index] { + return false + } + } + return true +} + +// Add a node to the reinsertion list. All its branches will later +// be reinserted into the index structure. +func d3reInsert(node *d3nodeT, listNode **d3listNodeT) { + newListNode := &d3listNodeT{} + newListNode.node = node + newListNode.next = *listNode + *listNode = newListNode +} + +// d3search in an index tree or subtree for all data retangles that d3overlap the argument rectangle. 
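The d3overlap predicate above drives both deletion and the d3search function that follows. It treats rectangles as closed boxes: they overlap when their intervals intersect on every axis, so shared faces count as hits. A small illustration, continuing the hypothetical example file:

func overlapExample() {
	a := d3rectT{min: [d3numDims]float64{0, 0, 0}, max: [d3numDims]float64{2, 2, 2}}
	b := d3rectT{min: [d3numDims]float64{2, 0, 0}, max: [d3numDims]float64{3, 1, 1}} // touches a at x=2
	c := d3rectT{min: [d3numDims]float64{5, 5, 5}, max: [d3numDims]float64{6, 6, 6}} // disjoint

	fmt.Println(d3overlap(a, b)) // true: touching faces still overlap
	fmt.Println(d3overlap(a, c)) // false: separated on the x axis
}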
+func d3search(node *d3nodeT, rect d3rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) { + if node.isInternalNode() { + // This is an internal node in the tree + for index := 0; index < node.count; index++ { + if d3overlap(rect, node.branch[index].rect) { + var ok bool + foundCount, ok = d3search(node.branch[index].child, rect, foundCount, resultCallback) + if !ok { + // The callback indicated to stop searching + return foundCount, false + } + } + } + } else { + // This is a leaf node + for index := 0; index < node.count; index++ { + if d3overlap(rect, node.branch[index].rect) { + id := node.branch[index].data + foundCount++ + if !resultCallback(id) { + return foundCount, false // Don't continue searching + } + + } + } + } + return foundCount, true // Continue searching +} + +func d4fmin(a, b float64) float64 { + if a < b { + return a + } + return b +} +func d4fmax(a, b float64) float64 { + if a > b { + return a + } + return b +} + +const ( + d4numDims = 4 + d4maxNodes = 8 + d4minNodes = d4maxNodes / 2 + d4useSphericalVolume = true // Better split classification, may be slower on some systems +) + +var d4unitSphereVolume = []float64{ + 0.000000, 2.000000, 3.141593, // Dimension 0,1,2 + 4.188790, 4.934802, 5.263789, // Dimension 3,4,5 + 5.167713, 4.724766, 4.058712, // Dimension 6,7,8 + 3.298509, 2.550164, 1.884104, // Dimension 9,10,11 + 1.335263, 0.910629, 0.599265, // Dimension 12,13,14 + 0.381443, 0.235331, 0.140981, // Dimension 15,16,17 + 0.082146, 0.046622, 0.025807, // Dimension 18,19,20 +}[d4numDims] + +type d4RTree struct { + root *d4nodeT ///< Root of tree +} + +/// Minimal bounding rectangle (n-dimensional) +type d4rectT struct { + min [d4numDims]float64 ///< Min dimensions of bounding box + max [d4numDims]float64 ///< Max dimensions of bounding box +} + +/// May be data or may be another subtree +/// The parents level determines this. +/// If the parents level is 0, then this is data +type d4branchT struct { + rect d4rectT ///< Bounds + child *d4nodeT ///< Child node + data interface{} ///< Data Id or Ptr +} + +/// d4nodeT for each branch level +type d4nodeT struct { + count int ///< Count + level int ///< Leaf is zero, others positive + branch [d4maxNodes]d4branchT ///< Branch +} + +func (node *d4nodeT) isInternalNode() bool { + return (node.level > 0) // Not a leaf, but a internal node +} +func (node *d4nodeT) isLeaf() bool { + return (node.level == 0) // A leaf, contains data +} + +/// A link list of nodes for reinsertion after a delete operation +type d4listNodeT struct { + next *d4listNodeT ///< Next in list + node *d4nodeT ///< Node +} + +const d4notTaken = -1 // indicates that position + +/// Variables for finding a split partition +type d4partitionVarsT struct { + partition [d4maxNodes + 1]int + total int + minFill int + count [2]int + cover [2]d4rectT + area [2]float64 + + branchBuf [d4maxNodes + 1]d4branchT + branchCount int + coverSplit d4rectT + coverSplitArea float64 +} + +func d4New() *d4RTree { + // We only support machine word size simple data type eg. integer index or object pointer. + // Since we are storing as union with non data branch + return &d4RTree{ + root: &d4nodeT{}, + } +} + +/// Insert entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. 
+func (tr *d4RTree) Insert(min, max [d4numDims]float64, dataId interface{}) { + var branch d4branchT + branch.data = dataId + for axis := 0; axis < d4numDims; axis++ { + branch.rect.min[axis] = min[axis] + branch.rect.max[axis] = max[axis] + } + d4insertRect(&branch, &tr.root, 0) +} + +/// Remove entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d4RTree) Remove(min, max [d4numDims]float64, dataId interface{}) { + var rect d4rectT + for axis := 0; axis < d4numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + d4removeRect(&rect, dataId, &tr.root) +} + +/// Find all within d4search rectangle +/// \param a_min Min of d4search bounding rect +/// \param a_max Max of d4search bounding rect +/// \param a_searchResult d4search result array. Caller should set grow size. Function will reset, not append to array. +/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching +/// \param a_context User context to pass as parameter to a_resultCallback +/// \return Returns the number of entries found +func (tr *d4RTree) Search(min, max [d4numDims]float64, resultCallback func(data interface{}) bool) int { + var rect d4rectT + for axis := 0; axis < d4numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + foundCount, _ := d4search(tr.root, rect, 0, resultCallback) + return foundCount +} + +/// Count the data elements in this container. This is slow as no internal counter is maintained. +func (tr *d4RTree) Count() int { + var count int + d4countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d4RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d4nodeT{} +} + +func d4countRec(node *d4nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d4countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +func d4insertRectRec(branch *d4branchT, node *d4nodeT, newNode **d4nodeT, level int) bool { + // recurse until we reach the correct level for the new record. data records + // will always be called with a_level == 0 (leaf) + if node.level > level { + // Still above level for insertion, go down tree recursively + var otherNode *d4nodeT + //var newBranch d4branchT + + // find the optimal branch for this record + index := d4pickBranch(&branch.rect, node) + + // recursively insert this record into the picked branch + childWasSplit := d4insertRectRec(branch, node.branch[index].child, &otherNode, level) + + if !childWasSplit { + // Child was not split. Merge the bounding box of the new record with the + // existing bounding box + node.branch[index].rect = d4combineRect(&branch.rect, &(node.branch[index].rect)) + return false + } else { + // Child was split. 
The old branches are now re-partitioned to two nodes + // so we have to re-calculate the bounding boxes of each node + node.branch[index].rect = d4nodeCover(node.branch[index].child) + var newBranch d4branchT + newBranch.child = otherNode + newBranch.rect = d4nodeCover(otherNode) + + // The old node is already a child of a_node. Now add the newly-created + // node to a_node as well. a_node might be split because of that. + return d4addBranch(&newBranch, node, newNode) + } + } else if node.level == level { + // We have reached level for insertion. Add rect, split if necessary + return d4addBranch(branch, node, newNode) + } else { + // Should never occur + return false + } +} + +// Insert a data rectangle into an index structure. +// d4insertRect provides for splitting the root; +// returns 1 if root was split, 0 if it was not. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +// InsertRect2 does the recursion. +// +func d4insertRect(branch *d4branchT, root **d4nodeT, level int) bool { + var newNode *d4nodeT + + if d4insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d4nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d4branchT + + // add old root node as a child of the new root + newBranch.rect = d4nodeCover(*root) + newBranch.child = *root + d4addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d4nodeCover(newNode) + newBranch.child = newNode + d4addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d4nodeCover(node *d4nodeT) d4rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d4combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d4addBranch(branch *d4branchT, node *d4nodeT, newNode **d4nodeT) bool { + if node.count < d4maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d4splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d4disconnectBranch(node *d4nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. Pick the one that will need the smallest increase +// in area to accomodate the new rectangle. This will result in the +// least total area for the covering rectangles in the current node. +// In case of a tie, pick the one which was smaller before, to get +// the best resolution when searching. 
+func d4pickBranch(rect *d4rectT, node *d4nodeT) int { + var firstTime bool = true + var increase float64 + var bestIncr float64 = -1 + var area float64 + var bestArea float64 + var best int + var tempRect d4rectT + + for index := 0; index < node.count; index++ { + curRect := &node.branch[index].rect + area = d4calcRectVolume(curRect) + tempRect = d4combineRect(rect, curRect) + increase = d4calcRectVolume(&tempRect) - area + if (increase < bestIncr) || firstTime { + best = index + bestArea = area + bestIncr = increase + firstTime = false + } else if (increase == bestIncr) && (area < bestArea) { + best = index + bestArea = area + bestIncr = increase + } + } + return best +} + +// Combine two rectangles into larger one containing both +func d4combineRect(rectA, rectB *d4rectT) d4rectT { + var newRect d4rectT + + for index := 0; index < d4numDims; index++ { + newRect.min[index] = d4fmin(rectA.min[index], rectB.min[index]) + newRect.max[index] = d4fmax(rectA.max[index], rectB.max[index]) + } + + return newRect +} + +// Split a node. +// Divides the nodes branches and the extra one between two nodes. +// Old node is one of the new ones, and one really new one is created. +// Tries more than one method for choosing a partition, uses best result. +func d4splitNode(node *d4nodeT, branch *d4branchT, newNode **d4nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d4partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d4getBranches(node, branch, parVars) + + // Find partition + d4choosePartition(parVars, d4minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d4nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d4loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d4rectVolume(rect *d4rectT) float64 { + var volume float64 = 1 + for index := 0; index < d4numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d4rectT +func d4rectSphericalVolume(rect *d4rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d4numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. + if d4numDims == 5 { + return (radius * radius * radius * radius * radius * d4unitSphereVolume) + } else if d4numDims == 4 { + return (radius * radius * radius * radius * d4unitSphereVolume) + } else if d4numDims == 3 { + return (radius * radius * radius * d4unitSphereVolume) + } else if d4numDims == 2 { + return (radius * radius * d4unitSphereVolume) + } else { + return (math.Pow(radius, d4numDims) * d4unitSphereVolume) + } +} + +// Use one of the methods to calculate retangle volume +func d4calcRectVolume(rect *d4rectT) float64 { + if d4useSphericalVolume { + return d4rectSphericalVolume(rect) // Slower but helps certain merge cases + } else { // RTREE_USE_SPHERICAL_VOLUME + return d4rectVolume(rect) // Faster but can cause poor merges + } // RTREE_USE_SPHERICAL_VOLUME +} + +// Load branch buffer with branches from full node plus the extra branch. 
+func d4getBranches(node *d4nodeT, branch *d4branchT, parVars *d4partitionVarsT) { + // Load the branch buffer + for index := 0; index < d4maxNodes; index++ { + parVars.branchBuf[index] = node.branch[index] + } + parVars.branchBuf[d4maxNodes] = *branch + parVars.branchCount = d4maxNodes + 1 + + // Calculate rect containing all in the set + parVars.coverSplit = parVars.branchBuf[0].rect + for index := 1; index < d4maxNodes+1; index++ { + parVars.coverSplit = d4combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect) + } + parVars.coverSplitArea = d4calcRectVolume(&parVars.coverSplit) +} + +// Method #0 for choosing a partition: +// As the seeds for the two groups, pick the two rects that would waste the +// most area if covered by a single rectangle, i.e. evidently the worst pair +// to have in the same group. +// Of the remaining, one at a time is chosen to be put in one of the two groups. +// The one chosen is the one with the greatest difference in area expansion +// depending on which group - the rect most strongly attracted to one group +// and repelled from the other. +// If one group gets too full (more would force other group to violate min +// fill requirement) then other group gets the rest. +// These last are the ones that can go in either group most easily. +func d4choosePartition(parVars *d4partitionVarsT, minFill int) { + var biggestDiff float64 + var group, chosen, betterGroup int + + d4initParVars(parVars, parVars.branchCount, minFill) + d4pickSeeds(parVars) + + for ((parVars.count[0] + parVars.count[1]) < parVars.total) && + (parVars.count[0] < (parVars.total - parVars.minFill)) && + (parVars.count[1] < (parVars.total - parVars.minFill)) { + biggestDiff = -1 + for index := 0; index < parVars.total; index++ { + if d4notTaken == parVars.partition[index] { + curRect := &parVars.branchBuf[index].rect + rect0 := d4combineRect(curRect, &parVars.cover[0]) + rect1 := d4combineRect(curRect, &parVars.cover[1]) + growth0 := d4calcRectVolume(&rect0) - parVars.area[0] + growth1 := d4calcRectVolume(&rect1) - parVars.area[1] + diff := growth1 - growth0 + if diff >= 0 { + group = 0 + } else { + group = 1 + diff = -diff + } + + if diff > biggestDiff { + biggestDiff = diff + chosen = index + betterGroup = group + } else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) { + chosen = index + betterGroup = group + } + } + } + d4classify(chosen, betterGroup, parVars) + } + + // If one group too full, put remaining rects in the other + if (parVars.count[0] + parVars.count[1]) < parVars.total { + if parVars.count[0] >= parVars.total-parVars.minFill { + group = 1 + } else { + group = 0 + } + for index := 0; index < parVars.total; index++ { + if d4notTaken == parVars.partition[index] { + d4classify(index, group, parVars) + } + } + } +} + +// Copy branches from the buffer into two nodes according to the partition. +func d4loadNodes(nodeA, nodeB *d4nodeT, parVars *d4partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d4nodeT{nodeA, nodeB} + + // It is assured that d4addBranch here will not cause a node split. + d4addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d4partitionVarsT structure. 
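The "It is assured that d4addBranch here will not cause a node split" comment in d4loadNodes above holds by arithmetic: d4choosePartition stops feeding a group once it reaches total-minFill branches, so each group ends up with at most (d4maxNodes+1) - d4minNodes = 9 - 4 = 5 branches, comfortably below the d4maxNodes = 8 capacity. A purely illustrative constant (not part of the patch) spelling that invariant out:

// d4maxGroupFill documents the split-partition invariant: no group produced by
// d4choosePartition ever holds more than this many branches, so the follow-up
// d4addBranch calls in d4loadNodes cannot overflow a node.
const d4maxGroupFill = (d4maxNodes + 1) - d4minNodes // = 5, always <= d4maxNodes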
+func d4initParVars(parVars *d4partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d4notTaken + } +} + +func d4pickSeeds(parVars *d4partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d4maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d4calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d4combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d4calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d4classify(seed0, 0, parVars) + d4classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. +func d4classify(index, group int, parVars *d4partitionVarsT) { + parVars.partition[index] = group + + // Calculate combined rect + if parVars.count[group] == 0 { + parVars.cover[group] = parVars.branchBuf[index].rect + } else { + parVars.cover[group] = d4combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group]) + } + + // Calculate volume of combined rect + parVars.area[group] = d4calcRectVolume(&parVars.cover[group]) + + parVars.count[group]++ +} + +// Delete a data rectangle from an index structure. +// Pass in a pointer to a d4rectT, the tid of the record, ptr to ptr to root node. +// Returns 1 if record not found, 0 if success. +// d4removeRect provides for eliminating the root. +func d4removeRect(rect *d4rectT, id interface{}, root **d4nodeT) bool { + var reInsertList *d4listNodeT + + if !d4removeRectRec(rect, id, *root, &reInsertList) { + // Found and deleted a data item + // Reinsert any branches from eliminated nodes + for reInsertList != nil { + tempNode := reInsertList.node + + for index := 0; index < tempNode.count; index++ { + // TODO go over this code. should I use (tempNode->m_level - 1)? + d4insertRect(&tempNode.branch[index], root, tempNode.level) + } + reInsertList = reInsertList.next + } + + // Check for redundant root (not leaf, 1 child) and eliminate TODO replace + // if with while? In case there is a whole branch of redundant roots... + if (*root).count == 1 && (*root).isInternalNode() { + tempNode := (*root).branch[0].child + *root = tempNode + } + return false + } else { + return true + } +} + +// Delete a rectangle from non-root part of an index structure. +// Called by d4removeRect. Descends tree recursively, +// merges branches on the way back up. +// Returns 1 if record not found, 0 if success. 
+func d4removeRectRec(rect *d4rectT, id interface{}, node *d4nodeT, listNode **d4listNodeT) bool { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + if d4overlap(*rect, node.branch[index].rect) { + if !d4removeRectRec(rect, id, node.branch[index].child, listNode) { + if node.branch[index].child.count >= d4minNodes { + // child removed, just resize parent rect + node.branch[index].rect = d4nodeCover(node.branch[index].child) + } else { + // child removed, not enough entries in node, eliminate node + d4reInsert(node.branch[index].child, listNode) + d4disconnectBranch(node, index) // Must return after this call as count has changed + } + return false + } + } + } + return true + } else { // A leaf node + for index := 0; index < node.count; index++ { + if node.branch[index].data == id { + d4disconnectBranch(node, index) // Must return after this call as count has changed + return false + } + } + return true + } +} + +// Decide whether two rectangles d4overlap. +func d4overlap(rectA, rectB d4rectT) bool { + for index := 0; index < d4numDims; index++ { + if rectA.min[index] > rectB.max[index] || + rectB.min[index] > rectA.max[index] { + return false + } + } + return true +} + +// Add a node to the reinsertion list. All its branches will later +// be reinserted into the index structure. +func d4reInsert(node *d4nodeT, listNode **d4listNodeT) { + newListNode := &d4listNodeT{} + newListNode.node = node + newListNode.next = *listNode + *listNode = newListNode +} + +// d4search in an index tree or subtree for all data retangles that d4overlap the argument rectangle. +func d4search(node *d4nodeT, rect d4rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) { + if node.isInternalNode() { + // This is an internal node in the tree + for index := 0; index < node.count; index++ { + if d4overlap(rect, node.branch[index].rect) { + var ok bool + foundCount, ok = d4search(node.branch[index].child, rect, foundCount, resultCallback) + if !ok { + // The callback indicated to stop searching + return foundCount, false + } + } + } + } else { + // This is a leaf node + for index := 0; index < node.count; index++ { + if d4overlap(rect, node.branch[index].rect) { + id := node.branch[index].data + foundCount++ + if !resultCallback(id) { + return foundCount, false // Don't continue searching + } + + } + } + } + return foundCount, true // Continue searching +} + +func d5fmin(a, b float64) float64 { + if a < b { + return a + } + return b +} +func d5fmax(a, b float64) float64 { + if a > b { + return a + } + return b +} + +const ( + d5numDims = 5 + d5maxNodes = 8 + d5minNodes = d5maxNodes / 2 + d5useSphericalVolume = true // Better split classification, may be slower on some systems +) + +var d5unitSphereVolume = []float64{ + 0.000000, 2.000000, 3.141593, // Dimension 0,1,2 + 4.188790, 4.934802, 5.263789, // Dimension 3,4,5 + 5.167713, 4.724766, 4.058712, // Dimension 6,7,8 + 3.298509, 2.550164, 1.884104, // Dimension 9,10,11 + 1.335263, 0.910629, 0.599265, // Dimension 12,13,14 + 0.381443, 0.235331, 0.140981, // Dimension 15,16,17 + 0.082146, 0.046622, 0.025807, // Dimension 18,19,20 +}[d5numDims] + +type d5RTree struct { + root *d5nodeT ///< Root of tree +} + +/// Minimal bounding rectangle (n-dimensional) +type d5rectT struct { + min [d5numDims]float64 ///< Min dimensions of bounding box + max [d5numDims]float64 ///< Max dimensions of bounding box +} + +/// May be data or may be another subtree +/// The parents level determines this. 
+/// If the parents level is 0, then this is data +type d5branchT struct { + rect d5rectT ///< Bounds + child *d5nodeT ///< Child node + data interface{} ///< Data Id or Ptr +} + +/// d5nodeT for each branch level +type d5nodeT struct { + count int ///< Count + level int ///< Leaf is zero, others positive + branch [d5maxNodes]d5branchT ///< Branch +} + +func (node *d5nodeT) isInternalNode() bool { + return (node.level > 0) // Not a leaf, but a internal node +} +func (node *d5nodeT) isLeaf() bool { + return (node.level == 0) // A leaf, contains data +} + +/// A link list of nodes for reinsertion after a delete operation +type d5listNodeT struct { + next *d5listNodeT ///< Next in list + node *d5nodeT ///< Node +} + +const d5notTaken = -1 // indicates that position + +/// Variables for finding a split partition +type d5partitionVarsT struct { + partition [d5maxNodes + 1]int + total int + minFill int + count [2]int + cover [2]d5rectT + area [2]float64 + + branchBuf [d5maxNodes + 1]d5branchT + branchCount int + coverSplit d5rectT + coverSplitArea float64 +} + +func d5New() *d5RTree { + // We only support machine word size simple data type eg. integer index or object pointer. + // Since we are storing as union with non data branch + return &d5RTree{ + root: &d5nodeT{}, + } +} + +/// Insert entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d5RTree) Insert(min, max [d5numDims]float64, dataId interface{}) { + var branch d5branchT + branch.data = dataId + for axis := 0; axis < d5numDims; axis++ { + branch.rect.min[axis] = min[axis] + branch.rect.max[axis] = max[axis] + } + d5insertRect(&branch, &tr.root, 0) +} + +/// Remove entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d5RTree) Remove(min, max [d5numDims]float64, dataId interface{}) { + var rect d5rectT + for axis := 0; axis < d5numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + d5removeRect(&rect, dataId, &tr.root) +} + +/// Find all within d5search rectangle +/// \param a_min Min of d5search bounding rect +/// \param a_max Max of d5search bounding rect +/// \param a_searchResult d5search result array. Caller should set grow size. Function will reset, not append to array. +/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching +/// \param a_context User context to pass as parameter to a_resultCallback +/// \return Returns the number of entries found +func (tr *d5RTree) Search(min, max [d5numDims]float64, resultCallback func(data interface{}) bool) int { + var rect d5rectT + for axis := 0; axis < d5numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + foundCount, _ := d5search(tr.root, rect, 0, resultCallback) + return foundCount +} + +/// Count the data elements in this container. This is slow as no internal counter is maintained. 
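The Insert, Remove, and Search methods above, together with Count below, are the entire public surface of each generated tree. A minimal usage sketch, assuming it sits in a _test.go file inside the same package as this generated code (the d5* identifiers are unexported) and that fmt is imported; the boxes, the IDs, and the exampleD5Usage name are invented for illustration. The Count implementation follows.

func exampleD5Usage() {
	tr := d5New()

	// Insert three axis-aligned boxes with arbitrary IDs.
	tr.Insert([d5numDims]float64{0, 0, 0, 0, 0}, [d5numDims]float64{1, 1, 1, 1, 1}, "a")
	tr.Insert([d5numDims]float64{2, 2, 2, 2, 2}, [d5numDims]float64{3, 3, 3, 3, 3}, "b")
	tr.Insert([d5numDims]float64{0.5, 0.5, 0.5, 0.5, 0.5}, [d5numDims]float64{2.5, 2.5, 2.5, 2.5, 2.5}, "c")

	// Search a window that overlaps "a" and "c" but not "b".
	var hits []interface{}
	n := tr.Search(
		[d5numDims]float64{0, 0, 0, 0, 0},
		[d5numDims]float64{1, 1, 1, 1, 1},
		func(data interface{}) bool {
			hits = append(hits, data)
			return true // keep searching
		})
	fmt.Println(n, hits)    // 2 [a c]
	fmt.Println(tr.Count()) // 3: walks the whole tree

	// Remove takes the same rectangle and ID that were passed to Insert.
	tr.Remove([d5numDims]float64{2, 2, 2, 2, 2}, [d5numDims]float64{3, 3, 3, 3, 3}, "b")
	fmt.Println(tr.Count()) // 2
}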
+func (tr *d5RTree) Count() int { + var count int + d5countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d5RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d5nodeT{} +} + +func d5countRec(node *d5nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d5countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +func d5insertRectRec(branch *d5branchT, node *d5nodeT, newNode **d5nodeT, level int) bool { + // recurse until we reach the correct level for the new record. data records + // will always be called with a_level == 0 (leaf) + if node.level > level { + // Still above level for insertion, go down tree recursively + var otherNode *d5nodeT + //var newBranch d5branchT + + // find the optimal branch for this record + index := d5pickBranch(&branch.rect, node) + + // recursively insert this record into the picked branch + childWasSplit := d5insertRectRec(branch, node.branch[index].child, &otherNode, level) + + if !childWasSplit { + // Child was not split. Merge the bounding box of the new record with the + // existing bounding box + node.branch[index].rect = d5combineRect(&branch.rect, &(node.branch[index].rect)) + return false + } else { + // Child was split. The old branches are now re-partitioned to two nodes + // so we have to re-calculate the bounding boxes of each node + node.branch[index].rect = d5nodeCover(node.branch[index].child) + var newBranch d5branchT + newBranch.child = otherNode + newBranch.rect = d5nodeCover(otherNode) + + // The old node is already a child of a_node. Now add the newly-created + // node to a_node as well. a_node might be split because of that. + return d5addBranch(&newBranch, node, newNode) + } + } else if node.level == level { + // We have reached level for insertion. Add rect, split if necessary + return d5addBranch(branch, node, newNode) + } else { + // Should never occur + return false + } +} + +// Insert a data rectangle into an index structure. +// d5insertRect provides for splitting the root; +// returns 1 if root was split, 0 if it was not. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +// InsertRect2 does the recursion. 
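The comment above describes how d5insertRect grows the tree: when the recursive insert reports that the root itself split, a new root is created one level higher with the two halves as its only children. A rough sketch of observing that growth, under the same in-package/_test.go assumption as the earlier usage sketch (it reads the unexported root field and assumes fmt is imported); d5maxNodes is 8, so the ninth insert forces the first split. The d5insertRect implementation follows.

func d5RootGrowthSketch() {
	tr := d5New()
	fmt.Println(tr.root.level) // 0: a single leaf root

	// The ninth insert overflows the 8-entry leaf root, splitting it and
	// making d5insertRect install a new root at level 1.
	for i := 0; i < d5maxNodes+1; i++ {
		v := float64(i)
		tr.Insert(
			[d5numDims]float64{v, v, v, v, v},
			[d5numDims]float64{v + 1, v + 1, v + 1, v + 1, v + 1},
			i)
	}
	fmt.Println(tr.root.level) // 1: the tree grew taller by one level
	fmt.Println(tr.Count())    // 9: all entries are still reachable
}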
+// +func d5insertRect(branch *d5branchT, root **d5nodeT, level int) bool { + var newNode *d5nodeT + + if d5insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d5nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d5branchT + + // add old root node as a child of the new root + newBranch.rect = d5nodeCover(*root) + newBranch.child = *root + d5addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d5nodeCover(newNode) + newBranch.child = newNode + d5addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d5nodeCover(node *d5nodeT) d5rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d5combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d5addBranch(branch *d5branchT, node *d5nodeT, newNode **d5nodeT) bool { + if node.count < d5maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d5splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d5disconnectBranch(node *d5nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. Pick the one that will need the smallest increase +// in area to accomodate the new rectangle. This will result in the +// least total area for the covering rectangles in the current node. +// In case of a tie, pick the one which was smaller before, to get +// the best resolution when searching. +func d5pickBranch(rect *d5rectT, node *d5nodeT) int { + var firstTime bool = true + var increase float64 + var bestIncr float64 = -1 + var area float64 + var bestArea float64 + var best int + var tempRect d5rectT + + for index := 0; index < node.count; index++ { + curRect := &node.branch[index].rect + area = d5calcRectVolume(curRect) + tempRect = d5combineRect(rect, curRect) + increase = d5calcRectVolume(&tempRect) - area + if (increase < bestIncr) || firstTime { + best = index + bestArea = area + bestIncr = increase + firstTime = false + } else if (increase == bestIncr) && (area < bestArea) { + best = index + bestArea = area + bestIncr = increase + } + } + return best +} + +// Combine two rectangles into larger one containing both +func d5combineRect(rectA, rectB *d5rectT) d5rectT { + var newRect d5rectT + + for index := 0; index < d5numDims; index++ { + newRect.min[index] = d5fmin(rectA.min[index], rectB.min[index]) + newRect.max[index] = d5fmax(rectA.max[index], rectB.max[index]) + } + + return newRect +} + +// Split a node. +// Divides the nodes branches and the extra one between two nodes. +// Old node is one of the new ones, and one really new one is created. +// Tries more than one method for choosing a partition, uses best result. 
+func d5splitNode(node *d5nodeT, branch *d5branchT, newNode **d5nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d5partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d5getBranches(node, branch, parVars) + + // Find partition + d5choosePartition(parVars, d5minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d5nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d5loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d5rectVolume(rect *d5rectT) float64 { + var volume float64 = 1 + for index := 0; index < d5numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d5rectT +func d5rectSphericalVolume(rect *d5rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d5numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. + if d5numDims == 5 { + return (radius * radius * radius * radius * radius * d5unitSphereVolume) + } else if d5numDims == 4 { + return (radius * radius * radius * radius * d5unitSphereVolume) + } else if d5numDims == 3 { + return (radius * radius * radius * d5unitSphereVolume) + } else if d5numDims == 2 { + return (radius * radius * d5unitSphereVolume) + } else { + return (math.Pow(radius, d5numDims) * d5unitSphereVolume) + } +} + +// Use one of the methods to calculate retangle volume +func d5calcRectVolume(rect *d5rectT) float64 { + if d5useSphericalVolume { + return d5rectSphericalVolume(rect) // Slower but helps certain merge cases + } else { // RTREE_USE_SPHERICAL_VOLUME + return d5rectVolume(rect) // Faster but can cause poor merges + } // RTREE_USE_SPHERICAL_VOLUME +} + +// Load branch buffer with branches from full node plus the extra branch. +func d5getBranches(node *d5nodeT, branch *d5branchT, parVars *d5partitionVarsT) { + // Load the branch buffer + for index := 0; index < d5maxNodes; index++ { + parVars.branchBuf[index] = node.branch[index] + } + parVars.branchBuf[d5maxNodes] = *branch + parVars.branchCount = d5maxNodes + 1 + + // Calculate rect containing all in the set + parVars.coverSplit = parVars.branchBuf[0].rect + for index := 1; index < d5maxNodes+1; index++ { + parVars.coverSplit = d5combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect) + } + parVars.coverSplitArea = d5calcRectVolume(&parVars.coverSplit) +} + +// Method #0 for choosing a partition: +// As the seeds for the two groups, pick the two rects that would waste the +// most area if covered by a single rectangle, i.e. evidently the worst pair +// to have in the same group. +// Of the remaining, one at a time is chosen to be put in one of the two groups. +// The one chosen is the one with the greatest difference in area expansion +// depending on which group - the rect most strongly attracted to one group +// and repelled from the other. +// If one group gets too full (more would force other group to violate min +// fill requirement) then other group gets the rest. +// These last are the ones that can go in either group most easily. 
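The seed-picking step described above can be illustrated without any tree machinery: for every pair of rectangles, the "waste" is the volume of their combined bounding box minus their individual volumes, and the pair with the largest waste becomes the two group seeds. A standalone 2-D sketch with invented rectangles, using plain box area rather than the spherical volume the generated code defaults to; rect2, volume, and combine are ad-hoc helpers, not part of this file. The d5choosePartition implementation follows.

package main

import "fmt"

type rect2 struct{ min, max [2]float64 }

func volume(r rect2) float64 {
	return (r.max[0] - r.min[0]) * (r.max[1] - r.min[1])
}

func combine(a, b rect2) rect2 {
	c := a
	for i := 0; i < 2; i++ {
		if b.min[i] < c.min[i] {
			c.min[i] = b.min[i]
		}
		if b.max[i] > c.max[i] {
			c.max[i] = b.max[i]
		}
	}
	return c
}

func main() {
	rects := []rect2{
		{min: [2]float64{0, 0}, max: [2]float64{1, 1}},   // small, bottom-left
		{min: [2]float64{9, 9}, max: [2]float64{10, 10}}, // small, top-right
		{min: [2]float64{4, 4}, max: [2]float64{6, 6}},   // small, centre
	}

	// Waste of a pair = volume of the combined box minus the two volumes.
	// The far-apart pair (0,1) wastes ~98 units and would be chosen as seeds.
	for a := 0; a < len(rects)-1; a++ {
		for b := a + 1; b < len(rects); b++ {
			waste := volume(combine(rects[a], rects[b])) - volume(rects[a]) - volume(rects[b])
			fmt.Printf("pair (%d,%d): waste %.1f\n", a, b, waste)
		}
	}
}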
+func d5choosePartition(parVars *d5partitionVarsT, minFill int) { + var biggestDiff float64 + var group, chosen, betterGroup int + + d5initParVars(parVars, parVars.branchCount, minFill) + d5pickSeeds(parVars) + + for ((parVars.count[0] + parVars.count[1]) < parVars.total) && + (parVars.count[0] < (parVars.total - parVars.minFill)) && + (parVars.count[1] < (parVars.total - parVars.minFill)) { + biggestDiff = -1 + for index := 0; index < parVars.total; index++ { + if d5notTaken == parVars.partition[index] { + curRect := &parVars.branchBuf[index].rect + rect0 := d5combineRect(curRect, &parVars.cover[0]) + rect1 := d5combineRect(curRect, &parVars.cover[1]) + growth0 := d5calcRectVolume(&rect0) - parVars.area[0] + growth1 := d5calcRectVolume(&rect1) - parVars.area[1] + diff := growth1 - growth0 + if diff >= 0 { + group = 0 + } else { + group = 1 + diff = -diff + } + + if diff > biggestDiff { + biggestDiff = diff + chosen = index + betterGroup = group + } else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) { + chosen = index + betterGroup = group + } + } + } + d5classify(chosen, betterGroup, parVars) + } + + // If one group too full, put remaining rects in the other + if (parVars.count[0] + parVars.count[1]) < parVars.total { + if parVars.count[0] >= parVars.total-parVars.minFill { + group = 1 + } else { + group = 0 + } + for index := 0; index < parVars.total; index++ { + if d5notTaken == parVars.partition[index] { + d5classify(index, group, parVars) + } + } + } +} + +// Copy branches from the buffer into two nodes according to the partition. +func d5loadNodes(nodeA, nodeB *d5nodeT, parVars *d5partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d5nodeT{nodeA, nodeB} + + // It is assured that d5addBranch here will not cause a node split. + d5addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d5partitionVarsT structure. +func d5initParVars(parVars *d5partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d5notTaken + } +} + +func d5pickSeeds(parVars *d5partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d5maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d5calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d5combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d5calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d5classify(seed0, 0, parVars) + d5classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. 
+func d5classify(index, group int, parVars *d5partitionVarsT) { + parVars.partition[index] = group + + // Calculate combined rect + if parVars.count[group] == 0 { + parVars.cover[group] = parVars.branchBuf[index].rect + } else { + parVars.cover[group] = d5combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group]) + } + + // Calculate volume of combined rect + parVars.area[group] = d5calcRectVolume(&parVars.cover[group]) + + parVars.count[group]++ +} + +// Delete a data rectangle from an index structure. +// Pass in a pointer to a d5rectT, the tid of the record, ptr to ptr to root node. +// Returns 1 if record not found, 0 if success. +// d5removeRect provides for eliminating the root. +func d5removeRect(rect *d5rectT, id interface{}, root **d5nodeT) bool { + var reInsertList *d5listNodeT + + if !d5removeRectRec(rect, id, *root, &reInsertList) { + // Found and deleted a data item + // Reinsert any branches from eliminated nodes + for reInsertList != nil { + tempNode := reInsertList.node + + for index := 0; index < tempNode.count; index++ { + // TODO go over this code. should I use (tempNode->m_level - 1)? + d5insertRect(&tempNode.branch[index], root, tempNode.level) + } + reInsertList = reInsertList.next + } + + // Check for redundant root (not leaf, 1 child) and eliminate TODO replace + // if with while? In case there is a whole branch of redundant roots... + if (*root).count == 1 && (*root).isInternalNode() { + tempNode := (*root).branch[0].child + *root = tempNode + } + return false + } else { + return true + } +} + +// Delete a rectangle from non-root part of an index structure. +// Called by d5removeRect. Descends tree recursively, +// merges branches on the way back up. +// Returns 1 if record not found, 0 if success. +func d5removeRectRec(rect *d5rectT, id interface{}, node *d5nodeT, listNode **d5listNodeT) bool { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + if d5overlap(*rect, node.branch[index].rect) { + if !d5removeRectRec(rect, id, node.branch[index].child, listNode) { + if node.branch[index].child.count >= d5minNodes { + // child removed, just resize parent rect + node.branch[index].rect = d5nodeCover(node.branch[index].child) + } else { + // child removed, not enough entries in node, eliminate node + d5reInsert(node.branch[index].child, listNode) + d5disconnectBranch(node, index) // Must return after this call as count has changed + } + return false + } + } + } + return true + } else { // A leaf node + for index := 0; index < node.count; index++ { + if node.branch[index].data == id { + d5disconnectBranch(node, index) // Must return after this call as count has changed + return false + } + } + return true + } +} + +// Decide whether two rectangles d5overlap. +func d5overlap(rectA, rectB d5rectT) bool { + for index := 0; index < d5numDims; index++ { + if rectA.min[index] > rectB.max[index] || + rectB.min[index] > rectA.max[index] { + return false + } + } + return true +} + +// Add a node to the reinsertion list. All its branches will later +// be reinserted into the index structure. +func d5reInsert(node *d5nodeT, listNode **d5listNodeT) { + newListNode := &d5listNodeT{} + newListNode.node = node + newListNode.next = *listNode + *listNode = newListNode +} + +// d5search in an index tree or subtree for all data retangles that d5overlap the argument rectangle. 
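The search path threads the caller's callback through the recursion, and returning false from it aborts the walk; that is how a "first N matches" query is expressed. A small sketch under the same in-package assumption as before; firstN is a hypothetical helper, not part of this file. The d5search implementation follows.

// firstN collects at most limit overlapping IDs, then stops the search early.
func firstN(tr *d5RTree, min, max [d5numDims]float64, limit int) []interface{} {
	if limit <= 0 {
		return nil
	}
	var out []interface{}
	tr.Search(min, max, func(data interface{}) bool {
		out = append(out, data)
		return len(out) < limit // returning false stops the recursion early
	})
	return out
}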
+func d5search(node *d5nodeT, rect d5rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) { + if node.isInternalNode() { + // This is an internal node in the tree + for index := 0; index < node.count; index++ { + if d5overlap(rect, node.branch[index].rect) { + var ok bool + foundCount, ok = d5search(node.branch[index].child, rect, foundCount, resultCallback) + if !ok { + // The callback indicated to stop searching + return foundCount, false + } + } + } + } else { + // This is a leaf node + for index := 0; index < node.count; index++ { + if d5overlap(rect, node.branch[index].rect) { + id := node.branch[index].data + foundCount++ + if !resultCallback(id) { + return foundCount, false // Don't continue searching + } + + } + } + } + return foundCount, true // Continue searching +} + +func d6fmin(a, b float64) float64 { + if a < b { + return a + } + return b +} +func d6fmax(a, b float64) float64 { + if a > b { + return a + } + return b +} + +const ( + d6numDims = 6 + d6maxNodes = 8 + d6minNodes = d6maxNodes / 2 + d6useSphericalVolume = true // Better split classification, may be slower on some systems +) + +var d6unitSphereVolume = []float64{ + 0.000000, 2.000000, 3.141593, // Dimension 0,1,2 + 4.188790, 4.934802, 5.263789, // Dimension 3,4,5 + 5.167713, 4.724766, 4.058712, // Dimension 6,7,8 + 3.298509, 2.550164, 1.884104, // Dimension 9,10,11 + 1.335263, 0.910629, 0.599265, // Dimension 12,13,14 + 0.381443, 0.235331, 0.140981, // Dimension 15,16,17 + 0.082146, 0.046622, 0.025807, // Dimension 18,19,20 +}[d6numDims] + +type d6RTree struct { + root *d6nodeT ///< Root of tree +} + +/// Minimal bounding rectangle (n-dimensional) +type d6rectT struct { + min [d6numDims]float64 ///< Min dimensions of bounding box + max [d6numDims]float64 ///< Max dimensions of bounding box +} + +/// May be data or may be another subtree +/// The parents level determines this. +/// If the parents level is 0, then this is data +type d6branchT struct { + rect d6rectT ///< Bounds + child *d6nodeT ///< Child node + data interface{} ///< Data Id or Ptr +} + +/// d6nodeT for each branch level +type d6nodeT struct { + count int ///< Count + level int ///< Leaf is zero, others positive + branch [d6maxNodes]d6branchT ///< Branch +} + +func (node *d6nodeT) isInternalNode() bool { + return (node.level > 0) // Not a leaf, but a internal node +} +func (node *d6nodeT) isLeaf() bool { + return (node.level == 0) // A leaf, contains data +} + +/// A link list of nodes for reinsertion after a delete operation +type d6listNodeT struct { + next *d6listNodeT ///< Next in list + node *d6nodeT ///< Node +} + +const d6notTaken = -1 // indicates that position + +/// Variables for finding a split partition +type d6partitionVarsT struct { + partition [d6maxNodes + 1]int + total int + minFill int + count [2]int + cover [2]d6rectT + area [2]float64 + + branchBuf [d6maxNodes + 1]d6branchT + branchCount int + coverSplit d6rectT + coverSplitArea float64 +} + +func d6New() *d6RTree { + // We only support machine word size simple data type eg. integer index or object pointer. + // Since we are storing as union with non data branch + return &d6RTree{ + root: &d6nodeT{}, + } +} + +/// Insert entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. 
+func (tr *d6RTree) Insert(min, max [d6numDims]float64, dataId interface{}) { + var branch d6branchT + branch.data = dataId + for axis := 0; axis < d6numDims; axis++ { + branch.rect.min[axis] = min[axis] + branch.rect.max[axis] = max[axis] + } + d6insertRect(&branch, &tr.root, 0) +} + +/// Remove entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d6RTree) Remove(min, max [d6numDims]float64, dataId interface{}) { + var rect d6rectT + for axis := 0; axis < d6numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + d6removeRect(&rect, dataId, &tr.root) +} + +/// Find all within d6search rectangle +/// \param a_min Min of d6search bounding rect +/// \param a_max Max of d6search bounding rect +/// \param a_searchResult d6search result array. Caller should set grow size. Function will reset, not append to array. +/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching +/// \param a_context User context to pass as parameter to a_resultCallback +/// \return Returns the number of entries found +func (tr *d6RTree) Search(min, max [d6numDims]float64, resultCallback func(data interface{}) bool) int { + var rect d6rectT + for axis := 0; axis < d6numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + foundCount, _ := d6search(tr.root, rect, 0, resultCallback) + return foundCount +} + +/// Count the data elements in this container. This is slow as no internal counter is maintained. +func (tr *d6RTree) Count() int { + var count int + d6countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d6RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d6nodeT{} +} + +func d6countRec(node *d6nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d6countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +func d6insertRectRec(branch *d6branchT, node *d6nodeT, newNode **d6nodeT, level int) bool { + // recurse until we reach the correct level for the new record. data records + // will always be called with a_level == 0 (leaf) + if node.level > level { + // Still above level for insertion, go down tree recursively + var otherNode *d6nodeT + //var newBranch d6branchT + + // find the optimal branch for this record + index := d6pickBranch(&branch.rect, node) + + // recursively insert this record into the picked branch + childWasSplit := d6insertRectRec(branch, node.branch[index].child, &otherNode, level) + + if !childWasSplit { + // Child was not split. Merge the bounding box of the new record with the + // existing bounding box + node.branch[index].rect = d6combineRect(&branch.rect, &(node.branch[index].rect)) + return false + } else { + // Child was split. 
The old branches are now re-partitioned to two nodes + // so we have to re-calculate the bounding boxes of each node + node.branch[index].rect = d6nodeCover(node.branch[index].child) + var newBranch d6branchT + newBranch.child = otherNode + newBranch.rect = d6nodeCover(otherNode) + + // The old node is already a child of a_node. Now add the newly-created + // node to a_node as well. a_node might be split because of that. + return d6addBranch(&newBranch, node, newNode) + } + } else if node.level == level { + // We have reached level for insertion. Add rect, split if necessary + return d6addBranch(branch, node, newNode) + } else { + // Should never occur + return false + } +} + +// Insert a data rectangle into an index structure. +// d6insertRect provides for splitting the root; +// returns 1 if root was split, 0 if it was not. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +// InsertRect2 does the recursion. +// +func d6insertRect(branch *d6branchT, root **d6nodeT, level int) bool { + var newNode *d6nodeT + + if d6insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d6nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d6branchT + + // add old root node as a child of the new root + newBranch.rect = d6nodeCover(*root) + newBranch.child = *root + d6addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d6nodeCover(newNode) + newBranch.child = newNode + d6addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d6nodeCover(node *d6nodeT) d6rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d6combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d6addBranch(branch *d6branchT, node *d6nodeT, newNode **d6nodeT) bool { + if node.count < d6maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d6splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d6disconnectBranch(node *d6nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. Pick the one that will need the smallest increase +// in area to accomodate the new rectangle. This will result in the +// least total area for the covering rectangles in the current node. +// In case of a tie, pick the one which was smaller before, to get +// the best resolution when searching. 
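The least-enlargement rule described above is easy to state on its own: for each candidate branch, compute the volume of its rectangle before and after absorbing the new one, take the smallest increase, and break ties by the smaller original volume. A sketch reusing the rect2/volume/combine helpers from the pick-seeds example earlier; pickLeastEnlargement is invented for illustration and uses plain box area. The d6pickBranch implementation follows.

// pickLeastEnlargement mirrors the heuristic: the smallest area increase wins,
// with ties broken by the smaller pre-existing area.
func pickLeastEnlargement(candidates []rect2, incoming rect2) int {
	best, bestIncr, bestArea := 0, -1.0, 0.0
	for i, c := range candidates {
		area := volume(c)
		incr := volume(combine(c, incoming)) - area
		if bestIncr < 0 || incr < bestIncr || (incr == bestIncr && area < bestArea) {
			best, bestIncr, bestArea = i, incr, area
		}
	}
	return best
}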
+func d6pickBranch(rect *d6rectT, node *d6nodeT) int { + var firstTime bool = true + var increase float64 + var bestIncr float64 = -1 + var area float64 + var bestArea float64 + var best int + var tempRect d6rectT + + for index := 0; index < node.count; index++ { + curRect := &node.branch[index].rect + area = d6calcRectVolume(curRect) + tempRect = d6combineRect(rect, curRect) + increase = d6calcRectVolume(&tempRect) - area + if (increase < bestIncr) || firstTime { + best = index + bestArea = area + bestIncr = increase + firstTime = false + } else if (increase == bestIncr) && (area < bestArea) { + best = index + bestArea = area + bestIncr = increase + } + } + return best +} + +// Combine two rectangles into larger one containing both +func d6combineRect(rectA, rectB *d6rectT) d6rectT { + var newRect d6rectT + + for index := 0; index < d6numDims; index++ { + newRect.min[index] = d6fmin(rectA.min[index], rectB.min[index]) + newRect.max[index] = d6fmax(rectA.max[index], rectB.max[index]) + } + + return newRect +} + +// Split a node. +// Divides the nodes branches and the extra one between two nodes. +// Old node is one of the new ones, and one really new one is created. +// Tries more than one method for choosing a partition, uses best result. +func d6splitNode(node *d6nodeT, branch *d6branchT, newNode **d6nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d6partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d6getBranches(node, branch, parVars) + + // Find partition + d6choosePartition(parVars, d6minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d6nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d6loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d6rectVolume(rect *d6rectT) float64 { + var volume float64 = 1 + for index := 0; index < d6numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d6rectT +func d6rectSphericalVolume(rect *d6rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d6numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. + if d6numDims == 5 { + return (radius * radius * radius * radius * radius * d6unitSphereVolume) + } else if d6numDims == 4 { + return (radius * radius * radius * radius * d6unitSphereVolume) + } else if d6numDims == 3 { + return (radius * radius * radius * d6unitSphereVolume) + } else if d6numDims == 2 { + return (radius * radius * d6unitSphereVolume) + } else { + return (math.Pow(radius, d6numDims) * d6unitSphereVolume) + } +} + +// Use one of the methods to calculate retangle volume +func d6calcRectVolume(rect *d6rectT) float64 { + if d6useSphericalVolume { + return d6rectSphericalVolume(rect) // Slower but helps certain merge cases + } else { // RTREE_USE_SPHERICAL_VOLUME + return d6rectVolume(rect) // Faster but can cause poor merges + } // RTREE_USE_SPHERICAL_VOLUME +} + +// Load branch buffer with branches from full node plus the extra branch. 
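The unitSphereVolume tables consumed by the *rectSphericalVolume helpers above are the volumes of the unit n-ball, V_n = pi^(n/2) / Gamma(n/2 + 1), which the helper then scales by radius^n. A standalone sketch recomputing the table with math.Gamma, as a sanity check on constants such as 5.263789 for n = 5. The branch-buffer loader follows.

package main

import (
	"fmt"
	"math"
)

func main() {
	// Volume of the unit n-ball: pi^(n/2) / Gamma(n/2 + 1).
	for n := 1; n <= 6; n++ {
		v := math.Pow(math.Pi, float64(n)/2) / math.Gamma(float64(n)/2+1)
		fmt.Printf("n=%d  unit sphere volume %.6f\n", n, v)
	}
	// n=5 prints 5.263789, matching the generated table entry above.
}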
+func d6getBranches(node *d6nodeT, branch *d6branchT, parVars *d6partitionVarsT) { + // Load the branch buffer + for index := 0; index < d6maxNodes; index++ { + parVars.branchBuf[index] = node.branch[index] + } + parVars.branchBuf[d6maxNodes] = *branch + parVars.branchCount = d6maxNodes + 1 + + // Calculate rect containing all in the set + parVars.coverSplit = parVars.branchBuf[0].rect + for index := 1; index < d6maxNodes+1; index++ { + parVars.coverSplit = d6combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect) + } + parVars.coverSplitArea = d6calcRectVolume(&parVars.coverSplit) +} + +// Method #0 for choosing a partition: +// As the seeds for the two groups, pick the two rects that would waste the +// most area if covered by a single rectangle, i.e. evidently the worst pair +// to have in the same group. +// Of the remaining, one at a time is chosen to be put in one of the two groups. +// The one chosen is the one with the greatest difference in area expansion +// depending on which group - the rect most strongly attracted to one group +// and repelled from the other. +// If one group gets too full (more would force other group to violate min +// fill requirement) then other group gets the rest. +// These last are the ones that can go in either group most easily. +func d6choosePartition(parVars *d6partitionVarsT, minFill int) { + var biggestDiff float64 + var group, chosen, betterGroup int + + d6initParVars(parVars, parVars.branchCount, minFill) + d6pickSeeds(parVars) + + for ((parVars.count[0] + parVars.count[1]) < parVars.total) && + (parVars.count[0] < (parVars.total - parVars.minFill)) && + (parVars.count[1] < (parVars.total - parVars.minFill)) { + biggestDiff = -1 + for index := 0; index < parVars.total; index++ { + if d6notTaken == parVars.partition[index] { + curRect := &parVars.branchBuf[index].rect + rect0 := d6combineRect(curRect, &parVars.cover[0]) + rect1 := d6combineRect(curRect, &parVars.cover[1]) + growth0 := d6calcRectVolume(&rect0) - parVars.area[0] + growth1 := d6calcRectVolume(&rect1) - parVars.area[1] + diff := growth1 - growth0 + if diff >= 0 { + group = 0 + } else { + group = 1 + diff = -diff + } + + if diff > biggestDiff { + biggestDiff = diff + chosen = index + betterGroup = group + } else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) { + chosen = index + betterGroup = group + } + } + } + d6classify(chosen, betterGroup, parVars) + } + + // If one group too full, put remaining rects in the other + if (parVars.count[0] + parVars.count[1]) < parVars.total { + if parVars.count[0] >= parVars.total-parVars.minFill { + group = 1 + } else { + group = 0 + } + for index := 0; index < parVars.total; index++ { + if d6notTaken == parVars.partition[index] { + d6classify(index, group, parVars) + } + } + } +} + +// Copy branches from the buffer into two nodes according to the partition. +func d6loadNodes(nodeA, nodeB *d6nodeT, parVars *d6partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d6nodeT{nodeA, nodeB} + + // It is assured that d6addBranch here will not cause a node split. + d6addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d6partitionVarsT structure. 
+func d6initParVars(parVars *d6partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d6notTaken + } +} + +func d6pickSeeds(parVars *d6partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d6maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d6calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d6combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d6calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d6classify(seed0, 0, parVars) + d6classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. +func d6classify(index, group int, parVars *d6partitionVarsT) { + parVars.partition[index] = group + + // Calculate combined rect + if parVars.count[group] == 0 { + parVars.cover[group] = parVars.branchBuf[index].rect + } else { + parVars.cover[group] = d6combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group]) + } + + // Calculate volume of combined rect + parVars.area[group] = d6calcRectVolume(&parVars.cover[group]) + + parVars.count[group]++ +} + +// Delete a data rectangle from an index structure. +// Pass in a pointer to a d6rectT, the tid of the record, ptr to ptr to root node. +// Returns 1 if record not found, 0 if success. +// d6removeRect provides for eliminating the root. +func d6removeRect(rect *d6rectT, id interface{}, root **d6nodeT) bool { + var reInsertList *d6listNodeT + + if !d6removeRectRec(rect, id, *root, &reInsertList) { + // Found and deleted a data item + // Reinsert any branches from eliminated nodes + for reInsertList != nil { + tempNode := reInsertList.node + + for index := 0; index < tempNode.count; index++ { + // TODO go over this code. should I use (tempNode->m_level - 1)? + d6insertRect(&tempNode.branch[index], root, tempNode.level) + } + reInsertList = reInsertList.next + } + + // Check for redundant root (not leaf, 1 child) and eliminate TODO replace + // if with while? In case there is a whole branch of redundant roots... + if (*root).count == 1 && (*root).isInternalNode() { + tempNode := (*root).branch[0].child + *root = tempNode + } + return false + } else { + return true + } +} + +// Delete a rectangle from non-root part of an index structure. +// Called by d6removeRect. Descends tree recursively, +// merges branches on the way back up. +// Returns 1 if record not found, 0 if success. 
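The removal path above does not rebalance in place: a child that drops below the minimum fill is unlinked whole, parked on the reinsertion list, and its surviving branches are pushed back through d6insertRect as the recursion unwinds; a root left with a single internal child is then collapsed. A rough sketch of exercising that path, under the same in-package assumption as earlier (d6RemovalSketch, the coordinates, and the printed counts are illustrative, and fmt is assumed to be imported). The recursive helper follows.

func d6RemovalSketch() {
	tr := d6New()
	boxes := make([][2][d6numDims]float64, 0, 32)

	// Insert enough entries to force a few node splits...
	for i := 0; i < 32; i++ {
		v := float64(i)
		min := [d6numDims]float64{v, v, v, v, v, v}
		max := [d6numDims]float64{v + 1, v + 1, v + 1, v + 1, v + 1, v + 1}
		boxes = append(boxes, [2][d6numDims]float64{min, max})
		tr.Insert(min, max, i)
	}

	// ...then delete most of them again. Underfull nodes are dissolved and
	// their remaining branches reinserted; the root shrinks back as needed.
	for i := 0; i < 30; i++ {
		tr.Remove(boxes[i][0], boxes[i][1], i)
	}
	fmt.Println(tr.Count()) // 2
}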
+func d6removeRectRec(rect *d6rectT, id interface{}, node *d6nodeT, listNode **d6listNodeT) bool { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + if d6overlap(*rect, node.branch[index].rect) { + if !d6removeRectRec(rect, id, node.branch[index].child, listNode) { + if node.branch[index].child.count >= d6minNodes { + // child removed, just resize parent rect + node.branch[index].rect = d6nodeCover(node.branch[index].child) + } else { + // child removed, not enough entries in node, eliminate node + d6reInsert(node.branch[index].child, listNode) + d6disconnectBranch(node, index) // Must return after this call as count has changed + } + return false + } + } + } + return true + } else { // A leaf node + for index := 0; index < node.count; index++ { + if node.branch[index].data == id { + d6disconnectBranch(node, index) // Must return after this call as count has changed + return false + } + } + return true + } +} + +// Decide whether two rectangles d6overlap. +func d6overlap(rectA, rectB d6rectT) bool { + for index := 0; index < d6numDims; index++ { + if rectA.min[index] > rectB.max[index] || + rectB.min[index] > rectA.max[index] { + return false + } + } + return true +} + +// Add a node to the reinsertion list. All its branches will later +// be reinserted into the index structure. +func d6reInsert(node *d6nodeT, listNode **d6listNodeT) { + newListNode := &d6listNodeT{} + newListNode.node = node + newListNode.next = *listNode + *listNode = newListNode +} + +// d6search in an index tree or subtree for all data retangles that d6overlap the argument rectangle. +func d6search(node *d6nodeT, rect d6rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) { + if node.isInternalNode() { + // This is an internal node in the tree + for index := 0; index < node.count; index++ { + if d6overlap(rect, node.branch[index].rect) { + var ok bool + foundCount, ok = d6search(node.branch[index].child, rect, foundCount, resultCallback) + if !ok { + // The callback indicated to stop searching + return foundCount, false + } + } + } + } else { + // This is a leaf node + for index := 0; index < node.count; index++ { + if d6overlap(rect, node.branch[index].rect) { + id := node.branch[index].data + foundCount++ + if !resultCallback(id) { + return foundCount, false // Don't continue searching + } + + } + } + } + return foundCount, true // Continue searching +} + +func d7fmin(a, b float64) float64 { + if a < b { + return a + } + return b +} +func d7fmax(a, b float64) float64 { + if a > b { + return a + } + return b +} + +const ( + d7numDims = 7 + d7maxNodes = 8 + d7minNodes = d7maxNodes / 2 + d7useSphericalVolume = true // Better split classification, may be slower on some systems +) + +var d7unitSphereVolume = []float64{ + 0.000000, 2.000000, 3.141593, // Dimension 0,1,2 + 4.188790, 4.934802, 5.263789, // Dimension 3,4,5 + 5.167713, 4.724766, 4.058712, // Dimension 6,7,8 + 3.298509, 2.550164, 1.884104, // Dimension 9,10,11 + 1.335263, 0.910629, 0.599265, // Dimension 12,13,14 + 0.381443, 0.235331, 0.140981, // Dimension 15,16,17 + 0.082146, 0.046622, 0.025807, // Dimension 18,19,20 +}[d7numDims] + +type d7RTree struct { + root *d7nodeT ///< Root of tree +} + +/// Minimal bounding rectangle (n-dimensional) +type d7rectT struct { + min [d7numDims]float64 ///< Min dimensions of bounding box + max [d7numDims]float64 ///< Max dimensions of bounding box +} + +/// May be data or may be another subtree +/// The parents level determines this. 
+/// If the parents level is 0, then this is data +type d7branchT struct { + rect d7rectT ///< Bounds + child *d7nodeT ///< Child node + data interface{} ///< Data Id or Ptr +} + +/// d7nodeT for each branch level +type d7nodeT struct { + count int ///< Count + level int ///< Leaf is zero, others positive + branch [d7maxNodes]d7branchT ///< Branch +} + +func (node *d7nodeT) isInternalNode() bool { + return (node.level > 0) // Not a leaf, but a internal node +} +func (node *d7nodeT) isLeaf() bool { + return (node.level == 0) // A leaf, contains data +} + +/// A link list of nodes for reinsertion after a delete operation +type d7listNodeT struct { + next *d7listNodeT ///< Next in list + node *d7nodeT ///< Node +} + +const d7notTaken = -1 // indicates that position + +/// Variables for finding a split partition +type d7partitionVarsT struct { + partition [d7maxNodes + 1]int + total int + minFill int + count [2]int + cover [2]d7rectT + area [2]float64 + + branchBuf [d7maxNodes + 1]d7branchT + branchCount int + coverSplit d7rectT + coverSplitArea float64 +} + +func d7New() *d7RTree { + // We only support machine word size simple data type eg. integer index or object pointer. + // Since we are storing as union with non data branch + return &d7RTree{ + root: &d7nodeT{}, + } +} + +/// Insert entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d7RTree) Insert(min, max [d7numDims]float64, dataId interface{}) { + var branch d7branchT + branch.data = dataId + for axis := 0; axis < d7numDims; axis++ { + branch.rect.min[axis] = min[axis] + branch.rect.max[axis] = max[axis] + } + d7insertRect(&branch, &tr.root, 0) +} + +/// Remove entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d7RTree) Remove(min, max [d7numDims]float64, dataId interface{}) { + var rect d7rectT + for axis := 0; axis < d7numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + d7removeRect(&rect, dataId, &tr.root) +} + +/// Find all within d7search rectangle +/// \param a_min Min of d7search bounding rect +/// \param a_max Max of d7search bounding rect +/// \param a_searchResult d7search result array. Caller should set grow size. Function will reset, not append to array. +/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching +/// \param a_context User context to pass as parameter to a_resultCallback +/// \return Returns the number of entries found +func (tr *d7RTree) Search(min, max [d7numDims]float64, resultCallback func(data interface{}) bool) int { + var rect d7rectT + for axis := 0; axis < d7numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + foundCount, _ := d7search(tr.root, rect, 0, resultCallback) + return foundCount +} + +/// Count the data elements in this container. This is slow as no internal counter is maintained. 
+func (tr *d7RTree) Count() int { + var count int + d7countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d7RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d7nodeT{} +} + +func d7countRec(node *d7nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d7countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +func d7insertRectRec(branch *d7branchT, node *d7nodeT, newNode **d7nodeT, level int) bool { + // recurse until we reach the correct level for the new record. data records + // will always be called with a_level == 0 (leaf) + if node.level > level { + // Still above level for insertion, go down tree recursively + var otherNode *d7nodeT + //var newBranch d7branchT + + // find the optimal branch for this record + index := d7pickBranch(&branch.rect, node) + + // recursively insert this record into the picked branch + childWasSplit := d7insertRectRec(branch, node.branch[index].child, &otherNode, level) + + if !childWasSplit { + // Child was not split. Merge the bounding box of the new record with the + // existing bounding box + node.branch[index].rect = d7combineRect(&branch.rect, &(node.branch[index].rect)) + return false + } else { + // Child was split. The old branches are now re-partitioned to two nodes + // so we have to re-calculate the bounding boxes of each node + node.branch[index].rect = d7nodeCover(node.branch[index].child) + var newBranch d7branchT + newBranch.child = otherNode + newBranch.rect = d7nodeCover(otherNode) + + // The old node is already a child of a_node. Now add the newly-created + // node to a_node as well. a_node might be split because of that. + return d7addBranch(&newBranch, node, newNode) + } + } else if node.level == level { + // We have reached level for insertion. Add rect, split if necessary + return d7addBranch(branch, node, newNode) + } else { + // Should never occur + return false + } +} + +// Insert a data rectangle into an index structure. +// d7insertRect provides for splitting the root; +// returns 1 if root was split, 0 if it was not. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +// InsertRect2 does the recursion. 
+// +func d7insertRect(branch *d7branchT, root **d7nodeT, level int) bool { + var newNode *d7nodeT + + if d7insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d7nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d7branchT + + // add old root node as a child of the new root + newBranch.rect = d7nodeCover(*root) + newBranch.child = *root + d7addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d7nodeCover(newNode) + newBranch.child = newNode + d7addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d7nodeCover(node *d7nodeT) d7rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d7combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d7addBranch(branch *d7branchT, node *d7nodeT, newNode **d7nodeT) bool { + if node.count < d7maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d7splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d7disconnectBranch(node *d7nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. Pick the one that will need the smallest increase +// in area to accomodate the new rectangle. This will result in the +// least total area for the covering rectangles in the current node. +// In case of a tie, pick the one which was smaller before, to get +// the best resolution when searching. +func d7pickBranch(rect *d7rectT, node *d7nodeT) int { + var firstTime bool = true + var increase float64 + var bestIncr float64 = -1 + var area float64 + var bestArea float64 + var best int + var tempRect d7rectT + + for index := 0; index < node.count; index++ { + curRect := &node.branch[index].rect + area = d7calcRectVolume(curRect) + tempRect = d7combineRect(rect, curRect) + increase = d7calcRectVolume(&tempRect) - area + if (increase < bestIncr) || firstTime { + best = index + bestArea = area + bestIncr = increase + firstTime = false + } else if (increase == bestIncr) && (area < bestArea) { + best = index + bestArea = area + bestIncr = increase + } + } + return best +} + +// Combine two rectangles into larger one containing both +func d7combineRect(rectA, rectB *d7rectT) d7rectT { + var newRect d7rectT + + for index := 0; index < d7numDims; index++ { + newRect.min[index] = d7fmin(rectA.min[index], rectB.min[index]) + newRect.max[index] = d7fmax(rectA.max[index], rectB.max[index]) + } + + return newRect +} + +// Split a node. +// Divides the nodes branches and the extra one between two nodes. +// Old node is one of the new ones, and one really new one is created. +// Tries more than one method for choosing a partition, uses best result. 
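d7disconnectBranch above uses the unordered "swap with the last element" removal so the branch array never has holes, and it clears the vacated slot so stale child/data pointers do not keep memory alive. The same idiom on a plain slice, as a standalone sketch (removeUnordered is not part of this file); the split implementation follows.

// removeUnordered deletes s[i] in O(1) without preserving order,
// clearing the vacated tail slot so it does not pin the old value.
func removeUnordered(s []interface{}, i int) []interface{} {
	s[i] = s[len(s)-1]
	s[len(s)-1] = nil
	return s[:len(s)-1]
}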
+func d7splitNode(node *d7nodeT, branch *d7branchT, newNode **d7nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d7partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d7getBranches(node, branch, parVars) + + // Find partition + d7choosePartition(parVars, d7minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d7nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d7loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d7rectVolume(rect *d7rectT) float64 { + var volume float64 = 1 + for index := 0; index < d7numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d7rectT +func d7rectSphericalVolume(rect *d7rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d7numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. + if d7numDims == 5 { + return (radius * radius * radius * radius * radius * d7unitSphereVolume) + } else if d7numDims == 4 { + return (radius * radius * radius * radius * d7unitSphereVolume) + } else if d7numDims == 3 { + return (radius * radius * radius * d7unitSphereVolume) + } else if d7numDims == 2 { + return (radius * radius * d7unitSphereVolume) + } else { + return (math.Pow(radius, d7numDims) * d7unitSphereVolume) + } +} + +// Use one of the methods to calculate retangle volume +func d7calcRectVolume(rect *d7rectT) float64 { + if d7useSphericalVolume { + return d7rectSphericalVolume(rect) // Slower but helps certain merge cases + } else { // RTREE_USE_SPHERICAL_VOLUME + return d7rectVolume(rect) // Faster but can cause poor merges + } // RTREE_USE_SPHERICAL_VOLUME +} + +// Load branch buffer with branches from full node plus the extra branch. +func d7getBranches(node *d7nodeT, branch *d7branchT, parVars *d7partitionVarsT) { + // Load the branch buffer + for index := 0; index < d7maxNodes; index++ { + parVars.branchBuf[index] = node.branch[index] + } + parVars.branchBuf[d7maxNodes] = *branch + parVars.branchCount = d7maxNodes + 1 + + // Calculate rect containing all in the set + parVars.coverSplit = parVars.branchBuf[0].rect + for index := 1; index < d7maxNodes+1; index++ { + parVars.coverSplit = d7combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect) + } + parVars.coverSplitArea = d7calcRectVolume(&parVars.coverSplit) +} + +// Method #0 for choosing a partition: +// As the seeds for the two groups, pick the two rects that would waste the +// most area if covered by a single rectangle, i.e. evidently the worst pair +// to have in the same group. +// Of the remaining, one at a time is chosen to be put in one of the two groups. +// The one chosen is the one with the greatest difference in area expansion +// depending on which group - the rect most strongly attracted to one group +// and repelled from the other. +// If one group gets too full (more would force other group to violate min +// fill requirement) then other group gets the rest. +// These last are the ones that can go in either group most easily. 
+func d7choosePartition(parVars *d7partitionVarsT, minFill int) { + var biggestDiff float64 + var group, chosen, betterGroup int + + d7initParVars(parVars, parVars.branchCount, minFill) + d7pickSeeds(parVars) + + for ((parVars.count[0] + parVars.count[1]) < parVars.total) && + (parVars.count[0] < (parVars.total - parVars.minFill)) && + (parVars.count[1] < (parVars.total - parVars.minFill)) { + biggestDiff = -1 + for index := 0; index < parVars.total; index++ { + if d7notTaken == parVars.partition[index] { + curRect := &parVars.branchBuf[index].rect + rect0 := d7combineRect(curRect, &parVars.cover[0]) + rect1 := d7combineRect(curRect, &parVars.cover[1]) + growth0 := d7calcRectVolume(&rect0) - parVars.area[0] + growth1 := d7calcRectVolume(&rect1) - parVars.area[1] + diff := growth1 - growth0 + if diff >= 0 { + group = 0 + } else { + group = 1 + diff = -diff + } + + if diff > biggestDiff { + biggestDiff = diff + chosen = index + betterGroup = group + } else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) { + chosen = index + betterGroup = group + } + } + } + d7classify(chosen, betterGroup, parVars) + } + + // If one group too full, put remaining rects in the other + if (parVars.count[0] + parVars.count[1]) < parVars.total { + if parVars.count[0] >= parVars.total-parVars.minFill { + group = 1 + } else { + group = 0 + } + for index := 0; index < parVars.total; index++ { + if d7notTaken == parVars.partition[index] { + d7classify(index, group, parVars) + } + } + } +} + +// Copy branches from the buffer into two nodes according to the partition. +func d7loadNodes(nodeA, nodeB *d7nodeT, parVars *d7partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d7nodeT{nodeA, nodeB} + + // It is assured that d7addBranch here will not cause a node split. + d7addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d7partitionVarsT structure. +func d7initParVars(parVars *d7partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d7notTaken + } +} + +func d7pickSeeds(parVars *d7partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d7maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d7calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d7combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d7calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d7classify(seed0, 0, parVars) + d7classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. 
+func d7classify(index, group int, parVars *d7partitionVarsT) { + parVars.partition[index] = group + + // Calculate combined rect + if parVars.count[group] == 0 { + parVars.cover[group] = parVars.branchBuf[index].rect + } else { + parVars.cover[group] = d7combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group]) + } + + // Calculate volume of combined rect + parVars.area[group] = d7calcRectVolume(&parVars.cover[group]) + + parVars.count[group]++ +} + +// Delete a data rectangle from an index structure. +// Pass in a pointer to a d7rectT, the tid of the record, ptr to ptr to root node. +// Returns 1 if record not found, 0 if success. +// d7removeRect provides for eliminating the root. +func d7removeRect(rect *d7rectT, id interface{}, root **d7nodeT) bool { + var reInsertList *d7listNodeT + + if !d7removeRectRec(rect, id, *root, &reInsertList) { + // Found and deleted a data item + // Reinsert any branches from eliminated nodes + for reInsertList != nil { + tempNode := reInsertList.node + + for index := 0; index < tempNode.count; index++ { + // TODO go over this code. should I use (tempNode->m_level - 1)? + d7insertRect(&tempNode.branch[index], root, tempNode.level) + } + reInsertList = reInsertList.next + } + + // Check for redundant root (not leaf, 1 child) and eliminate TODO replace + // if with while? In case there is a whole branch of redundant roots... + if (*root).count == 1 && (*root).isInternalNode() { + tempNode := (*root).branch[0].child + *root = tempNode + } + return false + } else { + return true + } +} + +// Delete a rectangle from non-root part of an index structure. +// Called by d7removeRect. Descends tree recursively, +// merges branches on the way back up. +// Returns 1 if record not found, 0 if success. +func d7removeRectRec(rect *d7rectT, id interface{}, node *d7nodeT, listNode **d7listNodeT) bool { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + if d7overlap(*rect, node.branch[index].rect) { + if !d7removeRectRec(rect, id, node.branch[index].child, listNode) { + if node.branch[index].child.count >= d7minNodes { + // child removed, just resize parent rect + node.branch[index].rect = d7nodeCover(node.branch[index].child) + } else { + // child removed, not enough entries in node, eliminate node + d7reInsert(node.branch[index].child, listNode) + d7disconnectBranch(node, index) // Must return after this call as count has changed + } + return false + } + } + } + return true + } else { // A leaf node + for index := 0; index < node.count; index++ { + if node.branch[index].data == id { + d7disconnectBranch(node, index) // Must return after this call as count has changed + return false + } + } + return true + } +} + +// Decide whether two rectangles d7overlap. +func d7overlap(rectA, rectB d7rectT) bool { + for index := 0; index < d7numDims; index++ { + if rectA.min[index] > rectB.max[index] || + rectB.min[index] > rectA.max[index] { + return false + } + } + return true +} + +// Add a node to the reinsertion list. All its branches will later +// be reinserted into the index structure. +func d7reInsert(node *d7nodeT, listNode **d7listNodeT) { + newListNode := &d7listNodeT{} + newListNode.node = node + newListNode.next = *listNode + *listNode = newListNode +} + +// d7search in an index tree or subtree for all data retangles that d7overlap the argument rectangle. 
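+// The callback decides whether traversal continues: returning false stops the
+// walk immediately and the second result becomes false. A minimal sketch
+// (tr and searchRect are placeholder variables, not part of this file):
+//
+//	hits, _ := d7search(tr.root, searchRect, 0, func(data interface{}) bool {
+//		return true // keep going; hits ends up as the total number of overlaps
+//	})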
+func d7search(node *d7nodeT, rect d7rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) {
+	if node.isInternalNode() {
+		// This is an internal node in the tree
+		for index := 0; index < node.count; index++ {
+			if d7overlap(rect, node.branch[index].rect) {
+				var ok bool
+				foundCount, ok = d7search(node.branch[index].child, rect, foundCount, resultCallback)
+				if !ok {
+					// The callback indicated to stop searching
+					return foundCount, false
+				}
+			}
+		}
+	} else {
+		// This is a leaf node
+		for index := 0; index < node.count; index++ {
+			if d7overlap(rect, node.branch[index].rect) {
+				id := node.branch[index].data
+				foundCount++
+				if !resultCallback(id) {
+					return foundCount, false // Don't continue searching
+				}
+
+			}
+		}
+	}
+	return foundCount, true // Continue searching
+}
+
+func d8fmin(a, b float64) float64 {
+	if a < b {
+		return a
+	}
+	return b
+}
+func d8fmax(a, b float64) float64 {
+	if a > b {
+		return a
+	}
+	return b
+}
+
+const (
+	d8numDims            = 8
+	d8maxNodes           = 8
+	d8minNodes           = d8maxNodes / 2
+	d8useSphericalVolume = true // Better split classification, may be slower on some systems
+)
+
+var d8unitSphereVolume = []float64{
+	0.000000, 2.000000, 3.141593, // Dimension 0,1,2
+	4.188790, 4.934802, 5.263789, // Dimension 3,4,5
+	5.167713, 4.724766, 4.058712, // Dimension 6,7,8
+	3.298509, 2.550164, 1.884104, // Dimension 9,10,11
+	1.335263, 0.910629, 0.599265, // Dimension 12,13,14
+	0.381443, 0.235331, 0.140981, // Dimension 15,16,17
+	0.082146, 0.046622, 0.025807, // Dimension 18,19,20
+}[d8numDims]
+
+type d8RTree struct {
+	root *d8nodeT ///< Root of tree
+}
+
+/// Minimal bounding rectangle (n-dimensional)
+type d8rectT struct {
+	min [d8numDims]float64 ///< Min dimensions of bounding box
+	max [d8numDims]float64 ///< Max dimensions of bounding box
+}
+
+/// May be data or may be another subtree
+/// The parent's level determines this.
+/// If the parent's level is 0, then this is data
+type d8branchT struct {
+	rect  d8rectT     ///< Bounds
+	child *d8nodeT    ///< Child node
+	data  interface{} ///< Data Id or Ptr
+}
+
+/// d8nodeT for each branch level
+type d8nodeT struct {
+	count  int                   ///< Count
+	level  int                   ///< Leaf is zero, others positive
+	branch [d8maxNodes]d8branchT ///< Branch
+}
+
+func (node *d8nodeT) isInternalNode() bool {
+	return (node.level > 0) // Not a leaf, but an internal node
+}
+func (node *d8nodeT) isLeaf() bool {
+	return (node.level == 0) // A leaf, contains data
+}
+
+/// A linked list of nodes for reinsertion after a delete operation
+type d8listNodeT struct {
+	next *d8listNodeT ///< Next in list
+	node *d8nodeT     ///< Node
+}
+
+const d8notTaken = -1 // indicates that a partition slot has not been assigned yet
+
+/// Variables for finding a split partition
+type d8partitionVarsT struct {
+	partition [d8maxNodes + 1]int
+	total     int
+	minFill   int
+	count     [2]int
+	cover     [2]d8rectT
+	area      [2]float64
+
+	branchBuf      [d8maxNodes + 1]d8branchT
+	branchCount    int
+	coverSplit     d8rectT
+	coverSplitArea float64
+}
+
+func d8New() *d8RTree {
+	// We only support machine-word-sized simple data types, e.g. an integer index or an object pointer,
+	// since the data is stored as a union with the non-data branch fields.
+	return &d8RTree{
+		root: &d8nodeT{},
+	}
+}
+
+/// Insert entry
+/// \param a_min Min of bounding rect
+/// \param a_max Max of bounding rect
+/// \param a_dataId Positive Id of data. May be zero, but negative numbers are not allowed.
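+///
+/// A minimal usage sketch for this 8-dimensional variant (the boxes and the
+/// id string are placeholder values):
+///
+///	tr := d8New()
+///	tr.Insert(
+///		[d8numDims]float64{0, 0, 0, 0, 0, 0, 0, 0},
+///		[d8numDims]float64{1, 1, 1, 1, 1, 1, 1, 1},
+///		"item-1")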
+func (tr *d8RTree) Insert(min, max [d8numDims]float64, dataId interface{}) { + var branch d8branchT + branch.data = dataId + for axis := 0; axis < d8numDims; axis++ { + branch.rect.min[axis] = min[axis] + branch.rect.max[axis] = max[axis] + } + d8insertRect(&branch, &tr.root, 0) +} + +/// Remove entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d8RTree) Remove(min, max [d8numDims]float64, dataId interface{}) { + var rect d8rectT + for axis := 0; axis < d8numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + d8removeRect(&rect, dataId, &tr.root) +} + +/// Find all within d8search rectangle +/// \param a_min Min of d8search bounding rect +/// \param a_max Max of d8search bounding rect +/// \param a_searchResult d8search result array. Caller should set grow size. Function will reset, not append to array. +/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching +/// \param a_context User context to pass as parameter to a_resultCallback +/// \return Returns the number of entries found +func (tr *d8RTree) Search(min, max [d8numDims]float64, resultCallback func(data interface{}) bool) int { + var rect d8rectT + for axis := 0; axis < d8numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + foundCount, _ := d8search(tr.root, rect, 0, resultCallback) + return foundCount +} + +/// Count the data elements in this container. This is slow as no internal counter is maintained. +func (tr *d8RTree) Count() int { + var count int + d8countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d8RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d8nodeT{} +} + +func d8countRec(node *d8nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d8countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +func d8insertRectRec(branch *d8branchT, node *d8nodeT, newNode **d8nodeT, level int) bool { + // recurse until we reach the correct level for the new record. data records + // will always be called with a_level == 0 (leaf) + if node.level > level { + // Still above level for insertion, go down tree recursively + var otherNode *d8nodeT + //var newBranch d8branchT + + // find the optimal branch for this record + index := d8pickBranch(&branch.rect, node) + + // recursively insert this record into the picked branch + childWasSplit := d8insertRectRec(branch, node.branch[index].child, &otherNode, level) + + if !childWasSplit { + // Child was not split. Merge the bounding box of the new record with the + // existing bounding box + node.branch[index].rect = d8combineRect(&branch.rect, &(node.branch[index].rect)) + return false + } else { + // Child was split. 
The old branches are now re-partitioned to two nodes + // so we have to re-calculate the bounding boxes of each node + node.branch[index].rect = d8nodeCover(node.branch[index].child) + var newBranch d8branchT + newBranch.child = otherNode + newBranch.rect = d8nodeCover(otherNode) + + // The old node is already a child of a_node. Now add the newly-created + // node to a_node as well. a_node might be split because of that. + return d8addBranch(&newBranch, node, newNode) + } + } else if node.level == level { + // We have reached level for insertion. Add rect, split if necessary + return d8addBranch(branch, node, newNode) + } else { + // Should never occur + return false + } +} + +// Insert a data rectangle into an index structure. +// d8insertRect provides for splitting the root; +// returns 1 if root was split, 0 if it was not. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +// InsertRect2 does the recursion. +// +func d8insertRect(branch *d8branchT, root **d8nodeT, level int) bool { + var newNode *d8nodeT + + if d8insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d8nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d8branchT + + // add old root node as a child of the new root + newBranch.rect = d8nodeCover(*root) + newBranch.child = *root + d8addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d8nodeCover(newNode) + newBranch.child = newNode + d8addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d8nodeCover(node *d8nodeT) d8rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d8combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d8addBranch(branch *d8branchT, node *d8nodeT, newNode **d8nodeT) bool { + if node.count < d8maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d8splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d8disconnectBranch(node *d8nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. Pick the one that will need the smallest increase +// in area to accomodate the new rectangle. This will result in the +// least total area for the covering rectangles in the current node. +// In case of a tie, pick the one which was smaller before, to get +// the best resolution when searching. 
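+// The enlargement test for each candidate branch follows the pattern below
+// (the same calls as in the function body, shown here only as a summary):
+//
+//	tempRect := d8combineRect(rect, &node.branch[index].rect)
+//	increase := d8calcRectVolume(&tempRect) - d8calcRectVolume(&node.branch[index].rect)
+//	// the smallest increase wins; ties go to the branch with the smaller area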
+func d8pickBranch(rect *d8rectT, node *d8nodeT) int { + var firstTime bool = true + var increase float64 + var bestIncr float64 = -1 + var area float64 + var bestArea float64 + var best int + var tempRect d8rectT + + for index := 0; index < node.count; index++ { + curRect := &node.branch[index].rect + area = d8calcRectVolume(curRect) + tempRect = d8combineRect(rect, curRect) + increase = d8calcRectVolume(&tempRect) - area + if (increase < bestIncr) || firstTime { + best = index + bestArea = area + bestIncr = increase + firstTime = false + } else if (increase == bestIncr) && (area < bestArea) { + best = index + bestArea = area + bestIncr = increase + } + } + return best +} + +// Combine two rectangles into larger one containing both +func d8combineRect(rectA, rectB *d8rectT) d8rectT { + var newRect d8rectT + + for index := 0; index < d8numDims; index++ { + newRect.min[index] = d8fmin(rectA.min[index], rectB.min[index]) + newRect.max[index] = d8fmax(rectA.max[index], rectB.max[index]) + } + + return newRect +} + +// Split a node. +// Divides the nodes branches and the extra one between two nodes. +// Old node is one of the new ones, and one really new one is created. +// Tries more than one method for choosing a partition, uses best result. +func d8splitNode(node *d8nodeT, branch *d8branchT, newNode **d8nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d8partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d8getBranches(node, branch, parVars) + + // Find partition + d8choosePartition(parVars, d8minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d8nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d8loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d8rectVolume(rect *d8rectT) float64 { + var volume float64 = 1 + for index := 0; index < d8numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d8rectT +func d8rectSphericalVolume(rect *d8rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d8numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. + if d8numDims == 5 { + return (radius * radius * radius * radius * radius * d8unitSphereVolume) + } else if d8numDims == 4 { + return (radius * radius * radius * radius * d8unitSphereVolume) + } else if d8numDims == 3 { + return (radius * radius * radius * d8unitSphereVolume) + } else if d8numDims == 2 { + return (radius * radius * d8unitSphereVolume) + } else { + return (math.Pow(radius, d8numDims) * d8unitSphereVolume) + } +} + +// Use one of the methods to calculate retangle volume +func d8calcRectVolume(rect *d8rectT) float64 { + if d8useSphericalVolume { + return d8rectSphericalVolume(rect) // Slower but helps certain merge cases + } else { // RTREE_USE_SPHERICAL_VOLUME + return d8rectVolume(rect) // Faster but can cause poor merges + } // RTREE_USE_SPHERICAL_VOLUME +} + +// Load branch buffer with branches from full node plus the extra branch. 
+func d8getBranches(node *d8nodeT, branch *d8branchT, parVars *d8partitionVarsT) { + // Load the branch buffer + for index := 0; index < d8maxNodes; index++ { + parVars.branchBuf[index] = node.branch[index] + } + parVars.branchBuf[d8maxNodes] = *branch + parVars.branchCount = d8maxNodes + 1 + + // Calculate rect containing all in the set + parVars.coverSplit = parVars.branchBuf[0].rect + for index := 1; index < d8maxNodes+1; index++ { + parVars.coverSplit = d8combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect) + } + parVars.coverSplitArea = d8calcRectVolume(&parVars.coverSplit) +} + +// Method #0 for choosing a partition: +// As the seeds for the two groups, pick the two rects that would waste the +// most area if covered by a single rectangle, i.e. evidently the worst pair +// to have in the same group. +// Of the remaining, one at a time is chosen to be put in one of the two groups. +// The one chosen is the one with the greatest difference in area expansion +// depending on which group - the rect most strongly attracted to one group +// and repelled from the other. +// If one group gets too full (more would force other group to violate min +// fill requirement) then other group gets the rest. +// These last are the ones that can go in either group most easily. +func d8choosePartition(parVars *d8partitionVarsT, minFill int) { + var biggestDiff float64 + var group, chosen, betterGroup int + + d8initParVars(parVars, parVars.branchCount, minFill) + d8pickSeeds(parVars) + + for ((parVars.count[0] + parVars.count[1]) < parVars.total) && + (parVars.count[0] < (parVars.total - parVars.minFill)) && + (parVars.count[1] < (parVars.total - parVars.minFill)) { + biggestDiff = -1 + for index := 0; index < parVars.total; index++ { + if d8notTaken == parVars.partition[index] { + curRect := &parVars.branchBuf[index].rect + rect0 := d8combineRect(curRect, &parVars.cover[0]) + rect1 := d8combineRect(curRect, &parVars.cover[1]) + growth0 := d8calcRectVolume(&rect0) - parVars.area[0] + growth1 := d8calcRectVolume(&rect1) - parVars.area[1] + diff := growth1 - growth0 + if diff >= 0 { + group = 0 + } else { + group = 1 + diff = -diff + } + + if diff > biggestDiff { + biggestDiff = diff + chosen = index + betterGroup = group + } else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) { + chosen = index + betterGroup = group + } + } + } + d8classify(chosen, betterGroup, parVars) + } + + // If one group too full, put remaining rects in the other + if (parVars.count[0] + parVars.count[1]) < parVars.total { + if parVars.count[0] >= parVars.total-parVars.minFill { + group = 1 + } else { + group = 0 + } + for index := 0; index < parVars.total; index++ { + if d8notTaken == parVars.partition[index] { + d8classify(index, group, parVars) + } + } + } +} + +// Copy branches from the buffer into two nodes according to the partition. +func d8loadNodes(nodeA, nodeB *d8nodeT, parVars *d8partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d8nodeT{nodeA, nodeB} + + // It is assured that d8addBranch here will not cause a node split. + d8addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d8partitionVarsT structure. 
+func d8initParVars(parVars *d8partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d8notTaken + } +} + +func d8pickSeeds(parVars *d8partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d8maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d8calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d8combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d8calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d8classify(seed0, 0, parVars) + d8classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. +func d8classify(index, group int, parVars *d8partitionVarsT) { + parVars.partition[index] = group + + // Calculate combined rect + if parVars.count[group] == 0 { + parVars.cover[group] = parVars.branchBuf[index].rect + } else { + parVars.cover[group] = d8combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group]) + } + + // Calculate volume of combined rect + parVars.area[group] = d8calcRectVolume(&parVars.cover[group]) + + parVars.count[group]++ +} + +// Delete a data rectangle from an index structure. +// Pass in a pointer to a d8rectT, the tid of the record, ptr to ptr to root node. +// Returns 1 if record not found, 0 if success. +// d8removeRect provides for eliminating the root. +func d8removeRect(rect *d8rectT, id interface{}, root **d8nodeT) bool { + var reInsertList *d8listNodeT + + if !d8removeRectRec(rect, id, *root, &reInsertList) { + // Found and deleted a data item + // Reinsert any branches from eliminated nodes + for reInsertList != nil { + tempNode := reInsertList.node + + for index := 0; index < tempNode.count; index++ { + // TODO go over this code. should I use (tempNode->m_level - 1)? + d8insertRect(&tempNode.branch[index], root, tempNode.level) + } + reInsertList = reInsertList.next + } + + // Check for redundant root (not leaf, 1 child) and eliminate TODO replace + // if with while? In case there is a whole branch of redundant roots... + if (*root).count == 1 && (*root).isInternalNode() { + tempNode := (*root).branch[0].child + *root = tempNode + } + return false + } else { + return true + } +} + +// Delete a rectangle from non-root part of an index structure. +// Called by d8removeRect. Descends tree recursively, +// merges branches on the way back up. +// Returns 1 if record not found, 0 if success. 
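+// When a removal leaves a child with fewer than d8minNodes entries, the child
+// is detached and queued for reinsertion rather than rebalanced in place,
+// roughly:
+//
+//	if node.branch[index].child.count < d8minNodes {
+//		d8reInsert(node.branch[index].child, listNode) // queue surviving branches
+//		d8disconnectBranch(node, index)                // caller must stop iterating
+//	}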
+func d8removeRectRec(rect *d8rectT, id interface{}, node *d8nodeT, listNode **d8listNodeT) bool { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + if d8overlap(*rect, node.branch[index].rect) { + if !d8removeRectRec(rect, id, node.branch[index].child, listNode) { + if node.branch[index].child.count >= d8minNodes { + // child removed, just resize parent rect + node.branch[index].rect = d8nodeCover(node.branch[index].child) + } else { + // child removed, not enough entries in node, eliminate node + d8reInsert(node.branch[index].child, listNode) + d8disconnectBranch(node, index) // Must return after this call as count has changed + } + return false + } + } + } + return true + } else { // A leaf node + for index := 0; index < node.count; index++ { + if node.branch[index].data == id { + d8disconnectBranch(node, index) // Must return after this call as count has changed + return false + } + } + return true + } +} + +// Decide whether two rectangles d8overlap. +func d8overlap(rectA, rectB d8rectT) bool { + for index := 0; index < d8numDims; index++ { + if rectA.min[index] > rectB.max[index] || + rectB.min[index] > rectA.max[index] { + return false + } + } + return true +} + +// Add a node to the reinsertion list. All its branches will later +// be reinserted into the index structure. +func d8reInsert(node *d8nodeT, listNode **d8listNodeT) { + newListNode := &d8listNodeT{} + newListNode.node = node + newListNode.next = *listNode + *listNode = newListNode +} + +// d8search in an index tree or subtree for all data retangles that d8overlap the argument rectangle. +func d8search(node *d8nodeT, rect d8rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) { + if node.isInternalNode() { + // This is an internal node in the tree + for index := 0; index < node.count; index++ { + if d8overlap(rect, node.branch[index].rect) { + var ok bool + foundCount, ok = d8search(node.branch[index].child, rect, foundCount, resultCallback) + if !ok { + // The callback indicated to stop searching + return foundCount, false + } + } + } + } else { + // This is a leaf node + for index := 0; index < node.count; index++ { + if d8overlap(rect, node.branch[index].rect) { + id := node.branch[index].data + foundCount++ + if !resultCallback(id) { + return foundCount, false // Don't continue searching + } + + } + } + } + return foundCount, true // Continue searching +} + +func d9fmin(a, b float64) float64 { + if a < b { + return a + } + return b +} +func d9fmax(a, b float64) float64 { + if a > b { + return a + } + return b +} + +const ( + d9numDims = 9 + d9maxNodes = 8 + d9minNodes = d9maxNodes / 2 + d9useSphericalVolume = true // Better split classification, may be slower on some systems +) + +var d9unitSphereVolume = []float64{ + 0.000000, 2.000000, 3.141593, // Dimension 0,1,2 + 4.188790, 4.934802, 5.263789, // Dimension 3,4,5 + 5.167713, 4.724766, 4.058712, // Dimension 6,7,8 + 3.298509, 2.550164, 1.884104, // Dimension 9,10,11 + 1.335263, 0.910629, 0.599265, // Dimension 12,13,14 + 0.381443, 0.235331, 0.140981, // Dimension 15,16,17 + 0.082146, 0.046622, 0.025807, // Dimension 18,19,20 +}[d9numDims] + +type d9RTree struct { + root *d9nodeT ///< Root of tree +} + +/// Minimal bounding rectangle (n-dimensional) +type d9rectT struct { + min [d9numDims]float64 ///< Min dimensions of bounding box + max [d9numDims]float64 ///< Max dimensions of bounding box +} + +/// May be data or may be another subtree +/// The parents level determines this. 
+/// If the parent's level is 0, then this is data
+type d9branchT struct {
+	rect  d9rectT     ///< Bounds
+	child *d9nodeT    ///< Child node
+	data  interface{} ///< Data Id or Ptr
+}
+
+/// d9nodeT for each branch level
+type d9nodeT struct {
+	count  int                   ///< Count
+	level  int                   ///< Leaf is zero, others positive
+	branch [d9maxNodes]d9branchT ///< Branch
+}
+
+func (node *d9nodeT) isInternalNode() bool {
+	return (node.level > 0) // Not a leaf, but an internal node
+}
+func (node *d9nodeT) isLeaf() bool {
+	return (node.level == 0) // A leaf, contains data
+}
+
+/// A linked list of nodes for reinsertion after a delete operation
+type d9listNodeT struct {
+	next *d9listNodeT ///< Next in list
+	node *d9nodeT     ///< Node
+}
+
+const d9notTaken = -1 // indicates that a partition slot has not been assigned yet
+
+/// Variables for finding a split partition
+type d9partitionVarsT struct {
+	partition [d9maxNodes + 1]int
+	total     int
+	minFill   int
+	count     [2]int
+	cover     [2]d9rectT
+	area      [2]float64
+
+	branchBuf      [d9maxNodes + 1]d9branchT
+	branchCount    int
+	coverSplit     d9rectT
+	coverSplitArea float64
+}
+
+func d9New() *d9RTree {
+	// We only support machine-word-sized simple data types, e.g. an integer index or an object pointer,
+	// since the data is stored as a union with the non-data branch fields.
+	return &d9RTree{
+		root: &d9nodeT{},
+	}
+}
+
+/// Insert entry
+/// \param a_min Min of bounding rect
+/// \param a_max Max of bounding rect
+/// \param a_dataId Positive Id of data. May be zero, but negative numbers are not allowed.
+func (tr *d9RTree) Insert(min, max [d9numDims]float64, dataId interface{}) {
+	var branch d9branchT
+	branch.data = dataId
+	for axis := 0; axis < d9numDims; axis++ {
+		branch.rect.min[axis] = min[axis]
+		branch.rect.max[axis] = max[axis]
+	}
+	d9insertRect(&branch, &tr.root, 0)
+}
+
+/// Remove entry
+/// \param a_min Min of bounding rect
+/// \param a_max Max of bounding rect
+/// \param a_dataId Positive Id of data. May be zero, but negative numbers are not allowed.
+func (tr *d9RTree) Remove(min, max [d9numDims]float64, dataId interface{}) {
+	var rect d9rectT
+	for axis := 0; axis < d9numDims; axis++ {
+		rect.min[axis] = min[axis]
+		rect.max[axis] = max[axis]
+	}
+	d9removeRect(&rect, dataId, &tr.root)
+}
+
+/// Find all entries within the search rectangle
+/// \param a_min Min of the search bounding rect
+/// \param a_max Max of the search bounding rect
+/// \param a_resultCallback Callback function to return results. The callback should return 'true' to continue searching
+/// \return Returns the number of entries found
+func (tr *d9RTree) Search(min, max [d9numDims]float64, resultCallback func(data interface{}) bool) int {
+	var rect d9rectT
+	for axis := 0; axis < d9numDims; axis++ {
+		rect.min[axis] = min[axis]
+		rect.max[axis] = max[axis]
+	}
+	foundCount, _ := d9search(tr.root, rect, 0, resultCallback)
+	return foundCount
+}
+
+/// Count the data elements in this container. This is slow as no internal counter is maintained.
+func (tr *d9RTree) Count() int { + var count int + d9countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d9RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d9nodeT{} +} + +func d9countRec(node *d9nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d9countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +func d9insertRectRec(branch *d9branchT, node *d9nodeT, newNode **d9nodeT, level int) bool { + // recurse until we reach the correct level for the new record. data records + // will always be called with a_level == 0 (leaf) + if node.level > level { + // Still above level for insertion, go down tree recursively + var otherNode *d9nodeT + //var newBranch d9branchT + + // find the optimal branch for this record + index := d9pickBranch(&branch.rect, node) + + // recursively insert this record into the picked branch + childWasSplit := d9insertRectRec(branch, node.branch[index].child, &otherNode, level) + + if !childWasSplit { + // Child was not split. Merge the bounding box of the new record with the + // existing bounding box + node.branch[index].rect = d9combineRect(&branch.rect, &(node.branch[index].rect)) + return false + } else { + // Child was split. The old branches are now re-partitioned to two nodes + // so we have to re-calculate the bounding boxes of each node + node.branch[index].rect = d9nodeCover(node.branch[index].child) + var newBranch d9branchT + newBranch.child = otherNode + newBranch.rect = d9nodeCover(otherNode) + + // The old node is already a child of a_node. Now add the newly-created + // node to a_node as well. a_node might be split because of that. + return d9addBranch(&newBranch, node, newNode) + } + } else if node.level == level { + // We have reached level for insertion. Add rect, split if necessary + return d9addBranch(branch, node, newNode) + } else { + // Should never occur + return false + } +} + +// Insert a data rectangle into an index structure. +// d9insertRect provides for splitting the root; +// returns 1 if root was split, 0 if it was not. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +// InsertRect2 does the recursion. 
+// +func d9insertRect(branch *d9branchT, root **d9nodeT, level int) bool { + var newNode *d9nodeT + + if d9insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d9nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d9branchT + + // add old root node as a child of the new root + newBranch.rect = d9nodeCover(*root) + newBranch.child = *root + d9addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d9nodeCover(newNode) + newBranch.child = newNode + d9addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d9nodeCover(node *d9nodeT) d9rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d9combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d9addBranch(branch *d9branchT, node *d9nodeT, newNode **d9nodeT) bool { + if node.count < d9maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d9splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d9disconnectBranch(node *d9nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. Pick the one that will need the smallest increase +// in area to accomodate the new rectangle. This will result in the +// least total area for the covering rectangles in the current node. +// In case of a tie, pick the one which was smaller before, to get +// the best resolution when searching. +func d9pickBranch(rect *d9rectT, node *d9nodeT) int { + var firstTime bool = true + var increase float64 + var bestIncr float64 = -1 + var area float64 + var bestArea float64 + var best int + var tempRect d9rectT + + for index := 0; index < node.count; index++ { + curRect := &node.branch[index].rect + area = d9calcRectVolume(curRect) + tempRect = d9combineRect(rect, curRect) + increase = d9calcRectVolume(&tempRect) - area + if (increase < bestIncr) || firstTime { + best = index + bestArea = area + bestIncr = increase + firstTime = false + } else if (increase == bestIncr) && (area < bestArea) { + best = index + bestArea = area + bestIncr = increase + } + } + return best +} + +// Combine two rectangles into larger one containing both +func d9combineRect(rectA, rectB *d9rectT) d9rectT { + var newRect d9rectT + + for index := 0; index < d9numDims; index++ { + newRect.min[index] = d9fmin(rectA.min[index], rectB.min[index]) + newRect.max[index] = d9fmax(rectA.max[index], rectB.max[index]) + } + + return newRect +} + +// Split a node. +// Divides the nodes branches and the extra one between two nodes. +// Old node is one of the new ones, and one really new one is created. +// Tries more than one method for choosing a partition, uses best result. 
+func d9splitNode(node *d9nodeT, branch *d9branchT, newNode **d9nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d9partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d9getBranches(node, branch, parVars) + + // Find partition + d9choosePartition(parVars, d9minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d9nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d9loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d9rectVolume(rect *d9rectT) float64 { + var volume float64 = 1 + for index := 0; index < d9numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d9rectT +func d9rectSphericalVolume(rect *d9rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d9numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. + if d9numDims == 5 { + return (radius * radius * radius * radius * radius * d9unitSphereVolume) + } else if d9numDims == 4 { + return (radius * radius * radius * radius * d9unitSphereVolume) + } else if d9numDims == 3 { + return (radius * radius * radius * d9unitSphereVolume) + } else if d9numDims == 2 { + return (radius * radius * d9unitSphereVolume) + } else { + return (math.Pow(radius, d9numDims) * d9unitSphereVolume) + } +} + +// Use one of the methods to calculate retangle volume +func d9calcRectVolume(rect *d9rectT) float64 { + if d9useSphericalVolume { + return d9rectSphericalVolume(rect) // Slower but helps certain merge cases + } else { // RTREE_USE_SPHERICAL_VOLUME + return d9rectVolume(rect) // Faster but can cause poor merges + } // RTREE_USE_SPHERICAL_VOLUME +} + +// Load branch buffer with branches from full node plus the extra branch. +func d9getBranches(node *d9nodeT, branch *d9branchT, parVars *d9partitionVarsT) { + // Load the branch buffer + for index := 0; index < d9maxNodes; index++ { + parVars.branchBuf[index] = node.branch[index] + } + parVars.branchBuf[d9maxNodes] = *branch + parVars.branchCount = d9maxNodes + 1 + + // Calculate rect containing all in the set + parVars.coverSplit = parVars.branchBuf[0].rect + for index := 1; index < d9maxNodes+1; index++ { + parVars.coverSplit = d9combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect) + } + parVars.coverSplitArea = d9calcRectVolume(&parVars.coverSplit) +} + +// Method #0 for choosing a partition: +// As the seeds for the two groups, pick the two rects that would waste the +// most area if covered by a single rectangle, i.e. evidently the worst pair +// to have in the same group. +// Of the remaining, one at a time is chosen to be put in one of the two groups. +// The one chosen is the one with the greatest difference in area expansion +// depending on which group - the rect most strongly attracted to one group +// and repelled from the other. +// If one group gets too full (more would force other group to violate min +// fill requirement) then other group gets the rest. +// These last are the ones that can go in either group most easily. 
+func d9choosePartition(parVars *d9partitionVarsT, minFill int) { + var biggestDiff float64 + var group, chosen, betterGroup int + + d9initParVars(parVars, parVars.branchCount, minFill) + d9pickSeeds(parVars) + + for ((parVars.count[0] + parVars.count[1]) < parVars.total) && + (parVars.count[0] < (parVars.total - parVars.minFill)) && + (parVars.count[1] < (parVars.total - parVars.minFill)) { + biggestDiff = -1 + for index := 0; index < parVars.total; index++ { + if d9notTaken == parVars.partition[index] { + curRect := &parVars.branchBuf[index].rect + rect0 := d9combineRect(curRect, &parVars.cover[0]) + rect1 := d9combineRect(curRect, &parVars.cover[1]) + growth0 := d9calcRectVolume(&rect0) - parVars.area[0] + growth1 := d9calcRectVolume(&rect1) - parVars.area[1] + diff := growth1 - growth0 + if diff >= 0 { + group = 0 + } else { + group = 1 + diff = -diff + } + + if diff > biggestDiff { + biggestDiff = diff + chosen = index + betterGroup = group + } else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) { + chosen = index + betterGroup = group + } + } + } + d9classify(chosen, betterGroup, parVars) + } + + // If one group too full, put remaining rects in the other + if (parVars.count[0] + parVars.count[1]) < parVars.total { + if parVars.count[0] >= parVars.total-parVars.minFill { + group = 1 + } else { + group = 0 + } + for index := 0; index < parVars.total; index++ { + if d9notTaken == parVars.partition[index] { + d9classify(index, group, parVars) + } + } + } +} + +// Copy branches from the buffer into two nodes according to the partition. +func d9loadNodes(nodeA, nodeB *d9nodeT, parVars *d9partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d9nodeT{nodeA, nodeB} + + // It is assured that d9addBranch here will not cause a node split. + d9addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d9partitionVarsT structure. +func d9initParVars(parVars *d9partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d9notTaken + } +} + +func d9pickSeeds(parVars *d9partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d9maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d9calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d9combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d9calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d9classify(seed0, 0, parVars) + d9classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. 
+func d9classify(index, group int, parVars *d9partitionVarsT) { + parVars.partition[index] = group + + // Calculate combined rect + if parVars.count[group] == 0 { + parVars.cover[group] = parVars.branchBuf[index].rect + } else { + parVars.cover[group] = d9combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group]) + } + + // Calculate volume of combined rect + parVars.area[group] = d9calcRectVolume(&parVars.cover[group]) + + parVars.count[group]++ +} + +// Delete a data rectangle from an index structure. +// Pass in a pointer to a d9rectT, the tid of the record, ptr to ptr to root node. +// Returns 1 if record not found, 0 if success. +// d9removeRect provides for eliminating the root. +func d9removeRect(rect *d9rectT, id interface{}, root **d9nodeT) bool { + var reInsertList *d9listNodeT + + if !d9removeRectRec(rect, id, *root, &reInsertList) { + // Found and deleted a data item + // Reinsert any branches from eliminated nodes + for reInsertList != nil { + tempNode := reInsertList.node + + for index := 0; index < tempNode.count; index++ { + // TODO go over this code. should I use (tempNode->m_level - 1)? + d9insertRect(&tempNode.branch[index], root, tempNode.level) + } + reInsertList = reInsertList.next + } + + // Check for redundant root (not leaf, 1 child) and eliminate TODO replace + // if with while? In case there is a whole branch of redundant roots... + if (*root).count == 1 && (*root).isInternalNode() { + tempNode := (*root).branch[0].child + *root = tempNode + } + return false + } else { + return true + } +} + +// Delete a rectangle from non-root part of an index structure. +// Called by d9removeRect. Descends tree recursively, +// merges branches on the way back up. +// Returns 1 if record not found, 0 if success. +func d9removeRectRec(rect *d9rectT, id interface{}, node *d9nodeT, listNode **d9listNodeT) bool { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + if d9overlap(*rect, node.branch[index].rect) { + if !d9removeRectRec(rect, id, node.branch[index].child, listNode) { + if node.branch[index].child.count >= d9minNodes { + // child removed, just resize parent rect + node.branch[index].rect = d9nodeCover(node.branch[index].child) + } else { + // child removed, not enough entries in node, eliminate node + d9reInsert(node.branch[index].child, listNode) + d9disconnectBranch(node, index) // Must return after this call as count has changed + } + return false + } + } + } + return true + } else { // A leaf node + for index := 0; index < node.count; index++ { + if node.branch[index].data == id { + d9disconnectBranch(node, index) // Must return after this call as count has changed + return false + } + } + return true + } +} + +// Decide whether two rectangles d9overlap. +func d9overlap(rectA, rectB d9rectT) bool { + for index := 0; index < d9numDims; index++ { + if rectA.min[index] > rectB.max[index] || + rectB.min[index] > rectA.max[index] { + return false + } + } + return true +} + +// Add a node to the reinsertion list. All its branches will later +// be reinserted into the index structure. +func d9reInsert(node *d9nodeT, listNode **d9listNodeT) { + newListNode := &d9listNodeT{} + newListNode.node = node + newListNode.next = *listNode + *listNode = newListNode +} + +// d9search in an index tree or subtree for all data retangles that d9overlap the argument rectangle. 
+func d9search(node *d9nodeT, rect d9rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) { + if node.isInternalNode() { + // This is an internal node in the tree + for index := 0; index < node.count; index++ { + if d9overlap(rect, node.branch[index].rect) { + var ok bool + foundCount, ok = d9search(node.branch[index].child, rect, foundCount, resultCallback) + if !ok { + // The callback indicated to stop searching + return foundCount, false + } + } + } + } else { + // This is a leaf node + for index := 0; index < node.count; index++ { + if d9overlap(rect, node.branch[index].rect) { + id := node.branch[index].data + foundCount++ + if !resultCallback(id) { + return foundCount, false // Don't continue searching + } + + } + } + } + return foundCount, true // Continue searching +} + +func d10fmin(a, b float64) float64 { + if a < b { + return a + } + return b +} +func d10fmax(a, b float64) float64 { + if a > b { + return a + } + return b +} + +const ( + d10numDims = 10 + d10maxNodes = 8 + d10minNodes = d10maxNodes / 2 + d10useSphericalVolume = true // Better split classification, may be slower on some systems +) + +var d10unitSphereVolume = []float64{ + 0.000000, 2.000000, 3.141593, // Dimension 0,1,2 + 4.188790, 4.934802, 5.263789, // Dimension 3,4,5 + 5.167713, 4.724766, 4.058712, // Dimension 6,7,8 + 3.298509, 2.550164, 1.884104, // Dimension 9,10,11 + 1.335263, 0.910629, 0.599265, // Dimension 12,13,14 + 0.381443, 0.235331, 0.140981, // Dimension 15,16,17 + 0.082146, 0.046622, 0.025807, // Dimension 18,19,20 +}[d10numDims] + +type d10RTree struct { + root *d10nodeT ///< Root of tree +} + +/// Minimal bounding rectangle (n-dimensional) +type d10rectT struct { + min [d10numDims]float64 ///< Min dimensions of bounding box + max [d10numDims]float64 ///< Max dimensions of bounding box +} + +/// May be data or may be another subtree +/// The parents level determines this. +/// If the parents level is 0, then this is data +type d10branchT struct { + rect d10rectT ///< Bounds + child *d10nodeT ///< Child node + data interface{} ///< Data Id or Ptr +} + +/// d10nodeT for each branch level +type d10nodeT struct { + count int ///< Count + level int ///< Leaf is zero, others positive + branch [d10maxNodes]d10branchT ///< Branch +} + +func (node *d10nodeT) isInternalNode() bool { + return (node.level > 0) // Not a leaf, but a internal node +} +func (node *d10nodeT) isLeaf() bool { + return (node.level == 0) // A leaf, contains data +} + +/// A link list of nodes for reinsertion after a delete operation +type d10listNodeT struct { + next *d10listNodeT ///< Next in list + node *d10nodeT ///< Node +} + +const d10notTaken = -1 // indicates that position + +/// Variables for finding a split partition +type d10partitionVarsT struct { + partition [d10maxNodes + 1]int + total int + minFill int + count [2]int + cover [2]d10rectT + area [2]float64 + + branchBuf [d10maxNodes + 1]d10branchT + branchCount int + coverSplit d10rectT + coverSplitArea float64 +} + +func d10New() *d10RTree { + // We only support machine word size simple data type eg. integer index or object pointer. + // Since we are storing as union with non data branch + return &d10RTree{ + root: &d10nodeT{}, + } +} + +/// Insert entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. 
+func (tr *d10RTree) Insert(min, max [d10numDims]float64, dataId interface{}) { + var branch d10branchT + branch.data = dataId + for axis := 0; axis < d10numDims; axis++ { + branch.rect.min[axis] = min[axis] + branch.rect.max[axis] = max[axis] + } + d10insertRect(&branch, &tr.root, 0) +} + +/// Remove entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d10RTree) Remove(min, max [d10numDims]float64, dataId interface{}) { + var rect d10rectT + for axis := 0; axis < d10numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + d10removeRect(&rect, dataId, &tr.root) +} + +/// Find all within d10search rectangle +/// \param a_min Min of d10search bounding rect +/// \param a_max Max of d10search bounding rect +/// \param a_searchResult d10search result array. Caller should set grow size. Function will reset, not append to array. +/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching +/// \param a_context User context to pass as parameter to a_resultCallback +/// \return Returns the number of entries found +func (tr *d10RTree) Search(min, max [d10numDims]float64, resultCallback func(data interface{}) bool) int { + var rect d10rectT + for axis := 0; axis < d10numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + foundCount, _ := d10search(tr.root, rect, 0, resultCallback) + return foundCount +} + +/// Count the data elements in this container. This is slow as no internal counter is maintained. +func (tr *d10RTree) Count() int { + var count int + d10countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d10RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d10nodeT{} +} + +func d10countRec(node *d10nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d10countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +func d10insertRectRec(branch *d10branchT, node *d10nodeT, newNode **d10nodeT, level int) bool { + // recurse until we reach the correct level for the new record. data records + // will always be called with a_level == 0 (leaf) + if node.level > level { + // Still above level for insertion, go down tree recursively + var otherNode *d10nodeT + //var newBranch d10branchT + + // find the optimal branch for this record + index := d10pickBranch(&branch.rect, node) + + // recursively insert this record into the picked branch + childWasSplit := d10insertRectRec(branch, node.branch[index].child, &otherNode, level) + + if !childWasSplit { + // Child was not split. Merge the bounding box of the new record with the + // existing bounding box + node.branch[index].rect = d10combineRect(&branch.rect, &(node.branch[index].rect)) + return false + } else { + // Child was split. 
The old branches are now re-partitioned to two nodes + // so we have to re-calculate the bounding boxes of each node + node.branch[index].rect = d10nodeCover(node.branch[index].child) + var newBranch d10branchT + newBranch.child = otherNode + newBranch.rect = d10nodeCover(otherNode) + + // The old node is already a child of a_node. Now add the newly-created + // node to a_node as well. a_node might be split because of that. + return d10addBranch(&newBranch, node, newNode) + } + } else if node.level == level { + // We have reached level for insertion. Add rect, split if necessary + return d10addBranch(branch, node, newNode) + } else { + // Should never occur + return false + } +} + +// Insert a data rectangle into an index structure. +// d10insertRect provides for splitting the root; +// returns 1 if root was split, 0 if it was not. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +// InsertRect2 does the recursion. +// +func d10insertRect(branch *d10branchT, root **d10nodeT, level int) bool { + var newNode *d10nodeT + + if d10insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d10nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d10branchT + + // add old root node as a child of the new root + newBranch.rect = d10nodeCover(*root) + newBranch.child = *root + d10addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d10nodeCover(newNode) + newBranch.child = newNode + d10addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d10nodeCover(node *d10nodeT) d10rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d10combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d10addBranch(branch *d10branchT, node *d10nodeT, newNode **d10nodeT) bool { + if node.count < d10maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d10splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d10disconnectBranch(node *d10nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. Pick the one that will need the smallest increase +// in area to accomodate the new rectangle. This will result in the +// least total area for the covering rectangles in the current node. +// In case of a tie, pick the one which was smaller before, to get +// the best resolution when searching. 
+func d10pickBranch(rect *d10rectT, node *d10nodeT) int { + var firstTime bool = true + var increase float64 + var bestIncr float64 = -1 + var area float64 + var bestArea float64 + var best int + var tempRect d10rectT + + for index := 0; index < node.count; index++ { + curRect := &node.branch[index].rect + area = d10calcRectVolume(curRect) + tempRect = d10combineRect(rect, curRect) + increase = d10calcRectVolume(&tempRect) - area + if (increase < bestIncr) || firstTime { + best = index + bestArea = area + bestIncr = increase + firstTime = false + } else if (increase == bestIncr) && (area < bestArea) { + best = index + bestArea = area + bestIncr = increase + } + } + return best +} + +// Combine two rectangles into larger one containing both +func d10combineRect(rectA, rectB *d10rectT) d10rectT { + var newRect d10rectT + + for index := 0; index < d10numDims; index++ { + newRect.min[index] = d10fmin(rectA.min[index], rectB.min[index]) + newRect.max[index] = d10fmax(rectA.max[index], rectB.max[index]) + } + + return newRect +} + +// Split a node. +// Divides the nodes branches and the extra one between two nodes. +// Old node is one of the new ones, and one really new one is created. +// Tries more than one method for choosing a partition, uses best result. +func d10splitNode(node *d10nodeT, branch *d10branchT, newNode **d10nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d10partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d10getBranches(node, branch, parVars) + + // Find partition + d10choosePartition(parVars, d10minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d10nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d10loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d10rectVolume(rect *d10rectT) float64 { + var volume float64 = 1 + for index := 0; index < d10numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d10rectT +func d10rectSphericalVolume(rect *d10rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d10numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. + if d10numDims == 5 { + return (radius * radius * radius * radius * radius * d10unitSphereVolume) + } else if d10numDims == 4 { + return (radius * radius * radius * radius * d10unitSphereVolume) + } else if d10numDims == 3 { + return (radius * radius * radius * d10unitSphereVolume) + } else if d10numDims == 2 { + return (radius * radius * d10unitSphereVolume) + } else { + return (math.Pow(radius, d10numDims) * d10unitSphereVolume) + } +} + +// Use one of the methods to calculate retangle volume +func d10calcRectVolume(rect *d10rectT) float64 { + if d10useSphericalVolume { + return d10rectSphericalVolume(rect) // Slower but helps certain merge cases + } else { // RTREE_USE_SPHERICAL_VOLUME + return d10rectVolume(rect) // Faster but can cause poor merges + } // RTREE_USE_SPHERICAL_VOLUME +} + +// Load branch buffer with branches from full node plus the extra branch. 
+func d10getBranches(node *d10nodeT, branch *d10branchT, parVars *d10partitionVarsT) { + // Load the branch buffer + for index := 0; index < d10maxNodes; index++ { + parVars.branchBuf[index] = node.branch[index] + } + parVars.branchBuf[d10maxNodes] = *branch + parVars.branchCount = d10maxNodes + 1 + + // Calculate rect containing all in the set + parVars.coverSplit = parVars.branchBuf[0].rect + for index := 1; index < d10maxNodes+1; index++ { + parVars.coverSplit = d10combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect) + } + parVars.coverSplitArea = d10calcRectVolume(&parVars.coverSplit) +} + +// Method #0 for choosing a partition: +// As the seeds for the two groups, pick the two rects that would waste the +// most area if covered by a single rectangle, i.e. evidently the worst pair +// to have in the same group. +// Of the remaining, one at a time is chosen to be put in one of the two groups. +// The one chosen is the one with the greatest difference in area expansion +// depending on which group - the rect most strongly attracted to one group +// and repelled from the other. +// If one group gets too full (more would force other group to violate min +// fill requirement) then other group gets the rest. +// These last are the ones that can go in either group most easily. +func d10choosePartition(parVars *d10partitionVarsT, minFill int) { + var biggestDiff float64 + var group, chosen, betterGroup int + + d10initParVars(parVars, parVars.branchCount, minFill) + d10pickSeeds(parVars) + + for ((parVars.count[0] + parVars.count[1]) < parVars.total) && + (parVars.count[0] < (parVars.total - parVars.minFill)) && + (parVars.count[1] < (parVars.total - parVars.minFill)) { + biggestDiff = -1 + for index := 0; index < parVars.total; index++ { + if d10notTaken == parVars.partition[index] { + curRect := &parVars.branchBuf[index].rect + rect0 := d10combineRect(curRect, &parVars.cover[0]) + rect1 := d10combineRect(curRect, &parVars.cover[1]) + growth0 := d10calcRectVolume(&rect0) - parVars.area[0] + growth1 := d10calcRectVolume(&rect1) - parVars.area[1] + diff := growth1 - growth0 + if diff >= 0 { + group = 0 + } else { + group = 1 + diff = -diff + } + + if diff > biggestDiff { + biggestDiff = diff + chosen = index + betterGroup = group + } else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) { + chosen = index + betterGroup = group + } + } + } + d10classify(chosen, betterGroup, parVars) + } + + // If one group too full, put remaining rects in the other + if (parVars.count[0] + parVars.count[1]) < parVars.total { + if parVars.count[0] >= parVars.total-parVars.minFill { + group = 1 + } else { + group = 0 + } + for index := 0; index < parVars.total; index++ { + if d10notTaken == parVars.partition[index] { + d10classify(index, group, parVars) + } + } + } +} + +// Copy branches from the buffer into two nodes according to the partition. +func d10loadNodes(nodeA, nodeB *d10nodeT, parVars *d10partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d10nodeT{nodeA, nodeB} + + // It is assured that d10addBranch here will not cause a node split. + d10addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d10partitionVarsT structure. 
+func d10initParVars(parVars *d10partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d10notTaken + } +} + +func d10pickSeeds(parVars *d10partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d10maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d10calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d10combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d10calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d10classify(seed0, 0, parVars) + d10classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. +func d10classify(index, group int, parVars *d10partitionVarsT) { + parVars.partition[index] = group + + // Calculate combined rect + if parVars.count[group] == 0 { + parVars.cover[group] = parVars.branchBuf[index].rect + } else { + parVars.cover[group] = d10combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group]) + } + + // Calculate volume of combined rect + parVars.area[group] = d10calcRectVolume(&parVars.cover[group]) + + parVars.count[group]++ +} + +// Delete a data rectangle from an index structure. +// Pass in a pointer to a d10rectT, the tid of the record, ptr to ptr to root node. +// Returns 1 if record not found, 0 if success. +// d10removeRect provides for eliminating the root. +func d10removeRect(rect *d10rectT, id interface{}, root **d10nodeT) bool { + var reInsertList *d10listNodeT + + if !d10removeRectRec(rect, id, *root, &reInsertList) { + // Found and deleted a data item + // Reinsert any branches from eliminated nodes + for reInsertList != nil { + tempNode := reInsertList.node + + for index := 0; index < tempNode.count; index++ { + // TODO go over this code. should I use (tempNode->m_level - 1)? + d10insertRect(&tempNode.branch[index], root, tempNode.level) + } + reInsertList = reInsertList.next + } + + // Check for redundant root (not leaf, 1 child) and eliminate TODO replace + // if with while? In case there is a whole branch of redundant roots... + if (*root).count == 1 && (*root).isInternalNode() { + tempNode := (*root).branch[0].child + *root = tempNode + } + return false + } else { + return true + } +} + +// Delete a rectangle from non-root part of an index structure. +// Called by d10removeRect. Descends tree recursively, +// merges branches on the way back up. +// Returns 1 if record not found, 0 if success. 
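+
+// Minimal removal sketch (editor's illustration, not part of the generated
+// sources). d10removeRectRec below only descends into branches whose
+// rectangles overlap the query and deletes the leaf entry whose data id
+// matches exactly, so two entries sharing one rectangle can be removed
+// independently. The tree is built by hand here, mirroring what the
+// generated constructors do for the other dimensions.
+func d10removeByIdSketch() int {
+	tr := &d10RTree{root: &d10nodeT{}}
+	var min, max [d10numDims]float64
+	for axis := 0; axis < d10numDims; axis++ {
+		max[axis] = 1
+	}
+	tr.Insert(min, max, "a")
+	tr.Insert(min, max, "b") // same rectangle, different id
+	tr.Remove(min, max, "a") // only the "a" entry is deleted
+	return tr.Count()        // 1
+}
+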
+func d10removeRectRec(rect *d10rectT, id interface{}, node *d10nodeT, listNode **d10listNodeT) bool { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + if d10overlap(*rect, node.branch[index].rect) { + if !d10removeRectRec(rect, id, node.branch[index].child, listNode) { + if node.branch[index].child.count >= d10minNodes { + // child removed, just resize parent rect + node.branch[index].rect = d10nodeCover(node.branch[index].child) + } else { + // child removed, not enough entries in node, eliminate node + d10reInsert(node.branch[index].child, listNode) + d10disconnectBranch(node, index) // Must return after this call as count has changed + } + return false + } + } + } + return true + } else { // A leaf node + for index := 0; index < node.count; index++ { + if node.branch[index].data == id { + d10disconnectBranch(node, index) // Must return after this call as count has changed + return false + } + } + return true + } +} + +// Decide whether two rectangles d10overlap. +func d10overlap(rectA, rectB d10rectT) bool { + for index := 0; index < d10numDims; index++ { + if rectA.min[index] > rectB.max[index] || + rectB.min[index] > rectA.max[index] { + return false + } + } + return true +} + +// Add a node to the reinsertion list. All its branches will later +// be reinserted into the index structure. +func d10reInsert(node *d10nodeT, listNode **d10listNodeT) { + newListNode := &d10listNodeT{} + newListNode.node = node + newListNode.next = *listNode + *listNode = newListNode +} + +// d10search in an index tree or subtree for all data retangles that d10overlap the argument rectangle. +func d10search(node *d10nodeT, rect d10rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) { + if node.isInternalNode() { + // This is an internal node in the tree + for index := 0; index < node.count; index++ { + if d10overlap(rect, node.branch[index].rect) { + var ok bool + foundCount, ok = d10search(node.branch[index].child, rect, foundCount, resultCallback) + if !ok { + // The callback indicated to stop searching + return foundCount, false + } + } + } + } else { + // This is a leaf node + for index := 0; index < node.count; index++ { + if d10overlap(rect, node.branch[index].rect) { + id := node.branch[index].data + foundCount++ + if !resultCallback(id) { + return foundCount, false // Don't continue searching + } + + } + } + } + return foundCount, true // Continue searching +} + +func d11fmin(a, b float64) float64 { + if a < b { + return a + } + return b +} +func d11fmax(a, b float64) float64 { + if a > b { + return a + } + return b +} + +const ( + d11numDims = 11 + d11maxNodes = 8 + d11minNodes = d11maxNodes / 2 + d11useSphericalVolume = true // Better split classification, may be slower on some systems +) + +var d11unitSphereVolume = []float64{ + 0.000000, 2.000000, 3.141593, // Dimension 0,1,2 + 4.188790, 4.934802, 5.263789, // Dimension 3,4,5 + 5.167713, 4.724766, 4.058712, // Dimension 6,7,8 + 3.298509, 2.550164, 1.884104, // Dimension 9,10,11 + 1.335263, 0.910629, 0.599265, // Dimension 12,13,14 + 0.381443, 0.235331, 0.140981, // Dimension 15,16,17 + 0.082146, 0.046622, 0.025807, // Dimension 18,19,20 +}[d11numDims] + +type d11RTree struct { + root *d11nodeT ///< Root of tree +} + +/// Minimal bounding rectangle (n-dimensional) +type d11rectT struct { + min [d11numDims]float64 ///< Min dimensions of bounding box + max [d11numDims]float64 ///< Max dimensions of bounding box +} + +/// May be data or may be another subtree +/// The 
parents level determines this. +/// If the parents level is 0, then this is data +type d11branchT struct { + rect d11rectT ///< Bounds + child *d11nodeT ///< Child node + data interface{} ///< Data Id or Ptr +} + +/// d11nodeT for each branch level +type d11nodeT struct { + count int ///< Count + level int ///< Leaf is zero, others positive + branch [d11maxNodes]d11branchT ///< Branch +} + +func (node *d11nodeT) isInternalNode() bool { + return (node.level > 0) // Not a leaf, but a internal node +} +func (node *d11nodeT) isLeaf() bool { + return (node.level == 0) // A leaf, contains data +} + +/// A link list of nodes for reinsertion after a delete operation +type d11listNodeT struct { + next *d11listNodeT ///< Next in list + node *d11nodeT ///< Node +} + +const d11notTaken = -1 // indicates that position + +/// Variables for finding a split partition +type d11partitionVarsT struct { + partition [d11maxNodes + 1]int + total int + minFill int + count [2]int + cover [2]d11rectT + area [2]float64 + + branchBuf [d11maxNodes + 1]d11branchT + branchCount int + coverSplit d11rectT + coverSplitArea float64 +} + +func d11New() *d11RTree { + // We only support machine word size simple data type eg. integer index or object pointer. + // Since we are storing as union with non data branch + return &d11RTree{ + root: &d11nodeT{}, + } +} + +/// Insert entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d11RTree) Insert(min, max [d11numDims]float64, dataId interface{}) { + var branch d11branchT + branch.data = dataId + for axis := 0; axis < d11numDims; axis++ { + branch.rect.min[axis] = min[axis] + branch.rect.max[axis] = max[axis] + } + d11insertRect(&branch, &tr.root, 0) +} + +/// Remove entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d11RTree) Remove(min, max [d11numDims]float64, dataId interface{}) { + var rect d11rectT + for axis := 0; axis < d11numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + d11removeRect(&rect, dataId, &tr.root) +} + +/// Find all within d11search rectangle +/// \param a_min Min of d11search bounding rect +/// \param a_max Max of d11search bounding rect +/// \param a_searchResult d11search result array. Caller should set grow size. Function will reset, not append to array. +/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching +/// \param a_context User context to pass as parameter to a_resultCallback +/// \return Returns the number of entries found +func (tr *d11RTree) Search(min, max [d11numDims]float64, resultCallback func(data interface{}) bool) int { + var rect d11rectT + for axis := 0; axis < d11numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + foundCount, _ := d11search(tr.root, rect, 0, resultCallback) + return foundCount +} + +/// Count the data elements in this container. This is slow as no internal counter is maintained. 
+func (tr *d11RTree) Count() int { + var count int + d11countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d11RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d11nodeT{} +} + +func d11countRec(node *d11nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d11countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +func d11insertRectRec(branch *d11branchT, node *d11nodeT, newNode **d11nodeT, level int) bool { + // recurse until we reach the correct level for the new record. data records + // will always be called with a_level == 0 (leaf) + if node.level > level { + // Still above level for insertion, go down tree recursively + var otherNode *d11nodeT + //var newBranch d11branchT + + // find the optimal branch for this record + index := d11pickBranch(&branch.rect, node) + + // recursively insert this record into the picked branch + childWasSplit := d11insertRectRec(branch, node.branch[index].child, &otherNode, level) + + if !childWasSplit { + // Child was not split. Merge the bounding box of the new record with the + // existing bounding box + node.branch[index].rect = d11combineRect(&branch.rect, &(node.branch[index].rect)) + return false + } else { + // Child was split. The old branches are now re-partitioned to two nodes + // so we have to re-calculate the bounding boxes of each node + node.branch[index].rect = d11nodeCover(node.branch[index].child) + var newBranch d11branchT + newBranch.child = otherNode + newBranch.rect = d11nodeCover(otherNode) + + // The old node is already a child of a_node. Now add the newly-created + // node to a_node as well. a_node might be split because of that. + return d11addBranch(&newBranch, node, newNode) + } + } else if node.level == level { + // We have reached level for insertion. Add rect, split if necessary + return d11addBranch(branch, node, newNode) + } else { + // Should never occur + return false + } +} + +// Insert a data rectangle into an index structure. +// d11insertRect provides for splitting the root; +// returns 1 if root was split, 0 if it was not. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +// InsertRect2 does the recursion. 
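+
+// Root-split sketch (editor's illustration, not part of the generated
+// sources): d11insertRect below grows the tree by one level whenever the
+// recursive insert reports that the root itself had to be split. With
+// d11maxNodes == 8, the ninth entry forces exactly that.
+func d11rootSplitSketch() int {
+	tr := d11New()
+	var min, max [d11numDims]float64
+	for i := 0; i <= d11maxNodes; i++ { // d11maxNodes+1 insertions
+		for axis := 0; axis < d11numDims; axis++ {
+			min[axis] = float64(i)
+			max[axis] = float64(i) + 1
+		}
+		tr.Insert(min, max, i)
+	}
+	return tr.root.level // 1: a new root was installed above the split leaf
+}
+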
+// +func d11insertRect(branch *d11branchT, root **d11nodeT, level int) bool { + var newNode *d11nodeT + + if d11insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d11nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d11branchT + + // add old root node as a child of the new root + newBranch.rect = d11nodeCover(*root) + newBranch.child = *root + d11addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d11nodeCover(newNode) + newBranch.child = newNode + d11addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d11nodeCover(node *d11nodeT) d11rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d11combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d11addBranch(branch *d11branchT, node *d11nodeT, newNode **d11nodeT) bool { + if node.count < d11maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d11splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d11disconnectBranch(node *d11nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. Pick the one that will need the smallest increase +// in area to accomodate the new rectangle. This will result in the +// least total area for the covering rectangles in the current node. +// In case of a tie, pick the one which was smaller before, to get +// the best resolution when searching. +func d11pickBranch(rect *d11rectT, node *d11nodeT) int { + var firstTime bool = true + var increase float64 + var bestIncr float64 = -1 + var area float64 + var bestArea float64 + var best int + var tempRect d11rectT + + for index := 0; index < node.count; index++ { + curRect := &node.branch[index].rect + area = d11calcRectVolume(curRect) + tempRect = d11combineRect(rect, curRect) + increase = d11calcRectVolume(&tempRect) - area + if (increase < bestIncr) || firstTime { + best = index + bestArea = area + bestIncr = increase + firstTime = false + } else if (increase == bestIncr) && (area < bestArea) { + best = index + bestArea = area + bestIncr = increase + } + } + return best +} + +// Combine two rectangles into larger one containing both +func d11combineRect(rectA, rectB *d11rectT) d11rectT { + var newRect d11rectT + + for index := 0; index < d11numDims; index++ { + newRect.min[index] = d11fmin(rectA.min[index], rectB.min[index]) + newRect.max[index] = d11fmax(rectA.max[index], rectB.max[index]) + } + + return newRect +} + +// Split a node. +// Divides the nodes branches and the extra one between two nodes. +// Old node is one of the new ones, and one really new one is created. +// Tries more than one method for choosing a partition, uses best result. 
+func d11splitNode(node *d11nodeT, branch *d11branchT, newNode **d11nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d11partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d11getBranches(node, branch, parVars) + + // Find partition + d11choosePartition(parVars, d11minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d11nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d11loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d11rectVolume(rect *d11rectT) float64 { + var volume float64 = 1 + for index := 0; index < d11numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d11rectT +func d11rectSphericalVolume(rect *d11rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d11numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. + if d11numDims == 5 { + return (radius * radius * radius * radius * radius * d11unitSphereVolume) + } else if d11numDims == 4 { + return (radius * radius * radius * radius * d11unitSphereVolume) + } else if d11numDims == 3 { + return (radius * radius * radius * d11unitSphereVolume) + } else if d11numDims == 2 { + return (radius * radius * d11unitSphereVolume) + } else { + return (math.Pow(radius, d11numDims) * d11unitSphereVolume) + } +} + +// Use one of the methods to calculate retangle volume +func d11calcRectVolume(rect *d11rectT) float64 { + if d11useSphericalVolume { + return d11rectSphericalVolume(rect) // Slower but helps certain merge cases + } else { // RTREE_USE_SPHERICAL_VOLUME + return d11rectVolume(rect) // Faster but can cause poor merges + } // RTREE_USE_SPHERICAL_VOLUME +} + +// Load branch buffer with branches from full node plus the extra branch. +func d11getBranches(node *d11nodeT, branch *d11branchT, parVars *d11partitionVarsT) { + // Load the branch buffer + for index := 0; index < d11maxNodes; index++ { + parVars.branchBuf[index] = node.branch[index] + } + parVars.branchBuf[d11maxNodes] = *branch + parVars.branchCount = d11maxNodes + 1 + + // Calculate rect containing all in the set + parVars.coverSplit = parVars.branchBuf[0].rect + for index := 1; index < d11maxNodes+1; index++ { + parVars.coverSplit = d11combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect) + } + parVars.coverSplitArea = d11calcRectVolume(&parVars.coverSplit) +} + +// Method #0 for choosing a partition: +// As the seeds for the two groups, pick the two rects that would waste the +// most area if covered by a single rectangle, i.e. evidently the worst pair +// to have in the same group. +// Of the remaining, one at a time is chosen to be put in one of the two groups. +// The one chosen is the one with the greatest difference in area expansion +// depending on which group - the rect most strongly attracted to one group +// and repelled from the other. +// If one group gets too full (more would force other group to violate min +// fill requirement) then other group gets the rest. +// These last are the ones that can go in either group most easily. 
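+
+// Seed-picking sketch (editor's illustration, not part of the generated
+// sources) for Method #0 described above: the two rectangles that would
+// waste the most volume if covered together become the seeds of the two
+// groups. Here that is the cube at the origin and the far-away cube.
+func d11pickSeedsSketch() bool {
+	node := &d11nodeT{count: d11maxNodes}
+	for i := 0; i < d11maxNodes; i++ {
+		for axis := 0; axis < d11numDims; axis++ {
+			node.branch[i].rect.min[axis] = float64(i)
+			node.branch[i].rect.max[axis] = float64(i) + 1
+		}
+	}
+	var extra d11branchT
+	for axis := 0; axis < d11numDims; axis++ {
+		extra.rect.min[axis] = 100
+		extra.rect.max[axis] = 101
+	}
+	var pv d11partitionVarsT
+	d11getBranches(node, &extra, &pv) // buffer the 8+1 branches
+	d11initParVars(&pv, pv.branchCount, d11minNodes)
+	d11pickSeeds(&pv)
+	// Branch 0 (origin cube) seeds group 0, branch 8 (far cube) seeds group 1.
+	return pv.partition[0] == 0 && pv.partition[d11maxNodes] == 1
+}
+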
+func d11choosePartition(parVars *d11partitionVarsT, minFill int) { + var biggestDiff float64 + var group, chosen, betterGroup int + + d11initParVars(parVars, parVars.branchCount, minFill) + d11pickSeeds(parVars) + + for ((parVars.count[0] + parVars.count[1]) < parVars.total) && + (parVars.count[0] < (parVars.total - parVars.minFill)) && + (parVars.count[1] < (parVars.total - parVars.minFill)) { + biggestDiff = -1 + for index := 0; index < parVars.total; index++ { + if d11notTaken == parVars.partition[index] { + curRect := &parVars.branchBuf[index].rect + rect0 := d11combineRect(curRect, &parVars.cover[0]) + rect1 := d11combineRect(curRect, &parVars.cover[1]) + growth0 := d11calcRectVolume(&rect0) - parVars.area[0] + growth1 := d11calcRectVolume(&rect1) - parVars.area[1] + diff := growth1 - growth0 + if diff >= 0 { + group = 0 + } else { + group = 1 + diff = -diff + } + + if diff > biggestDiff { + biggestDiff = diff + chosen = index + betterGroup = group + } else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) { + chosen = index + betterGroup = group + } + } + } + d11classify(chosen, betterGroup, parVars) + } + + // If one group too full, put remaining rects in the other + if (parVars.count[0] + parVars.count[1]) < parVars.total { + if parVars.count[0] >= parVars.total-parVars.minFill { + group = 1 + } else { + group = 0 + } + for index := 0; index < parVars.total; index++ { + if d11notTaken == parVars.partition[index] { + d11classify(index, group, parVars) + } + } + } +} + +// Copy branches from the buffer into two nodes according to the partition. +func d11loadNodes(nodeA, nodeB *d11nodeT, parVars *d11partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d11nodeT{nodeA, nodeB} + + // It is assured that d11addBranch here will not cause a node split. + d11addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d11partitionVarsT structure. +func d11initParVars(parVars *d11partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d11notTaken + } +} + +func d11pickSeeds(parVars *d11partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d11maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d11calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d11combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d11calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d11classify(seed0, 0, parVars) + d11classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. 
+func d11classify(index, group int, parVars *d11partitionVarsT) { + parVars.partition[index] = group + + // Calculate combined rect + if parVars.count[group] == 0 { + parVars.cover[group] = parVars.branchBuf[index].rect + } else { + parVars.cover[group] = d11combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group]) + } + + // Calculate volume of combined rect + parVars.area[group] = d11calcRectVolume(&parVars.cover[group]) + + parVars.count[group]++ +} + +// Delete a data rectangle from an index structure. +// Pass in a pointer to a d11rectT, the tid of the record, ptr to ptr to root node. +// Returns 1 if record not found, 0 if success. +// d11removeRect provides for eliminating the root. +func d11removeRect(rect *d11rectT, id interface{}, root **d11nodeT) bool { + var reInsertList *d11listNodeT + + if !d11removeRectRec(rect, id, *root, &reInsertList) { + // Found and deleted a data item + // Reinsert any branches from eliminated nodes + for reInsertList != nil { + tempNode := reInsertList.node + + for index := 0; index < tempNode.count; index++ { + // TODO go over this code. should I use (tempNode->m_level - 1)? + d11insertRect(&tempNode.branch[index], root, tempNode.level) + } + reInsertList = reInsertList.next + } + + // Check for redundant root (not leaf, 1 child) and eliminate TODO replace + // if with while? In case there is a whole branch of redundant roots... + if (*root).count == 1 && (*root).isInternalNode() { + tempNode := (*root).branch[0].child + *root = tempNode + } + return false + } else { + return true + } +} + +// Delete a rectangle from non-root part of an index structure. +// Called by d11removeRect. Descends tree recursively, +// merges branches on the way back up. +// Returns 1 if record not found, 0 if success. +func d11removeRectRec(rect *d11rectT, id interface{}, node *d11nodeT, listNode **d11listNodeT) bool { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + if d11overlap(*rect, node.branch[index].rect) { + if !d11removeRectRec(rect, id, node.branch[index].child, listNode) { + if node.branch[index].child.count >= d11minNodes { + // child removed, just resize parent rect + node.branch[index].rect = d11nodeCover(node.branch[index].child) + } else { + // child removed, not enough entries in node, eliminate node + d11reInsert(node.branch[index].child, listNode) + d11disconnectBranch(node, index) // Must return after this call as count has changed + } + return false + } + } + } + return true + } else { // A leaf node + for index := 0; index < node.count; index++ { + if node.branch[index].data == id { + d11disconnectBranch(node, index) // Must return after this call as count has changed + return false + } + } + return true + } +} + +// Decide whether two rectangles d11overlap. +func d11overlap(rectA, rectB d11rectT) bool { + for index := 0; index < d11numDims; index++ { + if rectA.min[index] > rectB.max[index] || + rectB.min[index] > rectA.max[index] { + return false + } + } + return true +} + +// Add a node to the reinsertion list. All its branches will later +// be reinserted into the index structure. +func d11reInsert(node *d11nodeT, listNode **d11listNodeT) { + newListNode := &d11listNodeT{} + newListNode.node = node + newListNode.next = *listNode + *listNode = newListNode +} + +// d11search in an index tree or subtree for all data retangles that d11overlap the argument rectangle. 
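+
+// Search sketch (editor's illustration, not part of the generated sources):
+// d11search below visits only branches that overlap the query rectangle and
+// stops as soon as the callback returns false, so the returned count can be
+// smaller than the number of matching entries.
+func d11searchSketch() int {
+	tr := d11New()
+	var min, max [d11numDims]float64
+	for i := 0; i < 4; i++ {
+		for axis := 0; axis < d11numDims; axis++ {
+			min[axis] = float64(i)
+			max[axis] = float64(i) + 1
+		}
+		tr.Insert(min, max, i)
+	}
+	// Query a box covering everything, but ask to stop after the first hit.
+	for axis := 0; axis < d11numDims; axis++ {
+		min[axis], max[axis] = 0, 10
+	}
+	hits := tr.Search(min, max, func(_ interface{}) bool {
+		return false // stop after the first result
+	})
+	return hits // 1, even though all four entries overlap the query
+}
+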
+func d11search(node *d11nodeT, rect d11rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) { + if node.isInternalNode() { + // This is an internal node in the tree + for index := 0; index < node.count; index++ { + if d11overlap(rect, node.branch[index].rect) { + var ok bool + foundCount, ok = d11search(node.branch[index].child, rect, foundCount, resultCallback) + if !ok { + // The callback indicated to stop searching + return foundCount, false + } + } + } + } else { + // This is a leaf node + for index := 0; index < node.count; index++ { + if d11overlap(rect, node.branch[index].rect) { + id := node.branch[index].data + foundCount++ + if !resultCallback(id) { + return foundCount, false // Don't continue searching + } + + } + } + } + return foundCount, true // Continue searching +} + +func d12fmin(a, b float64) float64 { + if a < b { + return a + } + return b +} +func d12fmax(a, b float64) float64 { + if a > b { + return a + } + return b +} + +const ( + d12numDims = 12 + d12maxNodes = 8 + d12minNodes = d12maxNodes / 2 + d12useSphericalVolume = true // Better split classification, may be slower on some systems +) + +var d12unitSphereVolume = []float64{ + 0.000000, 2.000000, 3.141593, // Dimension 0,1,2 + 4.188790, 4.934802, 5.263789, // Dimension 3,4,5 + 5.167713, 4.724766, 4.058712, // Dimension 6,7,8 + 3.298509, 2.550164, 1.884104, // Dimension 9,10,11 + 1.335263, 0.910629, 0.599265, // Dimension 12,13,14 + 0.381443, 0.235331, 0.140981, // Dimension 15,16,17 + 0.082146, 0.046622, 0.025807, // Dimension 18,19,20 +}[d12numDims] + +type d12RTree struct { + root *d12nodeT ///< Root of tree +} + +/// Minimal bounding rectangle (n-dimensional) +type d12rectT struct { + min [d12numDims]float64 ///< Min dimensions of bounding box + max [d12numDims]float64 ///< Max dimensions of bounding box +} + +/// May be data or may be another subtree +/// The parents level determines this. +/// If the parents level is 0, then this is data +type d12branchT struct { + rect d12rectT ///< Bounds + child *d12nodeT ///< Child node + data interface{} ///< Data Id or Ptr +} + +/// d12nodeT for each branch level +type d12nodeT struct { + count int ///< Count + level int ///< Leaf is zero, others positive + branch [d12maxNodes]d12branchT ///< Branch +} + +func (node *d12nodeT) isInternalNode() bool { + return (node.level > 0) // Not a leaf, but a internal node +} +func (node *d12nodeT) isLeaf() bool { + return (node.level == 0) // A leaf, contains data +} + +/// A link list of nodes for reinsertion after a delete operation +type d12listNodeT struct { + next *d12listNodeT ///< Next in list + node *d12nodeT ///< Node +} + +const d12notTaken = -1 // indicates that position + +/// Variables for finding a split partition +type d12partitionVarsT struct { + partition [d12maxNodes + 1]int + total int + minFill int + count [2]int + cover [2]d12rectT + area [2]float64 + + branchBuf [d12maxNodes + 1]d12branchT + branchCount int + coverSplit d12rectT + coverSplitArea float64 +} + +func d12New() *d12RTree { + // We only support machine word size simple data type eg. integer index or object pointer. + // Since we are storing as union with non data branch + return &d12RTree{ + root: &d12nodeT{}, + } +} + +/// Insert entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. 
+func (tr *d12RTree) Insert(min, max [d12numDims]float64, dataId interface{}) { + var branch d12branchT + branch.data = dataId + for axis := 0; axis < d12numDims; axis++ { + branch.rect.min[axis] = min[axis] + branch.rect.max[axis] = max[axis] + } + d12insertRect(&branch, &tr.root, 0) +} + +/// Remove entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d12RTree) Remove(min, max [d12numDims]float64, dataId interface{}) { + var rect d12rectT + for axis := 0; axis < d12numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + d12removeRect(&rect, dataId, &tr.root) +} + +/// Find all within d12search rectangle +/// \param a_min Min of d12search bounding rect +/// \param a_max Max of d12search bounding rect +/// \param a_searchResult d12search result array. Caller should set grow size. Function will reset, not append to array. +/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching +/// \param a_context User context to pass as parameter to a_resultCallback +/// \return Returns the number of entries found +func (tr *d12RTree) Search(min, max [d12numDims]float64, resultCallback func(data interface{}) bool) int { + var rect d12rectT + for axis := 0; axis < d12numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + foundCount, _ := d12search(tr.root, rect, 0, resultCallback) + return foundCount +} + +/// Count the data elements in this container. This is slow as no internal counter is maintained. +func (tr *d12RTree) Count() int { + var count int + d12countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d12RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d12nodeT{} +} + +func d12countRec(node *d12nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d12countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +func d12insertRectRec(branch *d12branchT, node *d12nodeT, newNode **d12nodeT, level int) bool { + // recurse until we reach the correct level for the new record. data records + // will always be called with a_level == 0 (leaf) + if node.level > level { + // Still above level for insertion, go down tree recursively + var otherNode *d12nodeT + //var newBranch d12branchT + + // find the optimal branch for this record + index := d12pickBranch(&branch.rect, node) + + // recursively insert this record into the picked branch + childWasSplit := d12insertRectRec(branch, node.branch[index].child, &otherNode, level) + + if !childWasSplit { + // Child was not split. Merge the bounding box of the new record with the + // existing bounding box + node.branch[index].rect = d12combineRect(&branch.rect, &(node.branch[index].rect)) + return false + } else { + // Child was split. 
The old branches are now re-partitioned to two nodes + // so we have to re-calculate the bounding boxes of each node + node.branch[index].rect = d12nodeCover(node.branch[index].child) + var newBranch d12branchT + newBranch.child = otherNode + newBranch.rect = d12nodeCover(otherNode) + + // The old node is already a child of a_node. Now add the newly-created + // node to a_node as well. a_node might be split because of that. + return d12addBranch(&newBranch, node, newNode) + } + } else if node.level == level { + // We have reached level for insertion. Add rect, split if necessary + return d12addBranch(branch, node, newNode) + } else { + // Should never occur + return false + } +} + +// Insert a data rectangle into an index structure. +// d12insertRect provides for splitting the root; +// returns 1 if root was split, 0 if it was not. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +// InsertRect2 does the recursion. +// +func d12insertRect(branch *d12branchT, root **d12nodeT, level int) bool { + var newNode *d12nodeT + + if d12insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d12nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d12branchT + + // add old root node as a child of the new root + newBranch.rect = d12nodeCover(*root) + newBranch.child = *root + d12addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d12nodeCover(newNode) + newBranch.child = newNode + d12addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d12nodeCover(node *d12nodeT) d12rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d12combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d12addBranch(branch *d12branchT, node *d12nodeT, newNode **d12nodeT) bool { + if node.count < d12maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d12splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d12disconnectBranch(node *d12nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. Pick the one that will need the smallest increase +// in area to accomodate the new rectangle. This will result in the +// least total area for the covering rectangles in the current node. +// In case of a tie, pick the one which was smaller before, to get +// the best resolution when searching. 
+func d12pickBranch(rect *d12rectT, node *d12nodeT) int { + var firstTime bool = true + var increase float64 + var bestIncr float64 = -1 + var area float64 + var bestArea float64 + var best int + var tempRect d12rectT + + for index := 0; index < node.count; index++ { + curRect := &node.branch[index].rect + area = d12calcRectVolume(curRect) + tempRect = d12combineRect(rect, curRect) + increase = d12calcRectVolume(&tempRect) - area + if (increase < bestIncr) || firstTime { + best = index + bestArea = area + bestIncr = increase + firstTime = false + } else if (increase == bestIncr) && (area < bestArea) { + best = index + bestArea = area + bestIncr = increase + } + } + return best +} + +// Combine two rectangles into larger one containing both +func d12combineRect(rectA, rectB *d12rectT) d12rectT { + var newRect d12rectT + + for index := 0; index < d12numDims; index++ { + newRect.min[index] = d12fmin(rectA.min[index], rectB.min[index]) + newRect.max[index] = d12fmax(rectA.max[index], rectB.max[index]) + } + + return newRect +} + +// Split a node. +// Divides the nodes branches and the extra one between two nodes. +// Old node is one of the new ones, and one really new one is created. +// Tries more than one method for choosing a partition, uses best result. +func d12splitNode(node *d12nodeT, branch *d12branchT, newNode **d12nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d12partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d12getBranches(node, branch, parVars) + + // Find partition + d12choosePartition(parVars, d12minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d12nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d12loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d12rectVolume(rect *d12rectT) float64 { + var volume float64 = 1 + for index := 0; index < d12numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d12rectT +func d12rectSphericalVolume(rect *d12rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d12numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. + if d12numDims == 5 { + return (radius * radius * radius * radius * radius * d12unitSphereVolume) + } else if d12numDims == 4 { + return (radius * radius * radius * radius * d12unitSphereVolume) + } else if d12numDims == 3 { + return (radius * radius * radius * d12unitSphereVolume) + } else if d12numDims == 2 { + return (radius * radius * d12unitSphereVolume) + } else { + return (math.Pow(radius, d12numDims) * d12unitSphereVolume) + } +} + +// Use one of the methods to calculate retangle volume +func d12calcRectVolume(rect *d12rectT) float64 { + if d12useSphericalVolume { + return d12rectSphericalVolume(rect) // Slower but helps certain merge cases + } else { // RTREE_USE_SPHERICAL_VOLUME + return d12rectVolume(rect) // Faster but can cause poor merges + } // RTREE_USE_SPHERICAL_VOLUME +} + +// Load branch buffer with branches from full node plus the extra branch. 
+func d12getBranches(node *d12nodeT, branch *d12branchT, parVars *d12partitionVarsT) { + // Load the branch buffer + for index := 0; index < d12maxNodes; index++ { + parVars.branchBuf[index] = node.branch[index] + } + parVars.branchBuf[d12maxNodes] = *branch + parVars.branchCount = d12maxNodes + 1 + + // Calculate rect containing all in the set + parVars.coverSplit = parVars.branchBuf[0].rect + for index := 1; index < d12maxNodes+1; index++ { + parVars.coverSplit = d12combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect) + } + parVars.coverSplitArea = d12calcRectVolume(&parVars.coverSplit) +} + +// Method #0 for choosing a partition: +// As the seeds for the two groups, pick the two rects that would waste the +// most area if covered by a single rectangle, i.e. evidently the worst pair +// to have in the same group. +// Of the remaining, one at a time is chosen to be put in one of the two groups. +// The one chosen is the one with the greatest difference in area expansion +// depending on which group - the rect most strongly attracted to one group +// and repelled from the other. +// If one group gets too full (more would force other group to violate min +// fill requirement) then other group gets the rest. +// These last are the ones that can go in either group most easily. +func d12choosePartition(parVars *d12partitionVarsT, minFill int) { + var biggestDiff float64 + var group, chosen, betterGroup int + + d12initParVars(parVars, parVars.branchCount, minFill) + d12pickSeeds(parVars) + + for ((parVars.count[0] + parVars.count[1]) < parVars.total) && + (parVars.count[0] < (parVars.total - parVars.minFill)) && + (parVars.count[1] < (parVars.total - parVars.minFill)) { + biggestDiff = -1 + for index := 0; index < parVars.total; index++ { + if d12notTaken == parVars.partition[index] { + curRect := &parVars.branchBuf[index].rect + rect0 := d12combineRect(curRect, &parVars.cover[0]) + rect1 := d12combineRect(curRect, &parVars.cover[1]) + growth0 := d12calcRectVolume(&rect0) - parVars.area[0] + growth1 := d12calcRectVolume(&rect1) - parVars.area[1] + diff := growth1 - growth0 + if diff >= 0 { + group = 0 + } else { + group = 1 + diff = -diff + } + + if diff > biggestDiff { + biggestDiff = diff + chosen = index + betterGroup = group + } else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) { + chosen = index + betterGroup = group + } + } + } + d12classify(chosen, betterGroup, parVars) + } + + // If one group too full, put remaining rects in the other + if (parVars.count[0] + parVars.count[1]) < parVars.total { + if parVars.count[0] >= parVars.total-parVars.minFill { + group = 1 + } else { + group = 0 + } + for index := 0; index < parVars.total; index++ { + if d12notTaken == parVars.partition[index] { + d12classify(index, group, parVars) + } + } + } +} + +// Copy branches from the buffer into two nodes according to the partition. +func d12loadNodes(nodeA, nodeB *d12nodeT, parVars *d12partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d12nodeT{nodeA, nodeB} + + // It is assured that d12addBranch here will not cause a node split. + d12addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d12partitionVarsT structure. 
+func d12initParVars(parVars *d12partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d12notTaken + } +} + +func d12pickSeeds(parVars *d12partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d12maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d12calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d12combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d12calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d12classify(seed0, 0, parVars) + d12classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. +func d12classify(index, group int, parVars *d12partitionVarsT) { + parVars.partition[index] = group + + // Calculate combined rect + if parVars.count[group] == 0 { + parVars.cover[group] = parVars.branchBuf[index].rect + } else { + parVars.cover[group] = d12combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group]) + } + + // Calculate volume of combined rect + parVars.area[group] = d12calcRectVolume(&parVars.cover[group]) + + parVars.count[group]++ +} + +// Delete a data rectangle from an index structure. +// Pass in a pointer to a d12rectT, the tid of the record, ptr to ptr to root node. +// Returns 1 if record not found, 0 if success. +// d12removeRect provides for eliminating the root. +func d12removeRect(rect *d12rectT, id interface{}, root **d12nodeT) bool { + var reInsertList *d12listNodeT + + if !d12removeRectRec(rect, id, *root, &reInsertList) { + // Found and deleted a data item + // Reinsert any branches from eliminated nodes + for reInsertList != nil { + tempNode := reInsertList.node + + for index := 0; index < tempNode.count; index++ { + // TODO go over this code. should I use (tempNode->m_level - 1)? + d12insertRect(&tempNode.branch[index], root, tempNode.level) + } + reInsertList = reInsertList.next + } + + // Check for redundant root (not leaf, 1 child) and eliminate TODO replace + // if with while? In case there is a whole branch of redundant roots... + if (*root).count == 1 && (*root).isInternalNode() { + tempNode := (*root).branch[0].child + *root = tempNode + } + return false + } else { + return true + } +} + +// Delete a rectangle from non-root part of an index structure. +// Called by d12removeRect. Descends tree recursively, +// merges branches on the way back up. +// Returns 1 if record not found, 0 if success. 
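+
+// Overlap sketch (editor's illustration, not part of the generated sources):
+// the removal walk below only recurses into branches whose rectangles
+// overlap the query, and d12overlap treats a single separating axis as
+// enough to rule a pair out.
+func d12overlapSketch() bool {
+	var a, b d12rectT
+	for axis := 0; axis < d12numDims; axis++ {
+		a.max[axis] = 2                 // a spans [0,2] on every axis
+		b.min[axis], b.max[axis] = 1, 3 // b spans [1,3] on every axis...
+	}
+	b.min[0], b.max[0] = 5, 6 // ...except axis 0, where it is disjoint
+	return d12overlap(a, b)   // false
+}
+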
+func d12removeRectRec(rect *d12rectT, id interface{}, node *d12nodeT, listNode **d12listNodeT) bool { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + if d12overlap(*rect, node.branch[index].rect) { + if !d12removeRectRec(rect, id, node.branch[index].child, listNode) { + if node.branch[index].child.count >= d12minNodes { + // child removed, just resize parent rect + node.branch[index].rect = d12nodeCover(node.branch[index].child) + } else { + // child removed, not enough entries in node, eliminate node + d12reInsert(node.branch[index].child, listNode) + d12disconnectBranch(node, index) // Must return after this call as count has changed + } + return false + } + } + } + return true + } else { // A leaf node + for index := 0; index < node.count; index++ { + if node.branch[index].data == id { + d12disconnectBranch(node, index) // Must return after this call as count has changed + return false + } + } + return true + } +} + +// Decide whether two rectangles d12overlap. +func d12overlap(rectA, rectB d12rectT) bool { + for index := 0; index < d12numDims; index++ { + if rectA.min[index] > rectB.max[index] || + rectB.min[index] > rectA.max[index] { + return false + } + } + return true +} + +// Add a node to the reinsertion list. All its branches will later +// be reinserted into the index structure. +func d12reInsert(node *d12nodeT, listNode **d12listNodeT) { + newListNode := &d12listNodeT{} + newListNode.node = node + newListNode.next = *listNode + *listNode = newListNode +} + +// d12search in an index tree or subtree for all data retangles that d12overlap the argument rectangle. +func d12search(node *d12nodeT, rect d12rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) { + if node.isInternalNode() { + // This is an internal node in the tree + for index := 0; index < node.count; index++ { + if d12overlap(rect, node.branch[index].rect) { + var ok bool + foundCount, ok = d12search(node.branch[index].child, rect, foundCount, resultCallback) + if !ok { + // The callback indicated to stop searching + return foundCount, false + } + } + } + } else { + // This is a leaf node + for index := 0; index < node.count; index++ { + if d12overlap(rect, node.branch[index].rect) { + id := node.branch[index].data + foundCount++ + if !resultCallback(id) { + return foundCount, false // Don't continue searching + } + + } + } + } + return foundCount, true // Continue searching +} + +func d13fmin(a, b float64) float64 { + if a < b { + return a + } + return b +} +func d13fmax(a, b float64) float64 { + if a > b { + return a + } + return b +} + +const ( + d13numDims = 13 + d13maxNodes = 8 + d13minNodes = d13maxNodes / 2 + d13useSphericalVolume = true // Better split classification, may be slower on some systems +) + +var d13unitSphereVolume = []float64{ + 0.000000, 2.000000, 3.141593, // Dimension 0,1,2 + 4.188790, 4.934802, 5.263789, // Dimension 3,4,5 + 5.167713, 4.724766, 4.058712, // Dimension 6,7,8 + 3.298509, 2.550164, 1.884104, // Dimension 9,10,11 + 1.335263, 0.910629, 0.599265, // Dimension 12,13,14 + 0.381443, 0.235331, 0.140981, // Dimension 15,16,17 + 0.082146, 0.046622, 0.025807, // Dimension 18,19,20 +}[d13numDims] + +type d13RTree struct { + root *d13nodeT ///< Root of tree +} + +/// Minimal bounding rectangle (n-dimensional) +type d13rectT struct { + min [d13numDims]float64 ///< Min dimensions of bounding box + max [d13numDims]float64 ///< Max dimensions of bounding box +} + +/// May be data or may be another subtree +/// The 
parents level determines this. +/// If the parents level is 0, then this is data +type d13branchT struct { + rect d13rectT ///< Bounds + child *d13nodeT ///< Child node + data interface{} ///< Data Id or Ptr +} + +/// d13nodeT for each branch level +type d13nodeT struct { + count int ///< Count + level int ///< Leaf is zero, others positive + branch [d13maxNodes]d13branchT ///< Branch +} + +func (node *d13nodeT) isInternalNode() bool { + return (node.level > 0) // Not a leaf, but a internal node +} +func (node *d13nodeT) isLeaf() bool { + return (node.level == 0) // A leaf, contains data +} + +/// A link list of nodes for reinsertion after a delete operation +type d13listNodeT struct { + next *d13listNodeT ///< Next in list + node *d13nodeT ///< Node +} + +const d13notTaken = -1 // indicates that position + +/// Variables for finding a split partition +type d13partitionVarsT struct { + partition [d13maxNodes + 1]int + total int + minFill int + count [2]int + cover [2]d13rectT + area [2]float64 + + branchBuf [d13maxNodes + 1]d13branchT + branchCount int + coverSplit d13rectT + coverSplitArea float64 +} + +func d13New() *d13RTree { + // We only support machine word size simple data type eg. integer index or object pointer. + // Since we are storing as union with non data branch + return &d13RTree{ + root: &d13nodeT{}, + } +} + +/// Insert entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d13RTree) Insert(min, max [d13numDims]float64, dataId interface{}) { + var branch d13branchT + branch.data = dataId + for axis := 0; axis < d13numDims; axis++ { + branch.rect.min[axis] = min[axis] + branch.rect.max[axis] = max[axis] + } + d13insertRect(&branch, &tr.root, 0) +} + +/// Remove entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d13RTree) Remove(min, max [d13numDims]float64, dataId interface{}) { + var rect d13rectT + for axis := 0; axis < d13numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + d13removeRect(&rect, dataId, &tr.root) +} + +/// Find all within d13search rectangle +/// \param a_min Min of d13search bounding rect +/// \param a_max Max of d13search bounding rect +/// \param a_searchResult d13search result array. Caller should set grow size. Function will reset, not append to array. +/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching +/// \param a_context User context to pass as parameter to a_resultCallback +/// \return Returns the number of entries found +func (tr *d13RTree) Search(min, max [d13numDims]float64, resultCallback func(data interface{}) bool) int { + var rect d13rectT + for axis := 0; axis < d13numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + foundCount, _ := d13search(tr.root, rect, 0, resultCallback) + return foundCount +} + +/// Count the data elements in this container. This is slow as no internal counter is maintained. 
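+
+// Count sketch (editor's illustration, not part of the generated sources):
+// Count below keeps no cached size; it walks every node and sums the leaf
+// entries, so it simply reports how many rectangles were inserted.
+func d13countSketch() int {
+	tr := d13New()
+	var min, max [d13numDims]float64
+	for i := 0; i < 3; i++ {
+		for axis := 0; axis < d13numDims; axis++ {
+			min[axis] = float64(i)
+			max[axis] = float64(i) + 1
+		}
+		tr.Insert(min, max, i)
+	}
+	return tr.Count() // 3
+}
+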
+func (tr *d13RTree) Count() int { + var count int + d13countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d13RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d13nodeT{} +} + +func d13countRec(node *d13nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d13countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +func d13insertRectRec(branch *d13branchT, node *d13nodeT, newNode **d13nodeT, level int) bool { + // recurse until we reach the correct level for the new record. data records + // will always be called with a_level == 0 (leaf) + if node.level > level { + // Still above level for insertion, go down tree recursively + var otherNode *d13nodeT + //var newBranch d13branchT + + // find the optimal branch for this record + index := d13pickBranch(&branch.rect, node) + + // recursively insert this record into the picked branch + childWasSplit := d13insertRectRec(branch, node.branch[index].child, &otherNode, level) + + if !childWasSplit { + // Child was not split. Merge the bounding box of the new record with the + // existing bounding box + node.branch[index].rect = d13combineRect(&branch.rect, &(node.branch[index].rect)) + return false + } else { + // Child was split. The old branches are now re-partitioned to two nodes + // so we have to re-calculate the bounding boxes of each node + node.branch[index].rect = d13nodeCover(node.branch[index].child) + var newBranch d13branchT + newBranch.child = otherNode + newBranch.rect = d13nodeCover(otherNode) + + // The old node is already a child of a_node. Now add the newly-created + // node to a_node as well. a_node might be split because of that. + return d13addBranch(&newBranch, node, newNode) + } + } else if node.level == level { + // We have reached level for insertion. Add rect, split if necessary + return d13addBranch(branch, node, newNode) + } else { + // Should never occur + return false + } +} + +// Insert a data rectangle into an index structure. +// d13insertRect provides for splitting the root; +// returns 1 if root was split, 0 if it was not. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +// InsertRect2 does the recursion. 
+// +func d13insertRect(branch *d13branchT, root **d13nodeT, level int) bool { + var newNode *d13nodeT + + if d13insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d13nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d13branchT + + // add old root node as a child of the new root + newBranch.rect = d13nodeCover(*root) + newBranch.child = *root + d13addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d13nodeCover(newNode) + newBranch.child = newNode + d13addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d13nodeCover(node *d13nodeT) d13rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d13combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d13addBranch(branch *d13branchT, node *d13nodeT, newNode **d13nodeT) bool { + if node.count < d13maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d13splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d13disconnectBranch(node *d13nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. Pick the one that will need the smallest increase +// in area to accomodate the new rectangle. This will result in the +// least total area for the covering rectangles in the current node. +// In case of a tie, pick the one which was smaller before, to get +// the best resolution when searching. +func d13pickBranch(rect *d13rectT, node *d13nodeT) int { + var firstTime bool = true + var increase float64 + var bestIncr float64 = -1 + var area float64 + var bestArea float64 + var best int + var tempRect d13rectT + + for index := 0; index < node.count; index++ { + curRect := &node.branch[index].rect + area = d13calcRectVolume(curRect) + tempRect = d13combineRect(rect, curRect) + increase = d13calcRectVolume(&tempRect) - area + if (increase < bestIncr) || firstTime { + best = index + bestArea = area + bestIncr = increase + firstTime = false + } else if (increase == bestIncr) && (area < bestArea) { + best = index + bestArea = area + bestIncr = increase + } + } + return best +} + +// Combine two rectangles into larger one containing both +func d13combineRect(rectA, rectB *d13rectT) d13rectT { + var newRect d13rectT + + for index := 0; index < d13numDims; index++ { + newRect.min[index] = d13fmin(rectA.min[index], rectB.min[index]) + newRect.max[index] = d13fmax(rectA.max[index], rectB.max[index]) + } + + return newRect +} + +// Split a node. +// Divides the nodes branches and the extra one between two nodes. +// Old node is one of the new ones, and one really new one is created. +// Tries more than one method for choosing a partition, uses best result. 
+func d13splitNode(node *d13nodeT, branch *d13branchT, newNode **d13nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d13partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d13getBranches(node, branch, parVars) + + // Find partition + d13choosePartition(parVars, d13minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d13nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d13loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d13rectVolume(rect *d13rectT) float64 { + var volume float64 = 1 + for index := 0; index < d13numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d13rectT +func d13rectSphericalVolume(rect *d13rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d13numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. + if d13numDims == 5 { + return (radius * radius * radius * radius * radius * d13unitSphereVolume) + } else if d13numDims == 4 { + return (radius * radius * radius * radius * d13unitSphereVolume) + } else if d13numDims == 3 { + return (radius * radius * radius * d13unitSphereVolume) + } else if d13numDims == 2 { + return (radius * radius * d13unitSphereVolume) + } else { + return (math.Pow(radius, d13numDims) * d13unitSphereVolume) + } +} + +// Use one of the methods to calculate retangle volume +func d13calcRectVolume(rect *d13rectT) float64 { + if d13useSphericalVolume { + return d13rectSphericalVolume(rect) // Slower but helps certain merge cases + } else { // RTREE_USE_SPHERICAL_VOLUME + return d13rectVolume(rect) // Faster but can cause poor merges + } // RTREE_USE_SPHERICAL_VOLUME +} + +// Load branch buffer with branches from full node plus the extra branch. +func d13getBranches(node *d13nodeT, branch *d13branchT, parVars *d13partitionVarsT) { + // Load the branch buffer + for index := 0; index < d13maxNodes; index++ { + parVars.branchBuf[index] = node.branch[index] + } + parVars.branchBuf[d13maxNodes] = *branch + parVars.branchCount = d13maxNodes + 1 + + // Calculate rect containing all in the set + parVars.coverSplit = parVars.branchBuf[0].rect + for index := 1; index < d13maxNodes+1; index++ { + parVars.coverSplit = d13combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect) + } + parVars.coverSplitArea = d13calcRectVolume(&parVars.coverSplit) +} + +// Method #0 for choosing a partition: +// As the seeds for the two groups, pick the two rects that would waste the +// most area if covered by a single rectangle, i.e. evidently the worst pair +// to have in the same group. +// Of the remaining, one at a time is chosen to be put in one of the two groups. +// The one chosen is the one with the greatest difference in area expansion +// depending on which group - the rect most strongly attracted to one group +// and repelled from the other. +// If one group gets too full (more would force other group to violate min +// fill requirement) then other group gets the rest. +// These last are the ones that can go in either group most easily. 
+func d13choosePartition(parVars *d13partitionVarsT, minFill int) { + var biggestDiff float64 + var group, chosen, betterGroup int + + d13initParVars(parVars, parVars.branchCount, minFill) + d13pickSeeds(parVars) + + for ((parVars.count[0] + parVars.count[1]) < parVars.total) && + (parVars.count[0] < (parVars.total - parVars.minFill)) && + (parVars.count[1] < (parVars.total - parVars.minFill)) { + biggestDiff = -1 + for index := 0; index < parVars.total; index++ { + if d13notTaken == parVars.partition[index] { + curRect := &parVars.branchBuf[index].rect + rect0 := d13combineRect(curRect, &parVars.cover[0]) + rect1 := d13combineRect(curRect, &parVars.cover[1]) + growth0 := d13calcRectVolume(&rect0) - parVars.area[0] + growth1 := d13calcRectVolume(&rect1) - parVars.area[1] + diff := growth1 - growth0 + if diff >= 0 { + group = 0 + } else { + group = 1 + diff = -diff + } + + if diff > biggestDiff { + biggestDiff = diff + chosen = index + betterGroup = group + } else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) { + chosen = index + betterGroup = group + } + } + } + d13classify(chosen, betterGroup, parVars) + } + + // If one group too full, put remaining rects in the other + if (parVars.count[0] + parVars.count[1]) < parVars.total { + if parVars.count[0] >= parVars.total-parVars.minFill { + group = 1 + } else { + group = 0 + } + for index := 0; index < parVars.total; index++ { + if d13notTaken == parVars.partition[index] { + d13classify(index, group, parVars) + } + } + } +} + +// Copy branches from the buffer into two nodes according to the partition. +func d13loadNodes(nodeA, nodeB *d13nodeT, parVars *d13partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d13nodeT{nodeA, nodeB} + + // It is assured that d13addBranch here will not cause a node split. + d13addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d13partitionVarsT structure. +func d13initParVars(parVars *d13partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d13notTaken + } +} + +func d13pickSeeds(parVars *d13partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d13maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d13calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d13combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d13calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d13classify(seed0, 0, parVars) + d13classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. 
+func d13classify(index, group int, parVars *d13partitionVarsT) { + parVars.partition[index] = group + + // Calculate combined rect + if parVars.count[group] == 0 { + parVars.cover[group] = parVars.branchBuf[index].rect + } else { + parVars.cover[group] = d13combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group]) + } + + // Calculate volume of combined rect + parVars.area[group] = d13calcRectVolume(&parVars.cover[group]) + + parVars.count[group]++ +} + +// Delete a data rectangle from an index structure. +// Pass in a pointer to a d13rectT, the tid of the record, ptr to ptr to root node. +// Returns 1 if record not found, 0 if success. +// d13removeRect provides for eliminating the root. +func d13removeRect(rect *d13rectT, id interface{}, root **d13nodeT) bool { + var reInsertList *d13listNodeT + + if !d13removeRectRec(rect, id, *root, &reInsertList) { + // Found and deleted a data item + // Reinsert any branches from eliminated nodes + for reInsertList != nil { + tempNode := reInsertList.node + + for index := 0; index < tempNode.count; index++ { + // TODO go over this code. should I use (tempNode->m_level - 1)? + d13insertRect(&tempNode.branch[index], root, tempNode.level) + } + reInsertList = reInsertList.next + } + + // Check for redundant root (not leaf, 1 child) and eliminate TODO replace + // if with while? In case there is a whole branch of redundant roots... + if (*root).count == 1 && (*root).isInternalNode() { + tempNode := (*root).branch[0].child + *root = tempNode + } + return false + } else { + return true + } +} + +// Delete a rectangle from non-root part of an index structure. +// Called by d13removeRect. Descends tree recursively, +// merges branches on the way back up. +// Returns 1 if record not found, 0 if success. +func d13removeRectRec(rect *d13rectT, id interface{}, node *d13nodeT, listNode **d13listNodeT) bool { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + if d13overlap(*rect, node.branch[index].rect) { + if !d13removeRectRec(rect, id, node.branch[index].child, listNode) { + if node.branch[index].child.count >= d13minNodes { + // child removed, just resize parent rect + node.branch[index].rect = d13nodeCover(node.branch[index].child) + } else { + // child removed, not enough entries in node, eliminate node + d13reInsert(node.branch[index].child, listNode) + d13disconnectBranch(node, index) // Must return after this call as count has changed + } + return false + } + } + } + return true + } else { // A leaf node + for index := 0; index < node.count; index++ { + if node.branch[index].data == id { + d13disconnectBranch(node, index) // Must return after this call as count has changed + return false + } + } + return true + } +} + +// Decide whether two rectangles d13overlap. +func d13overlap(rectA, rectB d13rectT) bool { + for index := 0; index < d13numDims; index++ { + if rectA.min[index] > rectB.max[index] || + rectB.min[index] > rectA.max[index] { + return false + } + } + return true +} + +// Add a node to the reinsertion list. All its branches will later +// be reinserted into the index structure. +func d13reInsert(node *d13nodeT, listNode **d13listNodeT) { + newListNode := &d13listNodeT{} + newListNode.node = node + newListNode.next = *listNode + *listNode = newListNode +} + +// d13search in an index tree or subtree for all data retangles that d13overlap the argument rectangle. 
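+//
+// Illustrative sketch (assumed names tr, min, max and k; not from the original
+// source): collecting at most k hits through the public Search wrapper by
+// returning false from the callback to stop the traversal early.
+//
+//	var hits []interface{}
+//	k := 10
+//	tr.Search(min, max, func(data interface{}) bool {
+//		hits = append(hits, data)
+//		return len(hits) < k // false aborts the remaining search
+//	})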
+func d13search(node *d13nodeT, rect d13rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) { + if node.isInternalNode() { + // This is an internal node in the tree + for index := 0; index < node.count; index++ { + if d13overlap(rect, node.branch[index].rect) { + var ok bool + foundCount, ok = d13search(node.branch[index].child, rect, foundCount, resultCallback) + if !ok { + // The callback indicated to stop searching + return foundCount, false + } + } + } + } else { + // This is a leaf node + for index := 0; index < node.count; index++ { + if d13overlap(rect, node.branch[index].rect) { + id := node.branch[index].data + foundCount++ + if !resultCallback(id) { + return foundCount, false // Don't continue searching + } + + } + } + } + return foundCount, true // Continue searching +} + +func d14fmin(a, b float64) float64 { + if a < b { + return a + } + return b +} +func d14fmax(a, b float64) float64 { + if a > b { + return a + } + return b +} + +const ( + d14numDims = 14 + d14maxNodes = 8 + d14minNodes = d14maxNodes / 2 + d14useSphericalVolume = true // Better split classification, may be slower on some systems +) + +var d14unitSphereVolume = []float64{ + 0.000000, 2.000000, 3.141593, // Dimension 0,1,2 + 4.188790, 4.934802, 5.263789, // Dimension 3,4,5 + 5.167713, 4.724766, 4.058712, // Dimension 6,7,8 + 3.298509, 2.550164, 1.884104, // Dimension 9,10,11 + 1.335263, 0.910629, 0.599265, // Dimension 12,13,14 + 0.381443, 0.235331, 0.140981, // Dimension 15,16,17 + 0.082146, 0.046622, 0.025807, // Dimension 18,19,20 +}[d14numDims] + +type d14RTree struct { + root *d14nodeT ///< Root of tree +} + +/// Minimal bounding rectangle (n-dimensional) +type d14rectT struct { + min [d14numDims]float64 ///< Min dimensions of bounding box + max [d14numDims]float64 ///< Max dimensions of bounding box +} + +/// May be data or may be another subtree +/// The parents level determines this. +/// If the parents level is 0, then this is data +type d14branchT struct { + rect d14rectT ///< Bounds + child *d14nodeT ///< Child node + data interface{} ///< Data Id or Ptr +} + +/// d14nodeT for each branch level +type d14nodeT struct { + count int ///< Count + level int ///< Leaf is zero, others positive + branch [d14maxNodes]d14branchT ///< Branch +} + +func (node *d14nodeT) isInternalNode() bool { + return (node.level > 0) // Not a leaf, but a internal node +} +func (node *d14nodeT) isLeaf() bool { + return (node.level == 0) // A leaf, contains data +} + +/// A link list of nodes for reinsertion after a delete operation +type d14listNodeT struct { + next *d14listNodeT ///< Next in list + node *d14nodeT ///< Node +} + +const d14notTaken = -1 // indicates that position + +/// Variables for finding a split partition +type d14partitionVarsT struct { + partition [d14maxNodes + 1]int + total int + minFill int + count [2]int + cover [2]d14rectT + area [2]float64 + + branchBuf [d14maxNodes + 1]d14branchT + branchCount int + coverSplit d14rectT + coverSplitArea float64 +} + +func d14New() *d14RTree { + // We only support machine word size simple data type eg. integer index or object pointer. + // Since we are storing as union with non data branch + return &d14RTree{ + root: &d14nodeT{}, + } +} + +/// Insert entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. 
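+///
+/// Illustrative sketch (an assumption, not part of the original generated code):
+/// an entry is normally removed with the same bounds and id it was inserted with,
+/// since removal matches on overlapping bounds plus an identical data id.
+///
+///	tr := d14New()
+///	var min, max [d14numDims]float64
+///	max[0] = 1
+///	tr.Insert(min, max, 42)
+///	tr.Remove(min, max, 42)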
+func (tr *d14RTree) Insert(min, max [d14numDims]float64, dataId interface{}) { + var branch d14branchT + branch.data = dataId + for axis := 0; axis < d14numDims; axis++ { + branch.rect.min[axis] = min[axis] + branch.rect.max[axis] = max[axis] + } + d14insertRect(&branch, &tr.root, 0) +} + +/// Remove entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d14RTree) Remove(min, max [d14numDims]float64, dataId interface{}) { + var rect d14rectT + for axis := 0; axis < d14numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + d14removeRect(&rect, dataId, &tr.root) +} + +/// Find all within d14search rectangle +/// \param a_min Min of d14search bounding rect +/// \param a_max Max of d14search bounding rect +/// \param a_searchResult d14search result array. Caller should set grow size. Function will reset, not append to array. +/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching +/// \param a_context User context to pass as parameter to a_resultCallback +/// \return Returns the number of entries found +func (tr *d14RTree) Search(min, max [d14numDims]float64, resultCallback func(data interface{}) bool) int { + var rect d14rectT + for axis := 0; axis < d14numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + foundCount, _ := d14search(tr.root, rect, 0, resultCallback) + return foundCount +} + +/// Count the data elements in this container. This is slow as no internal counter is maintained. +func (tr *d14RTree) Count() int { + var count int + d14countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d14RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d14nodeT{} +} + +func d14countRec(node *d14nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d14countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +func d14insertRectRec(branch *d14branchT, node *d14nodeT, newNode **d14nodeT, level int) bool { + // recurse until we reach the correct level for the new record. data records + // will always be called with a_level == 0 (leaf) + if node.level > level { + // Still above level for insertion, go down tree recursively + var otherNode *d14nodeT + //var newBranch d14branchT + + // find the optimal branch for this record + index := d14pickBranch(&branch.rect, node) + + // recursively insert this record into the picked branch + childWasSplit := d14insertRectRec(branch, node.branch[index].child, &otherNode, level) + + if !childWasSplit { + // Child was not split. Merge the bounding box of the new record with the + // existing bounding box + node.branch[index].rect = d14combineRect(&branch.rect, &(node.branch[index].rect)) + return false + } else { + // Child was split. 
The old branches are now re-partitioned to two nodes + // so we have to re-calculate the bounding boxes of each node + node.branch[index].rect = d14nodeCover(node.branch[index].child) + var newBranch d14branchT + newBranch.child = otherNode + newBranch.rect = d14nodeCover(otherNode) + + // The old node is already a child of a_node. Now add the newly-created + // node to a_node as well. a_node might be split because of that. + return d14addBranch(&newBranch, node, newNode) + } + } else if node.level == level { + // We have reached level for insertion. Add rect, split if necessary + return d14addBranch(branch, node, newNode) + } else { + // Should never occur + return false + } +} + +// Insert a data rectangle into an index structure. +// d14insertRect provides for splitting the root; +// returns 1 if root was split, 0 if it was not. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +// InsertRect2 does the recursion. +// +func d14insertRect(branch *d14branchT, root **d14nodeT, level int) bool { + var newNode *d14nodeT + + if d14insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d14nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d14branchT + + // add old root node as a child of the new root + newBranch.rect = d14nodeCover(*root) + newBranch.child = *root + d14addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d14nodeCover(newNode) + newBranch.child = newNode + d14addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d14nodeCover(node *d14nodeT) d14rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d14combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d14addBranch(branch *d14branchT, node *d14nodeT, newNode **d14nodeT) bool { + if node.count < d14maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d14splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d14disconnectBranch(node *d14nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. Pick the one that will need the smallest increase +// in area to accomodate the new rectangle. This will result in the +// least total area for the covering rectangles in the current node. +// In case of a tie, pick the one which was smaller before, to get +// the best resolution when searching. 
+func d14pickBranch(rect *d14rectT, node *d14nodeT) int { + var firstTime bool = true + var increase float64 + var bestIncr float64 = -1 + var area float64 + var bestArea float64 + var best int + var tempRect d14rectT + + for index := 0; index < node.count; index++ { + curRect := &node.branch[index].rect + area = d14calcRectVolume(curRect) + tempRect = d14combineRect(rect, curRect) + increase = d14calcRectVolume(&tempRect) - area + if (increase < bestIncr) || firstTime { + best = index + bestArea = area + bestIncr = increase + firstTime = false + } else if (increase == bestIncr) && (area < bestArea) { + best = index + bestArea = area + bestIncr = increase + } + } + return best +} + +// Combine two rectangles into larger one containing both +func d14combineRect(rectA, rectB *d14rectT) d14rectT { + var newRect d14rectT + + for index := 0; index < d14numDims; index++ { + newRect.min[index] = d14fmin(rectA.min[index], rectB.min[index]) + newRect.max[index] = d14fmax(rectA.max[index], rectB.max[index]) + } + + return newRect +} + +// Split a node. +// Divides the nodes branches and the extra one between two nodes. +// Old node is one of the new ones, and one really new one is created. +// Tries more than one method for choosing a partition, uses best result. +func d14splitNode(node *d14nodeT, branch *d14branchT, newNode **d14nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d14partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d14getBranches(node, branch, parVars) + + // Find partition + d14choosePartition(parVars, d14minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d14nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d14loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d14rectVolume(rect *d14rectT) float64 { + var volume float64 = 1 + for index := 0; index < d14numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d14rectT +func d14rectSphericalVolume(rect *d14rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d14numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. + if d14numDims == 5 { + return (radius * radius * radius * radius * radius * d14unitSphereVolume) + } else if d14numDims == 4 { + return (radius * radius * radius * radius * d14unitSphereVolume) + } else if d14numDims == 3 { + return (radius * radius * radius * d14unitSphereVolume) + } else if d14numDims == 2 { + return (radius * radius * d14unitSphereVolume) + } else { + return (math.Pow(radius, d14numDims) * d14unitSphereVolume) + } +} + +// Use one of the methods to calculate retangle volume +func d14calcRectVolume(rect *d14rectT) float64 { + if d14useSphericalVolume { + return d14rectSphericalVolume(rect) // Slower but helps certain merge cases + } else { // RTREE_USE_SPHERICAL_VOLUME + return d14rectVolume(rect) // Faster but can cause poor merges + } // RTREE_USE_SPHERICAL_VOLUME +} + +// Load branch buffer with branches from full node plus the extra branch. 
+func d14getBranches(node *d14nodeT, branch *d14branchT, parVars *d14partitionVarsT) { + // Load the branch buffer + for index := 0; index < d14maxNodes; index++ { + parVars.branchBuf[index] = node.branch[index] + } + parVars.branchBuf[d14maxNodes] = *branch + parVars.branchCount = d14maxNodes + 1 + + // Calculate rect containing all in the set + parVars.coverSplit = parVars.branchBuf[0].rect + for index := 1; index < d14maxNodes+1; index++ { + parVars.coverSplit = d14combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect) + } + parVars.coverSplitArea = d14calcRectVolume(&parVars.coverSplit) +} + +// Method #0 for choosing a partition: +// As the seeds for the two groups, pick the two rects that would waste the +// most area if covered by a single rectangle, i.e. evidently the worst pair +// to have in the same group. +// Of the remaining, one at a time is chosen to be put in one of the two groups. +// The one chosen is the one with the greatest difference in area expansion +// depending on which group - the rect most strongly attracted to one group +// and repelled from the other. +// If one group gets too full (more would force other group to violate min +// fill requirement) then other group gets the rest. +// These last are the ones that can go in either group most easily. +func d14choosePartition(parVars *d14partitionVarsT, minFill int) { + var biggestDiff float64 + var group, chosen, betterGroup int + + d14initParVars(parVars, parVars.branchCount, minFill) + d14pickSeeds(parVars) + + for ((parVars.count[0] + parVars.count[1]) < parVars.total) && + (parVars.count[0] < (parVars.total - parVars.minFill)) && + (parVars.count[1] < (parVars.total - parVars.minFill)) { + biggestDiff = -1 + for index := 0; index < parVars.total; index++ { + if d14notTaken == parVars.partition[index] { + curRect := &parVars.branchBuf[index].rect + rect0 := d14combineRect(curRect, &parVars.cover[0]) + rect1 := d14combineRect(curRect, &parVars.cover[1]) + growth0 := d14calcRectVolume(&rect0) - parVars.area[0] + growth1 := d14calcRectVolume(&rect1) - parVars.area[1] + diff := growth1 - growth0 + if diff >= 0 { + group = 0 + } else { + group = 1 + diff = -diff + } + + if diff > biggestDiff { + biggestDiff = diff + chosen = index + betterGroup = group + } else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) { + chosen = index + betterGroup = group + } + } + } + d14classify(chosen, betterGroup, parVars) + } + + // If one group too full, put remaining rects in the other + if (parVars.count[0] + parVars.count[1]) < parVars.total { + if parVars.count[0] >= parVars.total-parVars.minFill { + group = 1 + } else { + group = 0 + } + for index := 0; index < parVars.total; index++ { + if d14notTaken == parVars.partition[index] { + d14classify(index, group, parVars) + } + } + } +} + +// Copy branches from the buffer into two nodes according to the partition. +func d14loadNodes(nodeA, nodeB *d14nodeT, parVars *d14partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d14nodeT{nodeA, nodeB} + + // It is assured that d14addBranch here will not cause a node split. + d14addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d14partitionVarsT structure. 
+func d14initParVars(parVars *d14partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d14notTaken + } +} + +func d14pickSeeds(parVars *d14partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d14maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d14calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d14combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d14calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d14classify(seed0, 0, parVars) + d14classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. +func d14classify(index, group int, parVars *d14partitionVarsT) { + parVars.partition[index] = group + + // Calculate combined rect + if parVars.count[group] == 0 { + parVars.cover[group] = parVars.branchBuf[index].rect + } else { + parVars.cover[group] = d14combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group]) + } + + // Calculate volume of combined rect + parVars.area[group] = d14calcRectVolume(&parVars.cover[group]) + + parVars.count[group]++ +} + +// Delete a data rectangle from an index structure. +// Pass in a pointer to a d14rectT, the tid of the record, ptr to ptr to root node. +// Returns 1 if record not found, 0 if success. +// d14removeRect provides for eliminating the root. +func d14removeRect(rect *d14rectT, id interface{}, root **d14nodeT) bool { + var reInsertList *d14listNodeT + + if !d14removeRectRec(rect, id, *root, &reInsertList) { + // Found and deleted a data item + // Reinsert any branches from eliminated nodes + for reInsertList != nil { + tempNode := reInsertList.node + + for index := 0; index < tempNode.count; index++ { + // TODO go over this code. should I use (tempNode->m_level - 1)? + d14insertRect(&tempNode.branch[index], root, tempNode.level) + } + reInsertList = reInsertList.next + } + + // Check for redundant root (not leaf, 1 child) and eliminate TODO replace + // if with while? In case there is a whole branch of redundant roots... + if (*root).count == 1 && (*root).isInternalNode() { + tempNode := (*root).branch[0].child + *root = tempNode + } + return false + } else { + return true + } +} + +// Delete a rectangle from non-root part of an index structure. +// Called by d14removeRect. Descends tree recursively, +// merges branches on the way back up. +// Returns 1 if record not found, 0 if success. 
+func d14removeRectRec(rect *d14rectT, id interface{}, node *d14nodeT, listNode **d14listNodeT) bool { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + if d14overlap(*rect, node.branch[index].rect) { + if !d14removeRectRec(rect, id, node.branch[index].child, listNode) { + if node.branch[index].child.count >= d14minNodes { + // child removed, just resize parent rect + node.branch[index].rect = d14nodeCover(node.branch[index].child) + } else { + // child removed, not enough entries in node, eliminate node + d14reInsert(node.branch[index].child, listNode) + d14disconnectBranch(node, index) // Must return after this call as count has changed + } + return false + } + } + } + return true + } else { // A leaf node + for index := 0; index < node.count; index++ { + if node.branch[index].data == id { + d14disconnectBranch(node, index) // Must return after this call as count has changed + return false + } + } + return true + } +} + +// Decide whether two rectangles d14overlap. +func d14overlap(rectA, rectB d14rectT) bool { + for index := 0; index < d14numDims; index++ { + if rectA.min[index] > rectB.max[index] || + rectB.min[index] > rectA.max[index] { + return false + } + } + return true +} + +// Add a node to the reinsertion list. All its branches will later +// be reinserted into the index structure. +func d14reInsert(node *d14nodeT, listNode **d14listNodeT) { + newListNode := &d14listNodeT{} + newListNode.node = node + newListNode.next = *listNode + *listNode = newListNode +} + +// d14search in an index tree or subtree for all data retangles that d14overlap the argument rectangle. +func d14search(node *d14nodeT, rect d14rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) { + if node.isInternalNode() { + // This is an internal node in the tree + for index := 0; index < node.count; index++ { + if d14overlap(rect, node.branch[index].rect) { + var ok bool + foundCount, ok = d14search(node.branch[index].child, rect, foundCount, resultCallback) + if !ok { + // The callback indicated to stop searching + return foundCount, false + } + } + } + } else { + // This is a leaf node + for index := 0; index < node.count; index++ { + if d14overlap(rect, node.branch[index].rect) { + id := node.branch[index].data + foundCount++ + if !resultCallback(id) { + return foundCount, false // Don't continue searching + } + + } + } + } + return foundCount, true // Continue searching +} + +func d15fmin(a, b float64) float64 { + if a < b { + return a + } + return b +} +func d15fmax(a, b float64) float64 { + if a > b { + return a + } + return b +} + +const ( + d15numDims = 15 + d15maxNodes = 8 + d15minNodes = d15maxNodes / 2 + d15useSphericalVolume = true // Better split classification, may be slower on some systems +) + +var d15unitSphereVolume = []float64{ + 0.000000, 2.000000, 3.141593, // Dimension 0,1,2 + 4.188790, 4.934802, 5.263789, // Dimension 3,4,5 + 5.167713, 4.724766, 4.058712, // Dimension 6,7,8 + 3.298509, 2.550164, 1.884104, // Dimension 9,10,11 + 1.335263, 0.910629, 0.599265, // Dimension 12,13,14 + 0.381443, 0.235331, 0.140981, // Dimension 15,16,17 + 0.082146, 0.046622, 0.025807, // Dimension 18,19,20 +}[d15numDims] + +type d15RTree struct { + root *d15nodeT ///< Root of tree +} + +/// Minimal bounding rectangle (n-dimensional) +type d15rectT struct { + min [d15numDims]float64 ///< Min dimensions of bounding box + max [d15numDims]float64 ///< Max dimensions of bounding box +} + +/// May be data or may be another subtree +/// The 
parents level determines this. +/// If the parents level is 0, then this is data +type d15branchT struct { + rect d15rectT ///< Bounds + child *d15nodeT ///< Child node + data interface{} ///< Data Id or Ptr +} + +/// d15nodeT for each branch level +type d15nodeT struct { + count int ///< Count + level int ///< Leaf is zero, others positive + branch [d15maxNodes]d15branchT ///< Branch +} + +func (node *d15nodeT) isInternalNode() bool { + return (node.level > 0) // Not a leaf, but a internal node +} +func (node *d15nodeT) isLeaf() bool { + return (node.level == 0) // A leaf, contains data +} + +/// A link list of nodes for reinsertion after a delete operation +type d15listNodeT struct { + next *d15listNodeT ///< Next in list + node *d15nodeT ///< Node +} + +const d15notTaken = -1 // indicates that position + +/// Variables for finding a split partition +type d15partitionVarsT struct { + partition [d15maxNodes + 1]int + total int + minFill int + count [2]int + cover [2]d15rectT + area [2]float64 + + branchBuf [d15maxNodes + 1]d15branchT + branchCount int + coverSplit d15rectT + coverSplitArea float64 +} + +func d15New() *d15RTree { + // We only support machine word size simple data type eg. integer index or object pointer. + // Since we are storing as union with non data branch + return &d15RTree{ + root: &d15nodeT{}, + } +} + +/// Insert entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d15RTree) Insert(min, max [d15numDims]float64, dataId interface{}) { + var branch d15branchT + branch.data = dataId + for axis := 0; axis < d15numDims; axis++ { + branch.rect.min[axis] = min[axis] + branch.rect.max[axis] = max[axis] + } + d15insertRect(&branch, &tr.root, 0) +} + +/// Remove entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d15RTree) Remove(min, max [d15numDims]float64, dataId interface{}) { + var rect d15rectT + for axis := 0; axis < d15numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + d15removeRect(&rect, dataId, &tr.root) +} + +/// Find all within d15search rectangle +/// \param a_min Min of d15search bounding rect +/// \param a_max Max of d15search bounding rect +/// \param a_searchResult d15search result array. Caller should set grow size. Function will reset, not append to array. +/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching +/// \param a_context User context to pass as parameter to a_resultCallback +/// \return Returns the number of entries found +func (tr *d15RTree) Search(min, max [d15numDims]float64, resultCallback func(data interface{}) bool) int { + var rect d15rectT + for axis := 0; axis < d15numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + foundCount, _ := d15search(tr.root, rect, 0, resultCallback) + return foundCount +} + +/// Count the data elements in this container. This is slow as no internal counter is maintained. 
+func (tr *d15RTree) Count() int { + var count int + d15countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d15RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d15nodeT{} +} + +func d15countRec(node *d15nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d15countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +func d15insertRectRec(branch *d15branchT, node *d15nodeT, newNode **d15nodeT, level int) bool { + // recurse until we reach the correct level for the new record. data records + // will always be called with a_level == 0 (leaf) + if node.level > level { + // Still above level for insertion, go down tree recursively + var otherNode *d15nodeT + //var newBranch d15branchT + + // find the optimal branch for this record + index := d15pickBranch(&branch.rect, node) + + // recursively insert this record into the picked branch + childWasSplit := d15insertRectRec(branch, node.branch[index].child, &otherNode, level) + + if !childWasSplit { + // Child was not split. Merge the bounding box of the new record with the + // existing bounding box + node.branch[index].rect = d15combineRect(&branch.rect, &(node.branch[index].rect)) + return false + } else { + // Child was split. The old branches are now re-partitioned to two nodes + // so we have to re-calculate the bounding boxes of each node + node.branch[index].rect = d15nodeCover(node.branch[index].child) + var newBranch d15branchT + newBranch.child = otherNode + newBranch.rect = d15nodeCover(otherNode) + + // The old node is already a child of a_node. Now add the newly-created + // node to a_node as well. a_node might be split because of that. + return d15addBranch(&newBranch, node, newNode) + } + } else if node.level == level { + // We have reached level for insertion. Add rect, split if necessary + return d15addBranch(branch, node, newNode) + } else { + // Should never occur + return false + } +} + +// Insert a data rectangle into an index structure. +// d15insertRect provides for splitting the root; +// returns 1 if root was split, 0 if it was not. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +// InsertRect2 does the recursion. 
+// +func d15insertRect(branch *d15branchT, root **d15nodeT, level int) bool { + var newNode *d15nodeT + + if d15insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d15nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d15branchT + + // add old root node as a child of the new root + newBranch.rect = d15nodeCover(*root) + newBranch.child = *root + d15addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d15nodeCover(newNode) + newBranch.child = newNode + d15addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d15nodeCover(node *d15nodeT) d15rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d15combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d15addBranch(branch *d15branchT, node *d15nodeT, newNode **d15nodeT) bool { + if node.count < d15maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d15splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d15disconnectBranch(node *d15nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. Pick the one that will need the smallest increase +// in area to accomodate the new rectangle. This will result in the +// least total area for the covering rectangles in the current node. +// In case of a tie, pick the one which was smaller before, to get +// the best resolution when searching. +func d15pickBranch(rect *d15rectT, node *d15nodeT) int { + var firstTime bool = true + var increase float64 + var bestIncr float64 = -1 + var area float64 + var bestArea float64 + var best int + var tempRect d15rectT + + for index := 0; index < node.count; index++ { + curRect := &node.branch[index].rect + area = d15calcRectVolume(curRect) + tempRect = d15combineRect(rect, curRect) + increase = d15calcRectVolume(&tempRect) - area + if (increase < bestIncr) || firstTime { + best = index + bestArea = area + bestIncr = increase + firstTime = false + } else if (increase == bestIncr) && (area < bestArea) { + best = index + bestArea = area + bestIncr = increase + } + } + return best +} + +// Combine two rectangles into larger one containing both +func d15combineRect(rectA, rectB *d15rectT) d15rectT { + var newRect d15rectT + + for index := 0; index < d15numDims; index++ { + newRect.min[index] = d15fmin(rectA.min[index], rectB.min[index]) + newRect.max[index] = d15fmax(rectA.max[index], rectB.max[index]) + } + + return newRect +} + +// Split a node. +// Divides the nodes branches and the extra one between two nodes. +// Old node is one of the new ones, and one really new one is created. +// Tries more than one method for choosing a partition, uses best result. 
+func d15splitNode(node *d15nodeT, branch *d15branchT, newNode **d15nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d15partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d15getBranches(node, branch, parVars) + + // Find partition + d15choosePartition(parVars, d15minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d15nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d15loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d15rectVolume(rect *d15rectT) float64 { + var volume float64 = 1 + for index := 0; index < d15numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d15rectT +func d15rectSphericalVolume(rect *d15rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d15numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. + if d15numDims == 5 { + return (radius * radius * radius * radius * radius * d15unitSphereVolume) + } else if d15numDims == 4 { + return (radius * radius * radius * radius * d15unitSphereVolume) + } else if d15numDims == 3 { + return (radius * radius * radius * d15unitSphereVolume) + } else if d15numDims == 2 { + return (radius * radius * d15unitSphereVolume) + } else { + return (math.Pow(radius, d15numDims) * d15unitSphereVolume) + } +} + +// Use one of the methods to calculate retangle volume +func d15calcRectVolume(rect *d15rectT) float64 { + if d15useSphericalVolume { + return d15rectSphericalVolume(rect) // Slower but helps certain merge cases + } else { // RTREE_USE_SPHERICAL_VOLUME + return d15rectVolume(rect) // Faster but can cause poor merges + } // RTREE_USE_SPHERICAL_VOLUME +} + +// Load branch buffer with branches from full node plus the extra branch. +func d15getBranches(node *d15nodeT, branch *d15branchT, parVars *d15partitionVarsT) { + // Load the branch buffer + for index := 0; index < d15maxNodes; index++ { + parVars.branchBuf[index] = node.branch[index] + } + parVars.branchBuf[d15maxNodes] = *branch + parVars.branchCount = d15maxNodes + 1 + + // Calculate rect containing all in the set + parVars.coverSplit = parVars.branchBuf[0].rect + for index := 1; index < d15maxNodes+1; index++ { + parVars.coverSplit = d15combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect) + } + parVars.coverSplitArea = d15calcRectVolume(&parVars.coverSplit) +} + +// Method #0 for choosing a partition: +// As the seeds for the two groups, pick the two rects that would waste the +// most area if covered by a single rectangle, i.e. evidently the worst pair +// to have in the same group. +// Of the remaining, one at a time is chosen to be put in one of the two groups. +// The one chosen is the one with the greatest difference in area expansion +// depending on which group - the rect most strongly attracted to one group +// and repelled from the other. +// If one group gets too full (more would force other group to violate min +// fill requirement) then other group gets the rest. +// These last are the ones that can go in either group most easily. 
+func d15choosePartition(parVars *d15partitionVarsT, minFill int) { + var biggestDiff float64 + var group, chosen, betterGroup int + + d15initParVars(parVars, parVars.branchCount, minFill) + d15pickSeeds(parVars) + + for ((parVars.count[0] + parVars.count[1]) < parVars.total) && + (parVars.count[0] < (parVars.total - parVars.minFill)) && + (parVars.count[1] < (parVars.total - parVars.minFill)) { + biggestDiff = -1 + for index := 0; index < parVars.total; index++ { + if d15notTaken == parVars.partition[index] { + curRect := &parVars.branchBuf[index].rect + rect0 := d15combineRect(curRect, &parVars.cover[0]) + rect1 := d15combineRect(curRect, &parVars.cover[1]) + growth0 := d15calcRectVolume(&rect0) - parVars.area[0] + growth1 := d15calcRectVolume(&rect1) - parVars.area[1] + diff := growth1 - growth0 + if diff >= 0 { + group = 0 + } else { + group = 1 + diff = -diff + } + + if diff > biggestDiff { + biggestDiff = diff + chosen = index + betterGroup = group + } else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) { + chosen = index + betterGroup = group + } + } + } + d15classify(chosen, betterGroup, parVars) + } + + // If one group too full, put remaining rects in the other + if (parVars.count[0] + parVars.count[1]) < parVars.total { + if parVars.count[0] >= parVars.total-parVars.minFill { + group = 1 + } else { + group = 0 + } + for index := 0; index < parVars.total; index++ { + if d15notTaken == parVars.partition[index] { + d15classify(index, group, parVars) + } + } + } +} + +// Copy branches from the buffer into two nodes according to the partition. +func d15loadNodes(nodeA, nodeB *d15nodeT, parVars *d15partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d15nodeT{nodeA, nodeB} + + // It is assured that d15addBranch here will not cause a node split. + d15addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d15partitionVarsT structure. +func d15initParVars(parVars *d15partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d15notTaken + } +} + +func d15pickSeeds(parVars *d15partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d15maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d15calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d15combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d15calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d15classify(seed0, 0, parVars) + d15classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. 
+func d15classify(index, group int, parVars *d15partitionVarsT) { + parVars.partition[index] = group + + // Calculate combined rect + if parVars.count[group] == 0 { + parVars.cover[group] = parVars.branchBuf[index].rect + } else { + parVars.cover[group] = d15combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group]) + } + + // Calculate volume of combined rect + parVars.area[group] = d15calcRectVolume(&parVars.cover[group]) + + parVars.count[group]++ +} + +// Delete a data rectangle from an index structure. +// Pass in a pointer to a d15rectT, the tid of the record, ptr to ptr to root node. +// Returns 1 if record not found, 0 if success. +// d15removeRect provides for eliminating the root. +func d15removeRect(rect *d15rectT, id interface{}, root **d15nodeT) bool { + var reInsertList *d15listNodeT + + if !d15removeRectRec(rect, id, *root, &reInsertList) { + // Found and deleted a data item + // Reinsert any branches from eliminated nodes + for reInsertList != nil { + tempNode := reInsertList.node + + for index := 0; index < tempNode.count; index++ { + // TODO go over this code. should I use (tempNode->m_level - 1)? + d15insertRect(&tempNode.branch[index], root, tempNode.level) + } + reInsertList = reInsertList.next + } + + // Check for redundant root (not leaf, 1 child) and eliminate TODO replace + // if with while? In case there is a whole branch of redundant roots... + if (*root).count == 1 && (*root).isInternalNode() { + tempNode := (*root).branch[0].child + *root = tempNode + } + return false + } else { + return true + } +} + +// Delete a rectangle from non-root part of an index structure. +// Called by d15removeRect. Descends tree recursively, +// merges branches on the way back up. +// Returns 1 if record not found, 0 if success. +func d15removeRectRec(rect *d15rectT, id interface{}, node *d15nodeT, listNode **d15listNodeT) bool { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + if d15overlap(*rect, node.branch[index].rect) { + if !d15removeRectRec(rect, id, node.branch[index].child, listNode) { + if node.branch[index].child.count >= d15minNodes { + // child removed, just resize parent rect + node.branch[index].rect = d15nodeCover(node.branch[index].child) + } else { + // child removed, not enough entries in node, eliminate node + d15reInsert(node.branch[index].child, listNode) + d15disconnectBranch(node, index) // Must return after this call as count has changed + } + return false + } + } + } + return true + } else { // A leaf node + for index := 0; index < node.count; index++ { + if node.branch[index].data == id { + d15disconnectBranch(node, index) // Must return after this call as count has changed + return false + } + } + return true + } +} + +// Decide whether two rectangles d15overlap. +func d15overlap(rectA, rectB d15rectT) bool { + for index := 0; index < d15numDims; index++ { + if rectA.min[index] > rectB.max[index] || + rectB.min[index] > rectA.max[index] { + return false + } + } + return true +} + +// Add a node to the reinsertion list. All its branches will later +// be reinserted into the index structure. +func d15reInsert(node *d15nodeT, listNode **d15listNodeT) { + newListNode := &d15listNodeT{} + newListNode.node = node + newListNode.next = *listNode + *listNode = newListNode +} + +// d15search in an index tree or subtree for all data retangles that d15overlap the argument rectangle. 
+func d15search(node *d15nodeT, rect d15rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) { + if node.isInternalNode() { + // This is an internal node in the tree + for index := 0; index < node.count; index++ { + if d15overlap(rect, node.branch[index].rect) { + var ok bool + foundCount, ok = d15search(node.branch[index].child, rect, foundCount, resultCallback) + if !ok { + // The callback indicated to stop searching + return foundCount, false + } + } + } + } else { + // This is a leaf node + for index := 0; index < node.count; index++ { + if d15overlap(rect, node.branch[index].rect) { + id := node.branch[index].data + foundCount++ + if !resultCallback(id) { + return foundCount, false // Don't continue searching + } + + } + } + } + return foundCount, true // Continue searching +} + +func d16fmin(a, b float64) float64 { + if a < b { + return a + } + return b +} +func d16fmax(a, b float64) float64 { + if a > b { + return a + } + return b +} + +const ( + d16numDims = 16 + d16maxNodes = 8 + d16minNodes = d16maxNodes / 2 + d16useSphericalVolume = true // Better split classification, may be slower on some systems +) + +var d16unitSphereVolume = []float64{ + 0.000000, 2.000000, 3.141593, // Dimension 0,1,2 + 4.188790, 4.934802, 5.263789, // Dimension 3,4,5 + 5.167713, 4.724766, 4.058712, // Dimension 6,7,8 + 3.298509, 2.550164, 1.884104, // Dimension 9,10,11 + 1.335263, 0.910629, 0.599265, // Dimension 12,13,14 + 0.381443, 0.235331, 0.140981, // Dimension 15,16,17 + 0.082146, 0.046622, 0.025807, // Dimension 18,19,20 +}[d16numDims] + +type d16RTree struct { + root *d16nodeT ///< Root of tree +} + +/// Minimal bounding rectangle (n-dimensional) +type d16rectT struct { + min [d16numDims]float64 ///< Min dimensions of bounding box + max [d16numDims]float64 ///< Max dimensions of bounding box +} + +/// May be data or may be another subtree +/// The parents level determines this. +/// If the parents level is 0, then this is data +type d16branchT struct { + rect d16rectT ///< Bounds + child *d16nodeT ///< Child node + data interface{} ///< Data Id or Ptr +} + +/// d16nodeT for each branch level +type d16nodeT struct { + count int ///< Count + level int ///< Leaf is zero, others positive + branch [d16maxNodes]d16branchT ///< Branch +} + +func (node *d16nodeT) isInternalNode() bool { + return (node.level > 0) // Not a leaf, but a internal node +} +func (node *d16nodeT) isLeaf() bool { + return (node.level == 0) // A leaf, contains data +} + +/// A link list of nodes for reinsertion after a delete operation +type d16listNodeT struct { + next *d16listNodeT ///< Next in list + node *d16nodeT ///< Node +} + +const d16notTaken = -1 // indicates that position + +/// Variables for finding a split partition +type d16partitionVarsT struct { + partition [d16maxNodes + 1]int + total int + minFill int + count [2]int + cover [2]d16rectT + area [2]float64 + + branchBuf [d16maxNodes + 1]d16branchT + branchCount int + coverSplit d16rectT + coverSplitArea float64 +} + +func d16New() *d16RTree { + // We only support machine word size simple data type eg. integer index or object pointer. + // Since we are storing as union with non data branch + return &d16RTree{ + root: &d16nodeT{}, + } +} + +/// Insert entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. 
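Each generated dimension exposes the same small surface: New, Insert, Remove, Search, Count and RemoveAll. A minimal usage sketch for this 16-dimensional variant, ahead of the method bodies that follow (the helper name and id value are made up):

// Illustrative sketch: one insert, one search, one remove.
func d16exampleUsage() int {
    tr := d16New()

    var min, max [d16numDims]float64
    for i := 0; i < d16numDims; i++ {
        min[i], max[i] = 0, 1 // a unit box in every dimension
    }
    tr.Insert(min, max, "item-1")

    // The callback returns true to keep searching; Search returns the hit count.
    found := tr.Search(min, max, func(data interface{}) bool {
        return true
    })

    tr.Remove(min, max, "item-1")
    return found // 1 in this sketch
}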
+func (tr *d16RTree) Insert(min, max [d16numDims]float64, dataId interface{}) { + var branch d16branchT + branch.data = dataId + for axis := 0; axis < d16numDims; axis++ { + branch.rect.min[axis] = min[axis] + branch.rect.max[axis] = max[axis] + } + d16insertRect(&branch, &tr.root, 0) +} + +/// Remove entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d16RTree) Remove(min, max [d16numDims]float64, dataId interface{}) { + var rect d16rectT + for axis := 0; axis < d16numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + d16removeRect(&rect, dataId, &tr.root) +} + +/// Find all within d16search rectangle +/// \param a_min Min of d16search bounding rect +/// \param a_max Max of d16search bounding rect +/// \param a_searchResult d16search result array. Caller should set grow size. Function will reset, not append to array. +/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching +/// \param a_context User context to pass as parameter to a_resultCallback +/// \return Returns the number of entries found +func (tr *d16RTree) Search(min, max [d16numDims]float64, resultCallback func(data interface{}) bool) int { + var rect d16rectT + for axis := 0; axis < d16numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + foundCount, _ := d16search(tr.root, rect, 0, resultCallback) + return foundCount +} + +/// Count the data elements in this container. This is slow as no internal counter is maintained. +func (tr *d16RTree) Count() int { + var count int + d16countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d16RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d16nodeT{} +} + +func d16countRec(node *d16nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d16countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +func d16insertRectRec(branch *d16branchT, node *d16nodeT, newNode **d16nodeT, level int) bool { + // recurse until we reach the correct level for the new record. data records + // will always be called with a_level == 0 (leaf) + if node.level > level { + // Still above level for insertion, go down tree recursively + var otherNode *d16nodeT + //var newBranch d16branchT + + // find the optimal branch for this record + index := d16pickBranch(&branch.rect, node) + + // recursively insert this record into the picked branch + childWasSplit := d16insertRectRec(branch, node.branch[index].child, &otherNode, level) + + if !childWasSplit { + // Child was not split. Merge the bounding box of the new record with the + // existing bounding box + node.branch[index].rect = d16combineRect(&branch.rect, &(node.branch[index].rect)) + return false + } else { + // Child was split. 
The old branches are now re-partitioned to two nodes + // so we have to re-calculate the bounding boxes of each node + node.branch[index].rect = d16nodeCover(node.branch[index].child) + var newBranch d16branchT + newBranch.child = otherNode + newBranch.rect = d16nodeCover(otherNode) + + // The old node is already a child of a_node. Now add the newly-created + // node to a_node as well. a_node might be split because of that. + return d16addBranch(&newBranch, node, newNode) + } + } else if node.level == level { + // We have reached level for insertion. Add rect, split if necessary + return d16addBranch(branch, node, newNode) + } else { + // Should never occur + return false + } +} + +// Insert a data rectangle into an index structure. +// d16insertRect provides for splitting the root; +// returns 1 if root was split, 0 if it was not. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +// InsertRect2 does the recursion. +// +func d16insertRect(branch *d16branchT, root **d16nodeT, level int) bool { + var newNode *d16nodeT + + if d16insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d16nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d16branchT + + // add old root node as a child of the new root + newBranch.rect = d16nodeCover(*root) + newBranch.child = *root + d16addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d16nodeCover(newNode) + newBranch.child = newNode + d16addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d16nodeCover(node *d16nodeT) d16rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d16combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d16addBranch(branch *d16branchT, node *d16nodeT, newNode **d16nodeT) bool { + if node.count < d16maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d16splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d16disconnectBranch(node *d16nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. Pick the one that will need the smallest increase +// in area to accomodate the new rectangle. This will result in the +// least total area for the covering rectangles in the current node. +// In case of a tie, pick the one which was smaller before, to get +// the best resolution when searching. 
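Concretely: if merging the new rectangle into branch A would grow A's covering volume by 2.0 but branch B's by only 0.5, B is picked; on an exact tie, the branch that is currently smaller wins. A small, made-up example against the function below:

// Illustrative sketch: the target box already lies inside the larger branch
// rect, so that branch needs zero enlargement and is chosen.
func d16pickBranchExample() int {
    node := &d16nodeT{count: 2, level: 1}
    var target d16rectT
    for i := 0; i < d16numDims; i++ {
        node.branch[0].rect.max[i] = 1 // small box [0,1]^16
        node.branch[1].rect.max[i] = 4 // large box [0,4]^16
        target.min[i], target.max[i] = 3, 3.5
    }
    return d16pickBranch(&target, node) // 1: zero growth beats any growth
}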
+func d16pickBranch(rect *d16rectT, node *d16nodeT) int { + var firstTime bool = true + var increase float64 + var bestIncr float64 = -1 + var area float64 + var bestArea float64 + var best int + var tempRect d16rectT + + for index := 0; index < node.count; index++ { + curRect := &node.branch[index].rect + area = d16calcRectVolume(curRect) + tempRect = d16combineRect(rect, curRect) + increase = d16calcRectVolume(&tempRect) - area + if (increase < bestIncr) || firstTime { + best = index + bestArea = area + bestIncr = increase + firstTime = false + } else if (increase == bestIncr) && (area < bestArea) { + best = index + bestArea = area + bestIncr = increase + } + } + return best +} + +// Combine two rectangles into larger one containing both +func d16combineRect(rectA, rectB *d16rectT) d16rectT { + var newRect d16rectT + + for index := 0; index < d16numDims; index++ { + newRect.min[index] = d16fmin(rectA.min[index], rectB.min[index]) + newRect.max[index] = d16fmax(rectA.max[index], rectB.max[index]) + } + + return newRect +} + +// Split a node. +// Divides the nodes branches and the extra one between two nodes. +// Old node is one of the new ones, and one really new one is created. +// Tries more than one method for choosing a partition, uses best result. +func d16splitNode(node *d16nodeT, branch *d16branchT, newNode **d16nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d16partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d16getBranches(node, branch, parVars) + + // Find partition + d16choosePartition(parVars, d16minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d16nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d16loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d16rectVolume(rect *d16rectT) float64 { + var volume float64 = 1 + for index := 0; index < d16numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d16rectT +func d16rectSphericalVolume(rect *d16rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d16numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. + if d16numDims == 5 { + return (radius * radius * radius * radius * radius * d16unitSphereVolume) + } else if d16numDims == 4 { + return (radius * radius * radius * radius * d16unitSphereVolume) + } else if d16numDims == 3 { + return (radius * radius * radius * d16unitSphereVolume) + } else if d16numDims == 2 { + return (radius * radius * d16unitSphereVolume) + } else { + return (math.Pow(radius, d16numDims) * d16unitSphereVolume) + } +} + +// Use one of the methods to calculate retangle volume +func d16calcRectVolume(rect *d16rectT) float64 { + if d16useSphericalVolume { + return d16rectSphericalVolume(rect) // Slower but helps certain merge cases + } else { // RTREE_USE_SPHERICAL_VOLUME + return d16rectVolume(rect) // Faster but can cause poor merges + } // RTREE_USE_SPHERICAL_VOLUME +} + +// Load branch buffer with branches from full node plus the extra branch. 
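An aside on d16rectSphericalVolume above: the bounding-sphere volume is the volume of the unit 16-ball times radius^16, where the radius is half the rectangle's diagonal, and the hard-coded d16unitSphereVolume entry is that unit volume. A quick sanity check of the constant, assuming the file imports math as the calls to math.Sqrt and math.Pow above imply (the helper is made up):

// Illustrative sketch: the unit 2k-ball has volume pi^k / k!; for 16 dimensions
// that is pi^8 / 8!, roughly 0.235331, matching the d16unitSphereVolume value.
func d16unitSphereVolumeCheck() float64 {
    factorial8 := 1.0
    for i := 2; i <= 8; i++ {
        factorial8 *= float64(i)
    }
    return math.Pow(math.Pi, 8) / factorial8
}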
+func d16getBranches(node *d16nodeT, branch *d16branchT, parVars *d16partitionVarsT) { + // Load the branch buffer + for index := 0; index < d16maxNodes; index++ { + parVars.branchBuf[index] = node.branch[index] + } + parVars.branchBuf[d16maxNodes] = *branch + parVars.branchCount = d16maxNodes + 1 + + // Calculate rect containing all in the set + parVars.coverSplit = parVars.branchBuf[0].rect + for index := 1; index < d16maxNodes+1; index++ { + parVars.coverSplit = d16combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect) + } + parVars.coverSplitArea = d16calcRectVolume(&parVars.coverSplit) +} + +// Method #0 for choosing a partition: +// As the seeds for the two groups, pick the two rects that would waste the +// most area if covered by a single rectangle, i.e. evidently the worst pair +// to have in the same group. +// Of the remaining, one at a time is chosen to be put in one of the two groups. +// The one chosen is the one with the greatest difference in area expansion +// depending on which group - the rect most strongly attracted to one group +// and repelled from the other. +// If one group gets too full (more would force other group to violate min +// fill requirement) then other group gets the rest. +// These last are the ones that can go in either group most easily. +func d16choosePartition(parVars *d16partitionVarsT, minFill int) { + var biggestDiff float64 + var group, chosen, betterGroup int + + d16initParVars(parVars, parVars.branchCount, minFill) + d16pickSeeds(parVars) + + for ((parVars.count[0] + parVars.count[1]) < parVars.total) && + (parVars.count[0] < (parVars.total - parVars.minFill)) && + (parVars.count[1] < (parVars.total - parVars.minFill)) { + biggestDiff = -1 + for index := 0; index < parVars.total; index++ { + if d16notTaken == parVars.partition[index] { + curRect := &parVars.branchBuf[index].rect + rect0 := d16combineRect(curRect, &parVars.cover[0]) + rect1 := d16combineRect(curRect, &parVars.cover[1]) + growth0 := d16calcRectVolume(&rect0) - parVars.area[0] + growth1 := d16calcRectVolume(&rect1) - parVars.area[1] + diff := growth1 - growth0 + if diff >= 0 { + group = 0 + } else { + group = 1 + diff = -diff + } + + if diff > biggestDiff { + biggestDiff = diff + chosen = index + betterGroup = group + } else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) { + chosen = index + betterGroup = group + } + } + } + d16classify(chosen, betterGroup, parVars) + } + + // If one group too full, put remaining rects in the other + if (parVars.count[0] + parVars.count[1]) < parVars.total { + if parVars.count[0] >= parVars.total-parVars.minFill { + group = 1 + } else { + group = 0 + } + for index := 0; index < parVars.total; index++ { + if d16notTaken == parVars.partition[index] { + d16classify(index, group, parVars) + } + } + } +} + +// Copy branches from the buffer into two nodes according to the partition. +func d16loadNodes(nodeA, nodeB *d16nodeT, parVars *d16partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d16nodeT{nodeA, nodeB} + + // It is assured that d16addBranch here will not cause a node split. + d16addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d16partitionVarsT structure. 
+func d16initParVars(parVars *d16partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d16notTaken + } +} + +func d16pickSeeds(parVars *d16partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d16maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d16calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d16combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d16calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d16classify(seed0, 0, parVars) + d16classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. +func d16classify(index, group int, parVars *d16partitionVarsT) { + parVars.partition[index] = group + + // Calculate combined rect + if parVars.count[group] == 0 { + parVars.cover[group] = parVars.branchBuf[index].rect + } else { + parVars.cover[group] = d16combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group]) + } + + // Calculate volume of combined rect + parVars.area[group] = d16calcRectVolume(&parVars.cover[group]) + + parVars.count[group]++ +} + +// Delete a data rectangle from an index structure. +// Pass in a pointer to a d16rectT, the tid of the record, ptr to ptr to root node. +// Returns 1 if record not found, 0 if success. +// d16removeRect provides for eliminating the root. +func d16removeRect(rect *d16rectT, id interface{}, root **d16nodeT) bool { + var reInsertList *d16listNodeT + + if !d16removeRectRec(rect, id, *root, &reInsertList) { + // Found and deleted a data item + // Reinsert any branches from eliminated nodes + for reInsertList != nil { + tempNode := reInsertList.node + + for index := 0; index < tempNode.count; index++ { + // TODO go over this code. should I use (tempNode->m_level - 1)? + d16insertRect(&tempNode.branch[index], root, tempNode.level) + } + reInsertList = reInsertList.next + } + + // Check for redundant root (not leaf, 1 child) and eliminate TODO replace + // if with while? In case there is a whole branch of redundant roots... + if (*root).count == 1 && (*root).isInternalNode() { + tempNode := (*root).branch[0].child + *root = tempNode + } + return false + } else { + return true + } +} + +// Delete a rectangle from non-root part of an index structure. +// Called by d16removeRect. Descends tree recursively, +// merges branches on the way back up. +// Returns 1 if record not found, 0 if success. 
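Note that the C-style comments here still say "returns 1 ... 0", but the Go port returns a bool: true means the record was not found, false means it was found and deleted. A small wrapper that makes the inverted convention explicit (the helper name is made up):

// Illustrative sketch: turn "true = not found" into the more usual
// "true = something was deleted".
func d16deleted(tr *d16RTree, min, max [d16numDims]float64, id interface{}) bool {
    var rect d16rectT
    for i := 0; i < d16numDims; i++ {
        rect.min[i], rect.max[i] = min[i], max[i]
    }
    return !d16removeRect(&rect, id, &tr.root)
}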
+func d16removeRectRec(rect *d16rectT, id interface{}, node *d16nodeT, listNode **d16listNodeT) bool { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + if d16overlap(*rect, node.branch[index].rect) { + if !d16removeRectRec(rect, id, node.branch[index].child, listNode) { + if node.branch[index].child.count >= d16minNodes { + // child removed, just resize parent rect + node.branch[index].rect = d16nodeCover(node.branch[index].child) + } else { + // child removed, not enough entries in node, eliminate node + d16reInsert(node.branch[index].child, listNode) + d16disconnectBranch(node, index) // Must return after this call as count has changed + } + return false + } + } + } + return true + } else { // A leaf node + for index := 0; index < node.count; index++ { + if node.branch[index].data == id { + d16disconnectBranch(node, index) // Must return after this call as count has changed + return false + } + } + return true + } +} + +// Decide whether two rectangles d16overlap. +func d16overlap(rectA, rectB d16rectT) bool { + for index := 0; index < d16numDims; index++ { + if rectA.min[index] > rectB.max[index] || + rectB.min[index] > rectA.max[index] { + return false + } + } + return true +} + +// Add a node to the reinsertion list. All its branches will later +// be reinserted into the index structure. +func d16reInsert(node *d16nodeT, listNode **d16listNodeT) { + newListNode := &d16listNodeT{} + newListNode.node = node + newListNode.next = *listNode + *listNode = newListNode +} + +// d16search in an index tree or subtree for all data retangles that d16overlap the argument rectangle. +func d16search(node *d16nodeT, rect d16rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) { + if node.isInternalNode() { + // This is an internal node in the tree + for index := 0; index < node.count; index++ { + if d16overlap(rect, node.branch[index].rect) { + var ok bool + foundCount, ok = d16search(node.branch[index].child, rect, foundCount, resultCallback) + if !ok { + // The callback indicated to stop searching + return foundCount, false + } + } + } + } else { + // This is a leaf node + for index := 0; index < node.count; index++ { + if d16overlap(rect, node.branch[index].rect) { + id := node.branch[index].data + foundCount++ + if !resultCallback(id) { + return foundCount, false // Don't continue searching + } + + } + } + } + return foundCount, true // Continue searching +} + +func d17fmin(a, b float64) float64 { + if a < b { + return a + } + return b +} +func d17fmax(a, b float64) float64 { + if a > b { + return a + } + return b +} + +const ( + d17numDims = 17 + d17maxNodes = 8 + d17minNodes = d17maxNodes / 2 + d17useSphericalVolume = true // Better split classification, may be slower on some systems +) + +var d17unitSphereVolume = []float64{ + 0.000000, 2.000000, 3.141593, // Dimension 0,1,2 + 4.188790, 4.934802, 5.263789, // Dimension 3,4,5 + 5.167713, 4.724766, 4.058712, // Dimension 6,7,8 + 3.298509, 2.550164, 1.884104, // Dimension 9,10,11 + 1.335263, 0.910629, 0.599265, // Dimension 12,13,14 + 0.381443, 0.235331, 0.140981, // Dimension 15,16,17 + 0.082146, 0.046622, 0.025807, // Dimension 18,19,20 +}[d17numDims] + +type d17RTree struct { + root *d17nodeT ///< Root of tree +} + +/// Minimal bounding rectangle (n-dimensional) +type d17rectT struct { + min [d17numDims]float64 ///< Min dimensions of bounding box + max [d17numDims]float64 ///< Max dimensions of bounding box +} + +/// May be data or may be another subtree +/// The 
parents level determines this. +/// If the parents level is 0, then this is data +type d17branchT struct { + rect d17rectT ///< Bounds + child *d17nodeT ///< Child node + data interface{} ///< Data Id or Ptr +} + +/// d17nodeT for each branch level +type d17nodeT struct { + count int ///< Count + level int ///< Leaf is zero, others positive + branch [d17maxNodes]d17branchT ///< Branch +} + +func (node *d17nodeT) isInternalNode() bool { + return (node.level > 0) // Not a leaf, but a internal node +} +func (node *d17nodeT) isLeaf() bool { + return (node.level == 0) // A leaf, contains data +} + +/// A link list of nodes for reinsertion after a delete operation +type d17listNodeT struct { + next *d17listNodeT ///< Next in list + node *d17nodeT ///< Node +} + +const d17notTaken = -1 // indicates that position + +/// Variables for finding a split partition +type d17partitionVarsT struct { + partition [d17maxNodes + 1]int + total int + minFill int + count [2]int + cover [2]d17rectT + area [2]float64 + + branchBuf [d17maxNodes + 1]d17branchT + branchCount int + coverSplit d17rectT + coverSplitArea float64 +} + +func d17New() *d17RTree { + // We only support machine word size simple data type eg. integer index or object pointer. + // Since we are storing as union with non data branch + return &d17RTree{ + root: &d17nodeT{}, + } +} + +/// Insert entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d17RTree) Insert(min, max [d17numDims]float64, dataId interface{}) { + var branch d17branchT + branch.data = dataId + for axis := 0; axis < d17numDims; axis++ { + branch.rect.min[axis] = min[axis] + branch.rect.max[axis] = max[axis] + } + d17insertRect(&branch, &tr.root, 0) +} + +/// Remove entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d17RTree) Remove(min, max [d17numDims]float64, dataId interface{}) { + var rect d17rectT + for axis := 0; axis < d17numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + d17removeRect(&rect, dataId, &tr.root) +} + +/// Find all within d17search rectangle +/// \param a_min Min of d17search bounding rect +/// \param a_max Max of d17search bounding rect +/// \param a_searchResult d17search result array. Caller should set grow size. Function will reset, not append to array. +/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching +/// \param a_context User context to pass as parameter to a_resultCallback +/// \return Returns the number of entries found +func (tr *d17RTree) Search(min, max [d17numDims]float64, resultCallback func(data interface{}) bool) int { + var rect d17rectT + for axis := 0; axis < d17numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + foundCount, _ := d17search(tr.root, rect, 0, resultCallback) + return foundCount +} + +/// Count the data elements in this container. This is slow as no internal counter is maintained. 
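Because Count has to walk every node, callers that need the size frequently may prefer to track it alongside the tree. A minimal wrapper sketch (the type is made up; a Remove wrapper could use d17removeRect's return value to decrement only on a successful delete):

// Illustrative sketch: keep a running size next to the tree instead of
// re-walking it on every query.
type d17countedTree struct {
    tree *d17RTree
    size int
}

func (c *d17countedTree) Insert(min, max [d17numDims]float64, id interface{}) {
    c.tree.Insert(min, max, id)
    c.size++
}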
+func (tr *d17RTree) Count() int { + var count int + d17countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d17RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d17nodeT{} +} + +func d17countRec(node *d17nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d17countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +func d17insertRectRec(branch *d17branchT, node *d17nodeT, newNode **d17nodeT, level int) bool { + // recurse until we reach the correct level for the new record. data records + // will always be called with a_level == 0 (leaf) + if node.level > level { + // Still above level for insertion, go down tree recursively + var otherNode *d17nodeT + //var newBranch d17branchT + + // find the optimal branch for this record + index := d17pickBranch(&branch.rect, node) + + // recursively insert this record into the picked branch + childWasSplit := d17insertRectRec(branch, node.branch[index].child, &otherNode, level) + + if !childWasSplit { + // Child was not split. Merge the bounding box of the new record with the + // existing bounding box + node.branch[index].rect = d17combineRect(&branch.rect, &(node.branch[index].rect)) + return false + } else { + // Child was split. The old branches are now re-partitioned to two nodes + // so we have to re-calculate the bounding boxes of each node + node.branch[index].rect = d17nodeCover(node.branch[index].child) + var newBranch d17branchT + newBranch.child = otherNode + newBranch.rect = d17nodeCover(otherNode) + + // The old node is already a child of a_node. Now add the newly-created + // node to a_node as well. a_node might be split because of that. + return d17addBranch(&newBranch, node, newNode) + } + } else if node.level == level { + // We have reached level for insertion. Add rect, split if necessary + return d17addBranch(branch, node, newNode) + } else { + // Should never occur + return false + } +} + +// Insert a data rectangle into an index structure. +// d17insertRect provides for splitting the root; +// returns 1 if root was split, 0 if it was not. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +// InsertRect2 does the recursion. 
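A root split is the only way the tree gets taller: the old root becomes one of two children under a fresh root whose level is one higher. A sketch that exercises this, relying only on d17maxNodes being the per-node capacity as defined above (the helper name is made up):

// Illustrative sketch: inserting d17maxNodes+1 entries overflows the initial
// leaf root, so d17insertRect must have grown a new root above it.
func d17rootGrowsOnOverflow() bool {
    tr := d17New()
    var min, max [d17numDims]float64
    for i := 0; i < d17maxNodes+1; i++ {
        min[0], max[0] = float64(i), float64(i)+0.5
        tr.Insert(min, max, i)
    }
    return tr.root.level > 0
}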
+// +func d17insertRect(branch *d17branchT, root **d17nodeT, level int) bool { + var newNode *d17nodeT + + if d17insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d17nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d17branchT + + // add old root node as a child of the new root + newBranch.rect = d17nodeCover(*root) + newBranch.child = *root + d17addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d17nodeCover(newNode) + newBranch.child = newNode + d17addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d17nodeCover(node *d17nodeT) d17rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d17combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d17addBranch(branch *d17branchT, node *d17nodeT, newNode **d17nodeT) bool { + if node.count < d17maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d17splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d17disconnectBranch(node *d17nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. Pick the one that will need the smallest increase +// in area to accomodate the new rectangle. This will result in the +// least total area for the covering rectangles in the current node. +// In case of a tie, pick the one which was smaller before, to get +// the best resolution when searching. +func d17pickBranch(rect *d17rectT, node *d17nodeT) int { + var firstTime bool = true + var increase float64 + var bestIncr float64 = -1 + var area float64 + var bestArea float64 + var best int + var tempRect d17rectT + + for index := 0; index < node.count; index++ { + curRect := &node.branch[index].rect + area = d17calcRectVolume(curRect) + tempRect = d17combineRect(rect, curRect) + increase = d17calcRectVolume(&tempRect) - area + if (increase < bestIncr) || firstTime { + best = index + bestArea = area + bestIncr = increase + firstTime = false + } else if (increase == bestIncr) && (area < bestArea) { + best = index + bestArea = area + bestIncr = increase + } + } + return best +} + +// Combine two rectangles into larger one containing both +func d17combineRect(rectA, rectB *d17rectT) d17rectT { + var newRect d17rectT + + for index := 0; index < d17numDims; index++ { + newRect.min[index] = d17fmin(rectA.min[index], rectB.min[index]) + newRect.max[index] = d17fmax(rectA.max[index], rectB.max[index]) + } + + return newRect +} + +// Split a node. +// Divides the nodes branches and the extra one between two nodes. +// Old node is one of the new ones, and one really new one is created. +// Tries more than one method for choosing a partition, uses best result. 
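Note that this port calls only the Method #0 partition below. Whatever the method, a split always distributes the d17maxNodes+1 buffered branches so that neither node ends up underfull, which is also why the d17addBranch calls inside d17loadNodes can never trigger another split. A postcondition sketch (the helper is made up):

// Illustrative sketch: after d17splitNode, the maxNodes+1 branches are shared
// between the old node and the new one, each holding at least d17minNodes.
func d17splitPostcondition(node, newNode *d17nodeT) bool {
    return node.count+newNode.count == d17maxNodes+1 &&
        node.count >= d17minNodes &&
        newNode.count >= d17minNodes
}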
+func d17splitNode(node *d17nodeT, branch *d17branchT, newNode **d17nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d17partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d17getBranches(node, branch, parVars) + + // Find partition + d17choosePartition(parVars, d17minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d17nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d17loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d17rectVolume(rect *d17rectT) float64 { + var volume float64 = 1 + for index := 0; index < d17numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d17rectT +func d17rectSphericalVolume(rect *d17rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d17numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. + if d17numDims == 5 { + return (radius * radius * radius * radius * radius * d17unitSphereVolume) + } else if d17numDims == 4 { + return (radius * radius * radius * radius * d17unitSphereVolume) + } else if d17numDims == 3 { + return (radius * radius * radius * d17unitSphereVolume) + } else if d17numDims == 2 { + return (radius * radius * d17unitSphereVolume) + } else { + return (math.Pow(radius, d17numDims) * d17unitSphereVolume) + } +} + +// Use one of the methods to calculate retangle volume +func d17calcRectVolume(rect *d17rectT) float64 { + if d17useSphericalVolume { + return d17rectSphericalVolume(rect) // Slower but helps certain merge cases + } else { // RTREE_USE_SPHERICAL_VOLUME + return d17rectVolume(rect) // Faster but can cause poor merges + } // RTREE_USE_SPHERICAL_VOLUME +} + +// Load branch buffer with branches from full node plus the extra branch. +func d17getBranches(node *d17nodeT, branch *d17branchT, parVars *d17partitionVarsT) { + // Load the branch buffer + for index := 0; index < d17maxNodes; index++ { + parVars.branchBuf[index] = node.branch[index] + } + parVars.branchBuf[d17maxNodes] = *branch + parVars.branchCount = d17maxNodes + 1 + + // Calculate rect containing all in the set + parVars.coverSplit = parVars.branchBuf[0].rect + for index := 1; index < d17maxNodes+1; index++ { + parVars.coverSplit = d17combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect) + } + parVars.coverSplitArea = d17calcRectVolume(&parVars.coverSplit) +} + +// Method #0 for choosing a partition: +// As the seeds for the two groups, pick the two rects that would waste the +// most area if covered by a single rectangle, i.e. evidently the worst pair +// to have in the same group. +// Of the remaining, one at a time is chosen to be put in one of the two groups. +// The one chosen is the one with the greatest difference in area expansion +// depending on which group - the rect most strongly attracted to one group +// and repelled from the other. +// If one group gets too full (more would force other group to violate min +// fill requirement) then other group gets the rest. +// These last are the ones that can go in either group most easily. 
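In the terms used below, the "waste" of a candidate seed pair is the dead volume a single covering rectangle would add beyond the two rectangles themselves, and with d17maxNodes+1 = 9 branches and minFill = d17minNodes = 4 the loop always terminates in a 5/4 split. A sketch of the pairwise quantity d17pickSeeds maximizes (the helper name is made up):

// Illustrative sketch: the waste that d17pickSeeds computes for every pair of
// buffered branches when choosing the two seeds.
func d17seedWaste(a, b *d17rectT) float64 {
    combined := d17combineRect(a, b)
    return d17calcRectVolume(&combined) - d17calcRectVolume(a) - d17calcRectVolume(b)
}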
+func d17choosePartition(parVars *d17partitionVarsT, minFill int) { + var biggestDiff float64 + var group, chosen, betterGroup int + + d17initParVars(parVars, parVars.branchCount, minFill) + d17pickSeeds(parVars) + + for ((parVars.count[0] + parVars.count[1]) < parVars.total) && + (parVars.count[0] < (parVars.total - parVars.minFill)) && + (parVars.count[1] < (parVars.total - parVars.minFill)) { + biggestDiff = -1 + for index := 0; index < parVars.total; index++ { + if d17notTaken == parVars.partition[index] { + curRect := &parVars.branchBuf[index].rect + rect0 := d17combineRect(curRect, &parVars.cover[0]) + rect1 := d17combineRect(curRect, &parVars.cover[1]) + growth0 := d17calcRectVolume(&rect0) - parVars.area[0] + growth1 := d17calcRectVolume(&rect1) - parVars.area[1] + diff := growth1 - growth0 + if diff >= 0 { + group = 0 + } else { + group = 1 + diff = -diff + } + + if diff > biggestDiff { + biggestDiff = diff + chosen = index + betterGroup = group + } else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) { + chosen = index + betterGroup = group + } + } + } + d17classify(chosen, betterGroup, parVars) + } + + // If one group too full, put remaining rects in the other + if (parVars.count[0] + parVars.count[1]) < parVars.total { + if parVars.count[0] >= parVars.total-parVars.minFill { + group = 1 + } else { + group = 0 + } + for index := 0; index < parVars.total; index++ { + if d17notTaken == parVars.partition[index] { + d17classify(index, group, parVars) + } + } + } +} + +// Copy branches from the buffer into two nodes according to the partition. +func d17loadNodes(nodeA, nodeB *d17nodeT, parVars *d17partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d17nodeT{nodeA, nodeB} + + // It is assured that d17addBranch here will not cause a node split. + d17addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d17partitionVarsT structure. +func d17initParVars(parVars *d17partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d17notTaken + } +} + +func d17pickSeeds(parVars *d17partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d17maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d17calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d17combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d17calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d17classify(seed0, 0, parVars) + d17classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. 
+func d17classify(index, group int, parVars *d17partitionVarsT) { + parVars.partition[index] = group + + // Calculate combined rect + if parVars.count[group] == 0 { + parVars.cover[group] = parVars.branchBuf[index].rect + } else { + parVars.cover[group] = d17combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group]) + } + + // Calculate volume of combined rect + parVars.area[group] = d17calcRectVolume(&parVars.cover[group]) + + parVars.count[group]++ +} + +// Delete a data rectangle from an index structure. +// Pass in a pointer to a d17rectT, the tid of the record, ptr to ptr to root node. +// Returns 1 if record not found, 0 if success. +// d17removeRect provides for eliminating the root. +func d17removeRect(rect *d17rectT, id interface{}, root **d17nodeT) bool { + var reInsertList *d17listNodeT + + if !d17removeRectRec(rect, id, *root, &reInsertList) { + // Found and deleted a data item + // Reinsert any branches from eliminated nodes + for reInsertList != nil { + tempNode := reInsertList.node + + for index := 0; index < tempNode.count; index++ { + // TODO go over this code. should I use (tempNode->m_level - 1)? + d17insertRect(&tempNode.branch[index], root, tempNode.level) + } + reInsertList = reInsertList.next + } + + // Check for redundant root (not leaf, 1 child) and eliminate TODO replace + // if with while? In case there is a whole branch of redundant roots... + if (*root).count == 1 && (*root).isInternalNode() { + tempNode := (*root).branch[0].child + *root = tempNode + } + return false + } else { + return true + } +} + +// Delete a rectangle from non-root part of an index structure. +// Called by d17removeRect. Descends tree recursively, +// merges branches on the way back up. +// Returns 1 if record not found, 0 if success. +func d17removeRectRec(rect *d17rectT, id interface{}, node *d17nodeT, listNode **d17listNodeT) bool { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + if d17overlap(*rect, node.branch[index].rect) { + if !d17removeRectRec(rect, id, node.branch[index].child, listNode) { + if node.branch[index].child.count >= d17minNodes { + // child removed, just resize parent rect + node.branch[index].rect = d17nodeCover(node.branch[index].child) + } else { + // child removed, not enough entries in node, eliminate node + d17reInsert(node.branch[index].child, listNode) + d17disconnectBranch(node, index) // Must return after this call as count has changed + } + return false + } + } + } + return true + } else { // A leaf node + for index := 0; index < node.count; index++ { + if node.branch[index].data == id { + d17disconnectBranch(node, index) // Must return after this call as count has changed + return false + } + } + return true + } +} + +// Decide whether two rectangles d17overlap. +func d17overlap(rectA, rectB d17rectT) bool { + for index := 0; index < d17numDims; index++ { + if rectA.min[index] > rectB.max[index] || + rectB.min[index] > rectA.max[index] { + return false + } + } + return true +} + +// Add a node to the reinsertion list. All its branches will later +// be reinserted into the index structure. +func d17reInsert(node *d17nodeT, listNode **d17listNodeT) { + newListNode := &d17listNodeT{} + newListNode.node = node + newListNode.next = *listNode + *listNode = newListNode +} + +// d17search in an index tree or subtree for all data retangles that d17overlap the argument rectangle. 
+func d17search(node *d17nodeT, rect d17rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) { + if node.isInternalNode() { + // This is an internal node in the tree + for index := 0; index < node.count; index++ { + if d17overlap(rect, node.branch[index].rect) { + var ok bool + foundCount, ok = d17search(node.branch[index].child, rect, foundCount, resultCallback) + if !ok { + // The callback indicated to stop searching + return foundCount, false + } + } + } + } else { + // This is a leaf node + for index := 0; index < node.count; index++ { + if d17overlap(rect, node.branch[index].rect) { + id := node.branch[index].data + foundCount++ + if !resultCallback(id) { + return foundCount, false // Don't continue searching + } + + } + } + } + return foundCount, true // Continue searching +} + +func d18fmin(a, b float64) float64 { + if a < b { + return a + } + return b +} +func d18fmax(a, b float64) float64 { + if a > b { + return a + } + return b +} + +const ( + d18numDims = 18 + d18maxNodes = 8 + d18minNodes = d18maxNodes / 2 + d18useSphericalVolume = true // Better split classification, may be slower on some systems +) + +var d18unitSphereVolume = []float64{ + 0.000000, 2.000000, 3.141593, // Dimension 0,1,2 + 4.188790, 4.934802, 5.263789, // Dimension 3,4,5 + 5.167713, 4.724766, 4.058712, // Dimension 6,7,8 + 3.298509, 2.550164, 1.884104, // Dimension 9,10,11 + 1.335263, 0.910629, 0.599265, // Dimension 12,13,14 + 0.381443, 0.235331, 0.140981, // Dimension 15,16,17 + 0.082146, 0.046622, 0.025807, // Dimension 18,19,20 +}[d18numDims] + +type d18RTree struct { + root *d18nodeT ///< Root of tree +} + +/// Minimal bounding rectangle (n-dimensional) +type d18rectT struct { + min [d18numDims]float64 ///< Min dimensions of bounding box + max [d18numDims]float64 ///< Max dimensions of bounding box +} + +/// May be data or may be another subtree +/// The parents level determines this. +/// If the parents level is 0, then this is data +type d18branchT struct { + rect d18rectT ///< Bounds + child *d18nodeT ///< Child node + data interface{} ///< Data Id or Ptr +} + +/// d18nodeT for each branch level +type d18nodeT struct { + count int ///< Count + level int ///< Leaf is zero, others positive + branch [d18maxNodes]d18branchT ///< Branch +} + +func (node *d18nodeT) isInternalNode() bool { + return (node.level > 0) // Not a leaf, but a internal node +} +func (node *d18nodeT) isLeaf() bool { + return (node.level == 0) // A leaf, contains data +} + +/// A link list of nodes for reinsertion after a delete operation +type d18listNodeT struct { + next *d18listNodeT ///< Next in list + node *d18nodeT ///< Node +} + +const d18notTaken = -1 // indicates that position + +/// Variables for finding a split partition +type d18partitionVarsT struct { + partition [d18maxNodes + 1]int + total int + minFill int + count [2]int + cover [2]d18rectT + area [2]float64 + + branchBuf [d18maxNodes + 1]d18branchT + branchCount int + coverSplit d18rectT + coverSplitArea float64 +} + +func d18New() *d18RTree { + // We only support machine word size simple data type eg. integer index or object pointer. + // Since we are storing as union with non data branch + return &d18RTree{ + root: &d18nodeT{}, + } +} + +/// Insert entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. 
+func (tr *d18RTree) Insert(min, max [d18numDims]float64, dataId interface{}) { + var branch d18branchT + branch.data = dataId + for axis := 0; axis < d18numDims; axis++ { + branch.rect.min[axis] = min[axis] + branch.rect.max[axis] = max[axis] + } + d18insertRect(&branch, &tr.root, 0) +} + +/// Remove entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d18RTree) Remove(min, max [d18numDims]float64, dataId interface{}) { + var rect d18rectT + for axis := 0; axis < d18numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + d18removeRect(&rect, dataId, &tr.root) +} + +/// Find all within d18search rectangle +/// \param a_min Min of d18search bounding rect +/// \param a_max Max of d18search bounding rect +/// \param a_searchResult d18search result array. Caller should set grow size. Function will reset, not append to array. +/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching +/// \param a_context User context to pass as parameter to a_resultCallback +/// \return Returns the number of entries found +func (tr *d18RTree) Search(min, max [d18numDims]float64, resultCallback func(data interface{}) bool) int { + var rect d18rectT + for axis := 0; axis < d18numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + foundCount, _ := d18search(tr.root, rect, 0, resultCallback) + return foundCount +} + +/// Count the data elements in this container. This is slow as no internal counter is maintained. +func (tr *d18RTree) Count() int { + var count int + d18countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d18RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d18nodeT{} +} + +func d18countRec(node *d18nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d18countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +func d18insertRectRec(branch *d18branchT, node *d18nodeT, newNode **d18nodeT, level int) bool { + // recurse until we reach the correct level for the new record. data records + // will always be called with a_level == 0 (leaf) + if node.level > level { + // Still above level for insertion, go down tree recursively + var otherNode *d18nodeT + //var newBranch d18branchT + + // find the optimal branch for this record + index := d18pickBranch(&branch.rect, node) + + // recursively insert this record into the picked branch + childWasSplit := d18insertRectRec(branch, node.branch[index].child, &otherNode, level) + + if !childWasSplit { + // Child was not split. Merge the bounding box of the new record with the + // existing bounding box + node.branch[index].rect = d18combineRect(&branch.rect, &(node.branch[index].rect)) + return false + } else { + // Child was split. 
The old branches are now re-partitioned to two nodes + // so we have to re-calculate the bounding boxes of each node + node.branch[index].rect = d18nodeCover(node.branch[index].child) + var newBranch d18branchT + newBranch.child = otherNode + newBranch.rect = d18nodeCover(otherNode) + + // The old node is already a child of a_node. Now add the newly-created + // node to a_node as well. a_node might be split because of that. + return d18addBranch(&newBranch, node, newNode) + } + } else if node.level == level { + // We have reached level for insertion. Add rect, split if necessary + return d18addBranch(branch, node, newNode) + } else { + // Should never occur + return false + } +} + +// Insert a data rectangle into an index structure. +// d18insertRect provides for splitting the root; +// returns 1 if root was split, 0 if it was not. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +// InsertRect2 does the recursion. +// +func d18insertRect(branch *d18branchT, root **d18nodeT, level int) bool { + var newNode *d18nodeT + + if d18insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d18nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d18branchT + + // add old root node as a child of the new root + newBranch.rect = d18nodeCover(*root) + newBranch.child = *root + d18addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d18nodeCover(newNode) + newBranch.child = newNode + d18addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d18nodeCover(node *d18nodeT) d18rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d18combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d18addBranch(branch *d18branchT, node *d18nodeT, newNode **d18nodeT) bool { + if node.count < d18maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d18splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d18disconnectBranch(node *d18nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. Pick the one that will need the smallest increase +// in area to accomodate the new rectangle. This will result in the +// least total area for the covering rectangles in the current node. +// In case of a tie, pick the one which was smaller before, to get +// the best resolution when searching. 
+func d18pickBranch(rect *d18rectT, node *d18nodeT) int { + var firstTime bool = true + var increase float64 + var bestIncr float64 = -1 + var area float64 + var bestArea float64 + var best int + var tempRect d18rectT + + for index := 0; index < node.count; index++ { + curRect := &node.branch[index].rect + area = d18calcRectVolume(curRect) + tempRect = d18combineRect(rect, curRect) + increase = d18calcRectVolume(&tempRect) - area + if (increase < bestIncr) || firstTime { + best = index + bestArea = area + bestIncr = increase + firstTime = false + } else if (increase == bestIncr) && (area < bestArea) { + best = index + bestArea = area + bestIncr = increase + } + } + return best +} + +// Combine two rectangles into larger one containing both +func d18combineRect(rectA, rectB *d18rectT) d18rectT { + var newRect d18rectT + + for index := 0; index < d18numDims; index++ { + newRect.min[index] = d18fmin(rectA.min[index], rectB.min[index]) + newRect.max[index] = d18fmax(rectA.max[index], rectB.max[index]) + } + + return newRect +} + +// Split a node. +// Divides the nodes branches and the extra one between two nodes. +// Old node is one of the new ones, and one really new one is created. +// Tries more than one method for choosing a partition, uses best result. +func d18splitNode(node *d18nodeT, branch *d18branchT, newNode **d18nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d18partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d18getBranches(node, branch, parVars) + + // Find partition + d18choosePartition(parVars, d18minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d18nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d18loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d18rectVolume(rect *d18rectT) float64 { + var volume float64 = 1 + for index := 0; index < d18numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d18rectT +func d18rectSphericalVolume(rect *d18rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d18numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. + if d18numDims == 5 { + return (radius * radius * radius * radius * radius * d18unitSphereVolume) + } else if d18numDims == 4 { + return (radius * radius * radius * radius * d18unitSphereVolume) + } else if d18numDims == 3 { + return (radius * radius * radius * d18unitSphereVolume) + } else if d18numDims == 2 { + return (radius * radius * d18unitSphereVolume) + } else { + return (math.Pow(radius, d18numDims) * d18unitSphereVolume) + } +} + +// Use one of the methods to calculate retangle volume +func d18calcRectVolume(rect *d18rectT) float64 { + if d18useSphericalVolume { + return d18rectSphericalVolume(rect) // Slower but helps certain merge cases + } else { // RTREE_USE_SPHERICAL_VOLUME + return d18rectVolume(rect) // Faster but can cause poor merges + } // RTREE_USE_SPHERICAL_VOLUME +} + +// Load branch buffer with branches from full node plus the extra branch. 
+func d18getBranches(node *d18nodeT, branch *d18branchT, parVars *d18partitionVarsT) { + // Load the branch buffer + for index := 0; index < d18maxNodes; index++ { + parVars.branchBuf[index] = node.branch[index] + } + parVars.branchBuf[d18maxNodes] = *branch + parVars.branchCount = d18maxNodes + 1 + + // Calculate rect containing all in the set + parVars.coverSplit = parVars.branchBuf[0].rect + for index := 1; index < d18maxNodes+1; index++ { + parVars.coverSplit = d18combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect) + } + parVars.coverSplitArea = d18calcRectVolume(&parVars.coverSplit) +} + +// Method #0 for choosing a partition: +// As the seeds for the two groups, pick the two rects that would waste the +// most area if covered by a single rectangle, i.e. evidently the worst pair +// to have in the same group. +// Of the remaining, one at a time is chosen to be put in one of the two groups. +// The one chosen is the one with the greatest difference in area expansion +// depending on which group - the rect most strongly attracted to one group +// and repelled from the other. +// If one group gets too full (more would force other group to violate min +// fill requirement) then other group gets the rest. +// These last are the ones that can go in either group most easily. +func d18choosePartition(parVars *d18partitionVarsT, minFill int) { + var biggestDiff float64 + var group, chosen, betterGroup int + + d18initParVars(parVars, parVars.branchCount, minFill) + d18pickSeeds(parVars) + + for ((parVars.count[0] + parVars.count[1]) < parVars.total) && + (parVars.count[0] < (parVars.total - parVars.minFill)) && + (parVars.count[1] < (parVars.total - parVars.minFill)) { + biggestDiff = -1 + for index := 0; index < parVars.total; index++ { + if d18notTaken == parVars.partition[index] { + curRect := &parVars.branchBuf[index].rect + rect0 := d18combineRect(curRect, &parVars.cover[0]) + rect1 := d18combineRect(curRect, &parVars.cover[1]) + growth0 := d18calcRectVolume(&rect0) - parVars.area[0] + growth1 := d18calcRectVolume(&rect1) - parVars.area[1] + diff := growth1 - growth0 + if diff >= 0 { + group = 0 + } else { + group = 1 + diff = -diff + } + + if diff > biggestDiff { + biggestDiff = diff + chosen = index + betterGroup = group + } else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) { + chosen = index + betterGroup = group + } + } + } + d18classify(chosen, betterGroup, parVars) + } + + // If one group too full, put remaining rects in the other + if (parVars.count[0] + parVars.count[1]) < parVars.total { + if parVars.count[0] >= parVars.total-parVars.minFill { + group = 1 + } else { + group = 0 + } + for index := 0; index < parVars.total; index++ { + if d18notTaken == parVars.partition[index] { + d18classify(index, group, parVars) + } + } + } +} + +// Copy branches from the buffer into two nodes according to the partition. +func d18loadNodes(nodeA, nodeB *d18nodeT, parVars *d18partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d18nodeT{nodeA, nodeB} + + // It is assured that d18addBranch here will not cause a node split. + d18addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d18partitionVarsT structure. 
+func d18initParVars(parVars *d18partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d18notTaken + } +} + +func d18pickSeeds(parVars *d18partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d18maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d18calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d18combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d18calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d18classify(seed0, 0, parVars) + d18classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. +func d18classify(index, group int, parVars *d18partitionVarsT) { + parVars.partition[index] = group + + // Calculate combined rect + if parVars.count[group] == 0 { + parVars.cover[group] = parVars.branchBuf[index].rect + } else { + parVars.cover[group] = d18combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group]) + } + + // Calculate volume of combined rect + parVars.area[group] = d18calcRectVolume(&parVars.cover[group]) + + parVars.count[group]++ +} + +// Delete a data rectangle from an index structure. +// Pass in a pointer to a d18rectT, the tid of the record, ptr to ptr to root node. +// Returns 1 if record not found, 0 if success. +// d18removeRect provides for eliminating the root. +func d18removeRect(rect *d18rectT, id interface{}, root **d18nodeT) bool { + var reInsertList *d18listNodeT + + if !d18removeRectRec(rect, id, *root, &reInsertList) { + // Found and deleted a data item + // Reinsert any branches from eliminated nodes + for reInsertList != nil { + tempNode := reInsertList.node + + for index := 0; index < tempNode.count; index++ { + // TODO go over this code. should I use (tempNode->m_level - 1)? + d18insertRect(&tempNode.branch[index], root, tempNode.level) + } + reInsertList = reInsertList.next + } + + // Check for redundant root (not leaf, 1 child) and eliminate TODO replace + // if with while? In case there is a whole branch of redundant roots... + if (*root).count == 1 && (*root).isInternalNode() { + tempNode := (*root).branch[0].child + *root = tempNode + } + return false + } else { + return true + } +} + +// Delete a rectangle from non-root part of an index structure. +// Called by d18removeRect. Descends tree recursively, +// merges branches on the way back up. +// Returns 1 if record not found, 0 if success. 
+func d18removeRectRec(rect *d18rectT, id interface{}, node *d18nodeT, listNode **d18listNodeT) bool { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + if d18overlap(*rect, node.branch[index].rect) { + if !d18removeRectRec(rect, id, node.branch[index].child, listNode) { + if node.branch[index].child.count >= d18minNodes { + // child removed, just resize parent rect + node.branch[index].rect = d18nodeCover(node.branch[index].child) + } else { + // child removed, not enough entries in node, eliminate node + d18reInsert(node.branch[index].child, listNode) + d18disconnectBranch(node, index) // Must return after this call as count has changed + } + return false + } + } + } + return true + } else { // A leaf node + for index := 0; index < node.count; index++ { + if node.branch[index].data == id { + d18disconnectBranch(node, index) // Must return after this call as count has changed + return false + } + } + return true + } +} + +// Decide whether two rectangles d18overlap. +func d18overlap(rectA, rectB d18rectT) bool { + for index := 0; index < d18numDims; index++ { + if rectA.min[index] > rectB.max[index] || + rectB.min[index] > rectA.max[index] { + return false + } + } + return true +} + +// Add a node to the reinsertion list. All its branches will later +// be reinserted into the index structure. +func d18reInsert(node *d18nodeT, listNode **d18listNodeT) { + newListNode := &d18listNodeT{} + newListNode.node = node + newListNode.next = *listNode + *listNode = newListNode +} + +// d18search in an index tree or subtree for all data retangles that d18overlap the argument rectangle. +func d18search(node *d18nodeT, rect d18rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) { + if node.isInternalNode() { + // This is an internal node in the tree + for index := 0; index < node.count; index++ { + if d18overlap(rect, node.branch[index].rect) { + var ok bool + foundCount, ok = d18search(node.branch[index].child, rect, foundCount, resultCallback) + if !ok { + // The callback indicated to stop searching + return foundCount, false + } + } + } + } else { + // This is a leaf node + for index := 0; index < node.count; index++ { + if d18overlap(rect, node.branch[index].rect) { + id := node.branch[index].data + foundCount++ + if !resultCallback(id) { + return foundCount, false // Don't continue searching + } + + } + } + } + return foundCount, true // Continue searching +} + +func d19fmin(a, b float64) float64 { + if a < b { + return a + } + return b +} +func d19fmax(a, b float64) float64 { + if a > b { + return a + } + return b +} + +const ( + d19numDims = 19 + d19maxNodes = 8 + d19minNodes = d19maxNodes / 2 + d19useSphericalVolume = true // Better split classification, may be slower on some systems +) + +var d19unitSphereVolume = []float64{ + 0.000000, 2.000000, 3.141593, // Dimension 0,1,2 + 4.188790, 4.934802, 5.263789, // Dimension 3,4,5 + 5.167713, 4.724766, 4.058712, // Dimension 6,7,8 + 3.298509, 2.550164, 1.884104, // Dimension 9,10,11 + 1.335263, 0.910629, 0.599265, // Dimension 12,13,14 + 0.381443, 0.235331, 0.140981, // Dimension 15,16,17 + 0.082146, 0.046622, 0.025807, // Dimension 18,19,20 +}[d19numDims] + +type d19RTree struct { + root *d19nodeT ///< Root of tree +} + +/// Minimal bounding rectangle (n-dimensional) +type d19rectT struct { + min [d19numDims]float64 ///< Min dimensions of bounding box + max [d19numDims]float64 ///< Max dimensions of bounding box +} + +/// May be data or may be another subtree +/// The 
parent's level determines this.
+/// If the parent's level is 0, then this is data
+type d19branchT struct {
+	rect  d19rectT    ///< Bounds
+	child *d19nodeT   ///< Child node
+	data  interface{} ///< Data Id or Ptr
+}
+
+/// d19nodeT for each branch level
+type d19nodeT struct {
+	count  int                     ///< Count
+	level  int                     ///< Leaf is zero, others positive
+	branch [d19maxNodes]d19branchT ///< Branch
+}
+
+func (node *d19nodeT) isInternalNode() bool {
+	return (node.level > 0) // Not a leaf, but an internal node
+}
+func (node *d19nodeT) isLeaf() bool {
+	return (node.level == 0) // A leaf, contains data
+}
+
+/// A linked list of nodes for reinsertion after a delete operation
+type d19listNodeT struct {
+	next *d19listNodeT ///< Next in list
+	node *d19nodeT     ///< Node
+}
+
+const d19notTaken = -1 // indicates that the position is not yet assigned to a group
+
+/// Variables for finding a split partition
+type d19partitionVarsT struct {
+	partition [d19maxNodes + 1]int
+	total     int
+	minFill   int
+	count     [2]int
+	cover     [2]d19rectT
+	area      [2]float64
+
+	branchBuf      [d19maxNodes + 1]d19branchT
+	branchCount    int
+	coverSplit     d19rectT
+	coverSplitArea float64
+}
+
+func d19New() *d19RTree {
+	// We only support machine word size simple data types, e.g. an integer index or an object pointer,
+	// since they are stored as a union with the non-data branch.
+	return &d19RTree{
+		root: &d19nodeT{},
+	}
+}
+
+/// Insert entry
+/// \param a_min Min of bounding rect
+/// \param a_max Max of bounding rect
+/// \param a_dataId Positive Id of data. May be zero, but negative numbers are not allowed.
+func (tr *d19RTree) Insert(min, max [d19numDims]float64, dataId interface{}) {
+	var branch d19branchT
+	branch.data = dataId
+	for axis := 0; axis < d19numDims; axis++ {
+		branch.rect.min[axis] = min[axis]
+		branch.rect.max[axis] = max[axis]
+	}
+	d19insertRect(&branch, &tr.root, 0)
+}
+
+/// Remove entry
+/// \param a_min Min of bounding rect
+/// \param a_max Max of bounding rect
+/// \param a_dataId Positive Id of data. May be zero, but negative numbers are not allowed.
+func (tr *d19RTree) Remove(min, max [d19numDims]float64, dataId interface{}) {
+	var rect d19rectT
+	for axis := 0; axis < d19numDims; axis++ {
+		rect.min[axis] = min[axis]
+		rect.max[axis] = max[axis]
+	}
+	d19removeRect(&rect, dataId, &tr.root)
+}
+
+/// Find all within d19search rectangle
+/// \param a_min Min of d19search bounding rect
+/// \param a_max Max of d19search bounding rect
+/// \param a_searchResult d19search result array. Caller should set grow size. Function will reset, not append to array.
+/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching
+/// \param a_context User context to pass as parameter to a_resultCallback
+/// \return Returns the number of entries found
+func (tr *d19RTree) Search(min, max [d19numDims]float64, resultCallback func(data interface{}) bool) int {
+	var rect d19rectT
+	for axis := 0; axis < d19numDims; axis++ {
+		rect.min[axis] = min[axis]
+		rect.max[axis] = max[axis]
+	}
+	foundCount, _ := d19search(tr.root, rect, 0, resultCallback)
+	return foundCount
+}
+
+/// Count the data elements in this container. This is slow as no internal counter is maintained.
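+/// It walks every node of the tree, so its cost grows with the number of stored entries.
+///
+/// A minimal usage sketch of this generated 19-dimensional variant; the values are
+/// hypothetical and the snippet assumes it runs inside this package, since all of
+/// these identifiers are unexported:
+///
+///	tr := d19New()
+///	var min, max [d19numDims]float64
+///	for i := 0; i < d19numDims; i++ {
+///		min[i], max[i] = 0.0, 1.0 // a unit box
+///	}
+///	tr.Insert(min, max, "item-1")
+///	hits := tr.Search(min, max, func(data interface{}) bool {
+///		return true // keep collecting results
+///	})
+///	_ = hits       // number of overlapping entries found
+///	_ = tr.Count() // total entries currently stored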
+func (tr *d19RTree) Count() int { + var count int + d19countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d19RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d19nodeT{} +} + +func d19countRec(node *d19nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d19countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +func d19insertRectRec(branch *d19branchT, node *d19nodeT, newNode **d19nodeT, level int) bool { + // recurse until we reach the correct level for the new record. data records + // will always be called with a_level == 0 (leaf) + if node.level > level { + // Still above level for insertion, go down tree recursively + var otherNode *d19nodeT + //var newBranch d19branchT + + // find the optimal branch for this record + index := d19pickBranch(&branch.rect, node) + + // recursively insert this record into the picked branch + childWasSplit := d19insertRectRec(branch, node.branch[index].child, &otherNode, level) + + if !childWasSplit { + // Child was not split. Merge the bounding box of the new record with the + // existing bounding box + node.branch[index].rect = d19combineRect(&branch.rect, &(node.branch[index].rect)) + return false + } else { + // Child was split. The old branches are now re-partitioned to two nodes + // so we have to re-calculate the bounding boxes of each node + node.branch[index].rect = d19nodeCover(node.branch[index].child) + var newBranch d19branchT + newBranch.child = otherNode + newBranch.rect = d19nodeCover(otherNode) + + // The old node is already a child of a_node. Now add the newly-created + // node to a_node as well. a_node might be split because of that. + return d19addBranch(&newBranch, node, newNode) + } + } else if node.level == level { + // We have reached level for insertion. Add rect, split if necessary + return d19addBranch(branch, node, newNode) + } else { + // Should never occur + return false + } +} + +// Insert a data rectangle into an index structure. +// d19insertRect provides for splitting the root; +// returns 1 if root was split, 0 if it was not. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +// InsertRect2 does the recursion. 
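+// When the recursion reports that the root itself was split, a new root is
+// allocated one level higher and the old root plus the newly split node become
+// its two children; this is the only place where the tree grows in height.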
+// +func d19insertRect(branch *d19branchT, root **d19nodeT, level int) bool { + var newNode *d19nodeT + + if d19insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d19nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d19branchT + + // add old root node as a child of the new root + newBranch.rect = d19nodeCover(*root) + newBranch.child = *root + d19addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d19nodeCover(newNode) + newBranch.child = newNode + d19addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d19nodeCover(node *d19nodeT) d19rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d19combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d19addBranch(branch *d19branchT, node *d19nodeT, newNode **d19nodeT) bool { + if node.count < d19maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d19splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d19disconnectBranch(node *d19nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. Pick the one that will need the smallest increase +// in area to accomodate the new rectangle. This will result in the +// least total area for the covering rectangles in the current node. +// In case of a tie, pick the one which was smaller before, to get +// the best resolution when searching. +func d19pickBranch(rect *d19rectT, node *d19nodeT) int { + var firstTime bool = true + var increase float64 + var bestIncr float64 = -1 + var area float64 + var bestArea float64 + var best int + var tempRect d19rectT + + for index := 0; index < node.count; index++ { + curRect := &node.branch[index].rect + area = d19calcRectVolume(curRect) + tempRect = d19combineRect(rect, curRect) + increase = d19calcRectVolume(&tempRect) - area + if (increase < bestIncr) || firstTime { + best = index + bestArea = area + bestIncr = increase + firstTime = false + } else if (increase == bestIncr) && (area < bestArea) { + best = index + bestArea = area + bestIncr = increase + } + } + return best +} + +// Combine two rectangles into larger one containing both +func d19combineRect(rectA, rectB *d19rectT) d19rectT { + var newRect d19rectT + + for index := 0; index < d19numDims; index++ { + newRect.min[index] = d19fmin(rectA.min[index], rectB.min[index]) + newRect.max[index] = d19fmax(rectA.max[index], rectB.max[index]) + } + + return newRect +} + +// Split a node. +// Divides the nodes branches and the extra one between two nodes. +// Old node is one of the new ones, and one really new one is created. +// Tries more than one method for choosing a partition, uses best result. 
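+// In this port that means partition method #0 below: the branches are copied
+// into a buffer together with the overflowing branch, a partition is chosen,
+// and the two groups are loaded back into the old node and the new node, both
+// of which keep the original node's level.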
+func d19splitNode(node *d19nodeT, branch *d19branchT, newNode **d19nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d19partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d19getBranches(node, branch, parVars) + + // Find partition + d19choosePartition(parVars, d19minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d19nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d19loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d19rectVolume(rect *d19rectT) float64 { + var volume float64 = 1 + for index := 0; index < d19numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d19rectT +func d19rectSphericalVolume(rect *d19rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d19numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. + if d19numDims == 5 { + return (radius * radius * radius * radius * radius * d19unitSphereVolume) + } else if d19numDims == 4 { + return (radius * radius * radius * radius * d19unitSphereVolume) + } else if d19numDims == 3 { + return (radius * radius * radius * d19unitSphereVolume) + } else if d19numDims == 2 { + return (radius * radius * d19unitSphereVolume) + } else { + return (math.Pow(radius, d19numDims) * d19unitSphereVolume) + } +} + +// Use one of the methods to calculate retangle volume +func d19calcRectVolume(rect *d19rectT) float64 { + if d19useSphericalVolume { + return d19rectSphericalVolume(rect) // Slower but helps certain merge cases + } else { // RTREE_USE_SPHERICAL_VOLUME + return d19rectVolume(rect) // Faster but can cause poor merges + } // RTREE_USE_SPHERICAL_VOLUME +} + +// Load branch buffer with branches from full node plus the extra branch. +func d19getBranches(node *d19nodeT, branch *d19branchT, parVars *d19partitionVarsT) { + // Load the branch buffer + for index := 0; index < d19maxNodes; index++ { + parVars.branchBuf[index] = node.branch[index] + } + parVars.branchBuf[d19maxNodes] = *branch + parVars.branchCount = d19maxNodes + 1 + + // Calculate rect containing all in the set + parVars.coverSplit = parVars.branchBuf[0].rect + for index := 1; index < d19maxNodes+1; index++ { + parVars.coverSplit = d19combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect) + } + parVars.coverSplitArea = d19calcRectVolume(&parVars.coverSplit) +} + +// Method #0 for choosing a partition: +// As the seeds for the two groups, pick the two rects that would waste the +// most area if covered by a single rectangle, i.e. evidently the worst pair +// to have in the same group. +// Of the remaining, one at a time is chosen to be put in one of the two groups. +// The one chosen is the one with the greatest difference in area expansion +// depending on which group - the rect most strongly attracted to one group +// and repelled from the other. +// If one group gets too full (more would force other group to violate min +// fill requirement) then other group gets the rest. +// These last are the ones that can go in either group most easily. 
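+// As a small illustration of the seed step (the numbers are made up): if two
+// candidate rects have volumes 2 and 3 and the rect covering both has volume 9,
+// the wasted volume is 9 - 2 - 3 = 4; the pair with the largest such waste is
+// picked as the two seeds, one for each group.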
+func d19choosePartition(parVars *d19partitionVarsT, minFill int) { + var biggestDiff float64 + var group, chosen, betterGroup int + + d19initParVars(parVars, parVars.branchCount, minFill) + d19pickSeeds(parVars) + + for ((parVars.count[0] + parVars.count[1]) < parVars.total) && + (parVars.count[0] < (parVars.total - parVars.minFill)) && + (parVars.count[1] < (parVars.total - parVars.minFill)) { + biggestDiff = -1 + for index := 0; index < parVars.total; index++ { + if d19notTaken == parVars.partition[index] { + curRect := &parVars.branchBuf[index].rect + rect0 := d19combineRect(curRect, &parVars.cover[0]) + rect1 := d19combineRect(curRect, &parVars.cover[1]) + growth0 := d19calcRectVolume(&rect0) - parVars.area[0] + growth1 := d19calcRectVolume(&rect1) - parVars.area[1] + diff := growth1 - growth0 + if diff >= 0 { + group = 0 + } else { + group = 1 + diff = -diff + } + + if diff > biggestDiff { + biggestDiff = diff + chosen = index + betterGroup = group + } else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) { + chosen = index + betterGroup = group + } + } + } + d19classify(chosen, betterGroup, parVars) + } + + // If one group too full, put remaining rects in the other + if (parVars.count[0] + parVars.count[1]) < parVars.total { + if parVars.count[0] >= parVars.total-parVars.minFill { + group = 1 + } else { + group = 0 + } + for index := 0; index < parVars.total; index++ { + if d19notTaken == parVars.partition[index] { + d19classify(index, group, parVars) + } + } + } +} + +// Copy branches from the buffer into two nodes according to the partition. +func d19loadNodes(nodeA, nodeB *d19nodeT, parVars *d19partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d19nodeT{nodeA, nodeB} + + // It is assured that d19addBranch here will not cause a node split. + d19addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d19partitionVarsT structure. +func d19initParVars(parVars *d19partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d19notTaken + } +} + +func d19pickSeeds(parVars *d19partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d19maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d19calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d19combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d19calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d19classify(seed0, 0, parVars) + d19classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. 
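+// Besides recording the group in the partition table, this keeps the group's
+// covering rectangle, its cached volume and its member count up to date, which
+// is what the selection loop in d19choosePartition reads on the next pass.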
+func d19classify(index, group int, parVars *d19partitionVarsT) { + parVars.partition[index] = group + + // Calculate combined rect + if parVars.count[group] == 0 { + parVars.cover[group] = parVars.branchBuf[index].rect + } else { + parVars.cover[group] = d19combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group]) + } + + // Calculate volume of combined rect + parVars.area[group] = d19calcRectVolume(&parVars.cover[group]) + + parVars.count[group]++ +} + +// Delete a data rectangle from an index structure. +// Pass in a pointer to a d19rectT, the tid of the record, ptr to ptr to root node. +// Returns 1 if record not found, 0 if success. +// d19removeRect provides for eliminating the root. +func d19removeRect(rect *d19rectT, id interface{}, root **d19nodeT) bool { + var reInsertList *d19listNodeT + + if !d19removeRectRec(rect, id, *root, &reInsertList) { + // Found and deleted a data item + // Reinsert any branches from eliminated nodes + for reInsertList != nil { + tempNode := reInsertList.node + + for index := 0; index < tempNode.count; index++ { + // TODO go over this code. should I use (tempNode->m_level - 1)? + d19insertRect(&tempNode.branch[index], root, tempNode.level) + } + reInsertList = reInsertList.next + } + + // Check for redundant root (not leaf, 1 child) and eliminate TODO replace + // if with while? In case there is a whole branch of redundant roots... + if (*root).count == 1 && (*root).isInternalNode() { + tempNode := (*root).branch[0].child + *root = tempNode + } + return false + } else { + return true + } +} + +// Delete a rectangle from non-root part of an index structure. +// Called by d19removeRect. Descends tree recursively, +// merges branches on the way back up. +// Returns 1 if record not found, 0 if success. +func d19removeRectRec(rect *d19rectT, id interface{}, node *d19nodeT, listNode **d19listNodeT) bool { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + if d19overlap(*rect, node.branch[index].rect) { + if !d19removeRectRec(rect, id, node.branch[index].child, listNode) { + if node.branch[index].child.count >= d19minNodes { + // child removed, just resize parent rect + node.branch[index].rect = d19nodeCover(node.branch[index].child) + } else { + // child removed, not enough entries in node, eliminate node + d19reInsert(node.branch[index].child, listNode) + d19disconnectBranch(node, index) // Must return after this call as count has changed + } + return false + } + } + } + return true + } else { // A leaf node + for index := 0; index < node.count; index++ { + if node.branch[index].data == id { + d19disconnectBranch(node, index) // Must return after this call as count has changed + return false + } + } + return true + } +} + +// Decide whether two rectangles d19overlap. +func d19overlap(rectA, rectB d19rectT) bool { + for index := 0; index < d19numDims; index++ { + if rectA.min[index] > rectB.max[index] || + rectB.min[index] > rectA.max[index] { + return false + } + } + return true +} + +// Add a node to the reinsertion list. All its branches will later +// be reinserted into the index structure. +func d19reInsert(node *d19nodeT, listNode **d19listNodeT) { + newListNode := &d19listNodeT{} + newListNode.node = node + newListNode.next = *listNode + *listNode = newListNode +} + +// d19search in an index tree or subtree for all data retangles that d19overlap the argument rectangle. 
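+// The callback decides whether the walk continues: returning false stops the
+// search immediately and the boolean result propagates that stop upward.
+// A sketch of an early-exit query through the public wrapper (tr, min and max
+// are assumed to exist; the values are hypothetical):
+//
+//	var first interface{}
+//	tr.Search(min, max, func(data interface{}) bool {
+//		first = data
+//		return false // stop after the first overlapping entry
+//	})
+//	_ = first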
+func d19search(node *d19nodeT, rect d19rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) { + if node.isInternalNode() { + // This is an internal node in the tree + for index := 0; index < node.count; index++ { + if d19overlap(rect, node.branch[index].rect) { + var ok bool + foundCount, ok = d19search(node.branch[index].child, rect, foundCount, resultCallback) + if !ok { + // The callback indicated to stop searching + return foundCount, false + } + } + } + } else { + // This is a leaf node + for index := 0; index < node.count; index++ { + if d19overlap(rect, node.branch[index].rect) { + id := node.branch[index].data + foundCount++ + if !resultCallback(id) { + return foundCount, false // Don't continue searching + } + + } + } + } + return foundCount, true // Continue searching +} + +func d20fmin(a, b float64) float64 { + if a < b { + return a + } + return b +} +func d20fmax(a, b float64) float64 { + if a > b { + return a + } + return b +} + +const ( + d20numDims = 20 + d20maxNodes = 8 + d20minNodes = d20maxNodes / 2 + d20useSphericalVolume = true // Better split classification, may be slower on some systems +) + +var d20unitSphereVolume = []float64{ + 0.000000, 2.000000, 3.141593, // Dimension 0,1,2 + 4.188790, 4.934802, 5.263789, // Dimension 3,4,5 + 5.167713, 4.724766, 4.058712, // Dimension 6,7,8 + 3.298509, 2.550164, 1.884104, // Dimension 9,10,11 + 1.335263, 0.910629, 0.599265, // Dimension 12,13,14 + 0.381443, 0.235331, 0.140981, // Dimension 15,16,17 + 0.082146, 0.046622, 0.025807, // Dimension 18,19,20 +}[d20numDims] + +type d20RTree struct { + root *d20nodeT ///< Root of tree +} + +/// Minimal bounding rectangle (n-dimensional) +type d20rectT struct { + min [d20numDims]float64 ///< Min dimensions of bounding box + max [d20numDims]float64 ///< Max dimensions of bounding box +} + +/// May be data or may be another subtree +/// The parents level determines this. +/// If the parents level is 0, then this is data +type d20branchT struct { + rect d20rectT ///< Bounds + child *d20nodeT ///< Child node + data interface{} ///< Data Id or Ptr +} + +/// d20nodeT for each branch level +type d20nodeT struct { + count int ///< Count + level int ///< Leaf is zero, others positive + branch [d20maxNodes]d20branchT ///< Branch +} + +func (node *d20nodeT) isInternalNode() bool { + return (node.level > 0) // Not a leaf, but a internal node +} +func (node *d20nodeT) isLeaf() bool { + return (node.level == 0) // A leaf, contains data +} + +/// A link list of nodes for reinsertion after a delete operation +type d20listNodeT struct { + next *d20listNodeT ///< Next in list + node *d20nodeT ///< Node +} + +const d20notTaken = -1 // indicates that position + +/// Variables for finding a split partition +type d20partitionVarsT struct { + partition [d20maxNodes + 1]int + total int + minFill int + count [2]int + cover [2]d20rectT + area [2]float64 + + branchBuf [d20maxNodes + 1]d20branchT + branchCount int + coverSplit d20rectT + coverSplitArea float64 +} + +func d20New() *d20RTree { + // We only support machine word size simple data type eg. integer index or object pointer. + // Since we are storing as union with non data branch + return &d20RTree{ + root: &d20nodeT{}, + } +} + +/// Insert entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. 
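+/// Point data can be stored by passing the same coordinates for min and max,
+/// which yields a zero-extent rectangle; a hypothetical sketch (tr is assumed
+/// to be a *d20RTree created with d20New):
+///
+///	var p [d20numDims]float64 // a single 20-dimensional point
+///	tr.Insert(p, p, 42)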
+func (tr *d20RTree) Insert(min, max [d20numDims]float64, dataId interface{}) { + var branch d20branchT + branch.data = dataId + for axis := 0; axis < d20numDims; axis++ { + branch.rect.min[axis] = min[axis] + branch.rect.max[axis] = max[axis] + } + d20insertRect(&branch, &tr.root, 0) +} + +/// Remove entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d20RTree) Remove(min, max [d20numDims]float64, dataId interface{}) { + var rect d20rectT + for axis := 0; axis < d20numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + d20removeRect(&rect, dataId, &tr.root) +} + +/// Find all within d20search rectangle +/// \param a_min Min of d20search bounding rect +/// \param a_max Max of d20search bounding rect +/// \param a_searchResult d20search result array. Caller should set grow size. Function will reset, not append to array. +/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching +/// \param a_context User context to pass as parameter to a_resultCallback +/// \return Returns the number of entries found +func (tr *d20RTree) Search(min, max [d20numDims]float64, resultCallback func(data interface{}) bool) int { + var rect d20rectT + for axis := 0; axis < d20numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + foundCount, _ := d20search(tr.root, rect, 0, resultCallback) + return foundCount +} + +/// Count the data elements in this container. This is slow as no internal counter is maintained. +func (tr *d20RTree) Count() int { + var count int + d20countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d20RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d20nodeT{} +} + +func d20countRec(node *d20nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d20countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +func d20insertRectRec(branch *d20branchT, node *d20nodeT, newNode **d20nodeT, level int) bool { + // recurse until we reach the correct level for the new record. data records + // will always be called with a_level == 0 (leaf) + if node.level > level { + // Still above level for insertion, go down tree recursively + var otherNode *d20nodeT + //var newBranch d20branchT + + // find the optimal branch for this record + index := d20pickBranch(&branch.rect, node) + + // recursively insert this record into the picked branch + childWasSplit := d20insertRectRec(branch, node.branch[index].child, &otherNode, level) + + if !childWasSplit { + // Child was not split. Merge the bounding box of the new record with the + // existing bounding box + node.branch[index].rect = d20combineRect(&branch.rect, &(node.branch[index].rect)) + return false + } else { + // Child was split. 
The old branches are now re-partitioned to two nodes + // so we have to re-calculate the bounding boxes of each node + node.branch[index].rect = d20nodeCover(node.branch[index].child) + var newBranch d20branchT + newBranch.child = otherNode + newBranch.rect = d20nodeCover(otherNode) + + // The old node is already a child of a_node. Now add the newly-created + // node to a_node as well. a_node might be split because of that. + return d20addBranch(&newBranch, node, newNode) + } + } else if node.level == level { + // We have reached level for insertion. Add rect, split if necessary + return d20addBranch(branch, node, newNode) + } else { + // Should never occur + return false + } +} + +// Insert a data rectangle into an index structure. +// d20insertRect provides for splitting the root; +// returns 1 if root was split, 0 if it was not. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +// InsertRect2 does the recursion. +// +func d20insertRect(branch *d20branchT, root **d20nodeT, level int) bool { + var newNode *d20nodeT + + if d20insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d20nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d20branchT + + // add old root node as a child of the new root + newBranch.rect = d20nodeCover(*root) + newBranch.child = *root + d20addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d20nodeCover(newNode) + newBranch.child = newNode + d20addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d20nodeCover(node *d20nodeT) d20rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d20combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d20addBranch(branch *d20branchT, node *d20nodeT, newNode **d20nodeT) bool { + if node.count < d20maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d20splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d20disconnectBranch(node *d20nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. Pick the one that will need the smallest increase +// in area to accomodate the new rectangle. This will result in the +// least total area for the covering rectangles in the current node. +// In case of a tie, pick the one which was smaller before, to get +// the best resolution when searching. 
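+// For instance (made-up volumes): a branch whose rect would grow from 10 to 12
+// (an increase of 2) is preferred over one growing from 4 to 7 (an increase of
+// 3); if both increased by 2, the branch that was smaller beforehand wins.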
+func d20pickBranch(rect *d20rectT, node *d20nodeT) int { + var firstTime bool = true + var increase float64 + var bestIncr float64 = -1 + var area float64 + var bestArea float64 + var best int + var tempRect d20rectT + + for index := 0; index < node.count; index++ { + curRect := &node.branch[index].rect + area = d20calcRectVolume(curRect) + tempRect = d20combineRect(rect, curRect) + increase = d20calcRectVolume(&tempRect) - area + if (increase < bestIncr) || firstTime { + best = index + bestArea = area + bestIncr = increase + firstTime = false + } else if (increase == bestIncr) && (area < bestArea) { + best = index + bestArea = area + bestIncr = increase + } + } + return best +} + +// Combine two rectangles into larger one containing both +func d20combineRect(rectA, rectB *d20rectT) d20rectT { + var newRect d20rectT + + for index := 0; index < d20numDims; index++ { + newRect.min[index] = d20fmin(rectA.min[index], rectB.min[index]) + newRect.max[index] = d20fmax(rectA.max[index], rectB.max[index]) + } + + return newRect +} + +// Split a node. +// Divides the nodes branches and the extra one between two nodes. +// Old node is one of the new ones, and one really new one is created. +// Tries more than one method for choosing a partition, uses best result. +func d20splitNode(node *d20nodeT, branch *d20branchT, newNode **d20nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d20partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d20getBranches(node, branch, parVars) + + // Find partition + d20choosePartition(parVars, d20minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d20nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d20loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d20rectVolume(rect *d20rectT) float64 { + var volume float64 = 1 + for index := 0; index < d20numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d20rectT +func d20rectSphericalVolume(rect *d20rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d20numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. + if d20numDims == 5 { + return (radius * radius * radius * radius * radius * d20unitSphereVolume) + } else if d20numDims == 4 { + return (radius * radius * radius * radius * d20unitSphereVolume) + } else if d20numDims == 3 { + return (radius * radius * radius * d20unitSphereVolume) + } else if d20numDims == 2 { + return (radius * radius * d20unitSphereVolume) + } else { + return (math.Pow(radius, d20numDims) * d20unitSphereVolume) + } +} + +// Use one of the methods to calculate retangle volume +func d20calcRectVolume(rect *d20rectT) float64 { + if d20useSphericalVolume { + return d20rectSphericalVolume(rect) // Slower but helps certain merge cases + } else { // RTREE_USE_SPHERICAL_VOLUME + return d20rectVolume(rect) // Faster but can cause poor merges + } // RTREE_USE_SPHERICAL_VOLUME +} + +// Load branch buffer with branches from full node plus the extra branch. 
+func d20getBranches(node *d20nodeT, branch *d20branchT, parVars *d20partitionVarsT) { + // Load the branch buffer + for index := 0; index < d20maxNodes; index++ { + parVars.branchBuf[index] = node.branch[index] + } + parVars.branchBuf[d20maxNodes] = *branch + parVars.branchCount = d20maxNodes + 1 + + // Calculate rect containing all in the set + parVars.coverSplit = parVars.branchBuf[0].rect + for index := 1; index < d20maxNodes+1; index++ { + parVars.coverSplit = d20combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect) + } + parVars.coverSplitArea = d20calcRectVolume(&parVars.coverSplit) +} + +// Method #0 for choosing a partition: +// As the seeds for the two groups, pick the two rects that would waste the +// most area if covered by a single rectangle, i.e. evidently the worst pair +// to have in the same group. +// Of the remaining, one at a time is chosen to be put in one of the two groups. +// The one chosen is the one with the greatest difference in area expansion +// depending on which group - the rect most strongly attracted to one group +// and repelled from the other. +// If one group gets too full (more would force other group to violate min +// fill requirement) then other group gets the rest. +// These last are the ones that can go in either group most easily. +func d20choosePartition(parVars *d20partitionVarsT, minFill int) { + var biggestDiff float64 + var group, chosen, betterGroup int + + d20initParVars(parVars, parVars.branchCount, minFill) + d20pickSeeds(parVars) + + for ((parVars.count[0] + parVars.count[1]) < parVars.total) && + (parVars.count[0] < (parVars.total - parVars.minFill)) && + (parVars.count[1] < (parVars.total - parVars.minFill)) { + biggestDiff = -1 + for index := 0; index < parVars.total; index++ { + if d20notTaken == parVars.partition[index] { + curRect := &parVars.branchBuf[index].rect + rect0 := d20combineRect(curRect, &parVars.cover[0]) + rect1 := d20combineRect(curRect, &parVars.cover[1]) + growth0 := d20calcRectVolume(&rect0) - parVars.area[0] + growth1 := d20calcRectVolume(&rect1) - parVars.area[1] + diff := growth1 - growth0 + if diff >= 0 { + group = 0 + } else { + group = 1 + diff = -diff + } + + if diff > biggestDiff { + biggestDiff = diff + chosen = index + betterGroup = group + } else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) { + chosen = index + betterGroup = group + } + } + } + d20classify(chosen, betterGroup, parVars) + } + + // If one group too full, put remaining rects in the other + if (parVars.count[0] + parVars.count[1]) < parVars.total { + if parVars.count[0] >= parVars.total-parVars.minFill { + group = 1 + } else { + group = 0 + } + for index := 0; index < parVars.total; index++ { + if d20notTaken == parVars.partition[index] { + d20classify(index, group, parVars) + } + } + } +} + +// Copy branches from the buffer into two nodes according to the partition. +func d20loadNodes(nodeA, nodeB *d20nodeT, parVars *d20partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d20nodeT{nodeA, nodeB} + + // It is assured that d20addBranch here will not cause a node split. + d20addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d20partitionVarsT structure. 
+func d20initParVars(parVars *d20partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d20notTaken + } +} + +func d20pickSeeds(parVars *d20partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d20maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d20calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d20combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d20calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d20classify(seed0, 0, parVars) + d20classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. +func d20classify(index, group int, parVars *d20partitionVarsT) { + parVars.partition[index] = group + + // Calculate combined rect + if parVars.count[group] == 0 { + parVars.cover[group] = parVars.branchBuf[index].rect + } else { + parVars.cover[group] = d20combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group]) + } + + // Calculate volume of combined rect + parVars.area[group] = d20calcRectVolume(&parVars.cover[group]) + + parVars.count[group]++ +} + +// Delete a data rectangle from an index structure. +// Pass in a pointer to a d20rectT, the tid of the record, ptr to ptr to root node. +// Returns 1 if record not found, 0 if success. +// d20removeRect provides for eliminating the root. +func d20removeRect(rect *d20rectT, id interface{}, root **d20nodeT) bool { + var reInsertList *d20listNodeT + + if !d20removeRectRec(rect, id, *root, &reInsertList) { + // Found and deleted a data item + // Reinsert any branches from eliminated nodes + for reInsertList != nil { + tempNode := reInsertList.node + + for index := 0; index < tempNode.count; index++ { + // TODO go over this code. should I use (tempNode->m_level - 1)? + d20insertRect(&tempNode.branch[index], root, tempNode.level) + } + reInsertList = reInsertList.next + } + + // Check for redundant root (not leaf, 1 child) and eliminate TODO replace + // if with while? In case there is a whole branch of redundant roots... + if (*root).count == 1 && (*root).isInternalNode() { + tempNode := (*root).branch[0].child + *root = tempNode + } + return false + } else { + return true + } +} + +// Delete a rectangle from non-root part of an index structure. +// Called by d20removeRect. Descends tree recursively, +// merges branches on the way back up. +// Returns 1 if record not found, 0 if success. 
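+// In this Go port those C-style return codes map onto the boolean result:
+// true means the record was not found, false means it was found and removed.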
+func d20removeRectRec(rect *d20rectT, id interface{}, node *d20nodeT, listNode **d20listNodeT) bool { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + if d20overlap(*rect, node.branch[index].rect) { + if !d20removeRectRec(rect, id, node.branch[index].child, listNode) { + if node.branch[index].child.count >= d20minNodes { + // child removed, just resize parent rect + node.branch[index].rect = d20nodeCover(node.branch[index].child) + } else { + // child removed, not enough entries in node, eliminate node + d20reInsert(node.branch[index].child, listNode) + d20disconnectBranch(node, index) // Must return after this call as count has changed + } + return false + } + } + } + return true + } else { // A leaf node + for index := 0; index < node.count; index++ { + if node.branch[index].data == id { + d20disconnectBranch(node, index) // Must return after this call as count has changed + return false + } + } + return true + } +} + +// Decide whether two rectangles d20overlap. +func d20overlap(rectA, rectB d20rectT) bool { + for index := 0; index < d20numDims; index++ { + if rectA.min[index] > rectB.max[index] || + rectB.min[index] > rectA.max[index] { + return false + } + } + return true +} + +// Add a node to the reinsertion list. All its branches will later +// be reinserted into the index structure. +func d20reInsert(node *d20nodeT, listNode **d20listNodeT) { + newListNode := &d20listNodeT{} + newListNode.node = node + newListNode.next = *listNode + *listNode = newListNode +} + +// d20search in an index tree or subtree for all data retangles that d20overlap the argument rectangle. +func d20search(node *d20nodeT, rect d20rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) { + if node.isInternalNode() { + // This is an internal node in the tree + for index := 0; index < node.count; index++ { + if d20overlap(rect, node.branch[index].rect) { + var ok bool + foundCount, ok = d20search(node.branch[index].child, rect, foundCount, resultCallback) + if !ok { + // The callback indicated to stop searching + return foundCount, false + } + } + } + } else { + // This is a leaf node + for index := 0; index < node.count; index++ { + if d20overlap(rect, node.branch[index].rect) { + id := node.branch[index].data + foundCount++ + if !resultCallback(id) { + return foundCount, false // Don't continue searching + } + + } + } + } + return foundCount, true // Continue searching +} diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE new file mode 100644 index 0000000..6a66aea --- /dev/null +++ b/vendor/golang.org/x/crypto/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS new file mode 100644 index 0000000..7330990 --- /dev/null +++ b/vendor/golang.org/x/crypto/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/crypto/bcrypt/base64.go b/vendor/golang.org/x/crypto/bcrypt/base64.go new file mode 100644 index 0000000..fc31160 --- /dev/null +++ b/vendor/golang.org/x/crypto/bcrypt/base64.go @@ -0,0 +1,35 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package bcrypt + +import "encoding/base64" + +const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" + +var bcEncoding = base64.NewEncoding(alphabet) + +func base64Encode(src []byte) []byte { + n := bcEncoding.EncodedLen(len(src)) + dst := make([]byte, n) + bcEncoding.Encode(dst, src) + for dst[n-1] == '=' { + n-- + } + return dst[:n] +} + +func base64Decode(src []byte) ([]byte, error) { + numOfEquals := 4 - (len(src) % 4) + for i := 0; i < numOfEquals; i++ { + src = append(src, '=') + } + + dst := make([]byte, bcEncoding.DecodedLen(len(src))) + n, err := bcEncoding.Decode(dst, src) + if err != nil { + return nil, err + } + return dst[:n], nil +} diff --git a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go new file mode 100644 index 0000000..f8b807f --- /dev/null +++ b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go @@ -0,0 +1,294 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing +// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf +package bcrypt // import "golang.org/x/crypto/bcrypt" + +// The code is a port of Provos and Mazières's C implementation. +import ( + "crypto/rand" + "crypto/subtle" + "errors" + "fmt" + "golang.org/x/crypto/blowfish" + "io" + "strconv" +) + +const ( + MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword + MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword + DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword +) + +// The error returned from CompareHashAndPassword when a password and hash do +// not match. +var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password") + +// The error returned from CompareHashAndPassword when a hash is too short to +// be a bcrypt hash. +var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password") + +// The error returned from CompareHashAndPassword when a hash was created with +// a bcrypt algorithm newer than this implementation. +type HashVersionTooNewError byte + +func (hv HashVersionTooNewError) Error() string { + return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion) +} + +// The error returned from CompareHashAndPassword when a hash starts with something other than '$' +type InvalidHashPrefixError byte + +func (ih InvalidHashPrefixError) Error() string { + return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih)) +} + +type InvalidCostError int + +func (ic InvalidCostError) Error() string { + return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), int(MinCost), int(MaxCost)) +} + +const ( + majorVersion = '2' + minorVersion = 'a' + maxSaltSize = 16 + maxCryptedHashSize = 23 + encodedSaltSize = 22 + encodedHashSize = 31 + minHashSize = 59 +) + +// magicCipherData is an IV for the 64 Blowfish encryption calls in +// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes. 
+var magicCipherData = []byte{ + 0x4f, 0x72, 0x70, 0x68, + 0x65, 0x61, 0x6e, 0x42, + 0x65, 0x68, 0x6f, 0x6c, + 0x64, 0x65, 0x72, 0x53, + 0x63, 0x72, 0x79, 0x44, + 0x6f, 0x75, 0x62, 0x74, +} + +type hashed struct { + hash []byte + salt []byte + cost int // allowed range is MinCost to MaxCost + major byte + minor byte +} + +// GenerateFromPassword returns the bcrypt hash of the password at the given +// cost. If the cost given is less than MinCost, the cost will be set to +// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package, +// to compare the returned hashed password with its cleartext version. +func GenerateFromPassword(password []byte, cost int) ([]byte, error) { + p, err := newFromPassword(password, cost) + if err != nil { + return nil, err + } + return p.Hash(), nil +} + +// CompareHashAndPassword compares a bcrypt hashed password with its possible +// plaintext equivalent. Returns nil on success, or an error on failure. +func CompareHashAndPassword(hashedPassword, password []byte) error { + p, err := newFromHash(hashedPassword) + if err != nil { + return err + } + + otherHash, err := bcrypt(password, p.cost, p.salt) + if err != nil { + return err + } + + otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor} + if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 { + return nil + } + + return ErrMismatchedHashAndPassword +} + +// Cost returns the hashing cost used to create the given hashed +// password. When, in the future, the hashing cost of a password system needs +// to be increased in order to adjust for greater computational power, this +// function allows one to establish which passwords need to be updated. +func Cost(hashedPassword []byte) (int, error) { + p, err := newFromHash(hashedPassword) + if err != nil { + return 0, err + } + return p.cost, nil +} + +func newFromPassword(password []byte, cost int) (*hashed, error) { + if cost < MinCost { + cost = DefaultCost + } + p := new(hashed) + p.major = majorVersion + p.minor = minorVersion + + err := checkCost(cost) + if err != nil { + return nil, err + } + p.cost = cost + + unencodedSalt := make([]byte, maxSaltSize) + _, err = io.ReadFull(rand.Reader, unencodedSalt) + if err != nil { + return nil, err + } + + p.salt = base64Encode(unencodedSalt) + hash, err := bcrypt(password, p.cost, p.salt) + if err != nil { + return nil, err + } + p.hash = hash + return p, err +} + +func newFromHash(hashedSecret []byte) (*hashed, error) { + if len(hashedSecret) < minHashSize { + return nil, ErrHashTooShort + } + p := new(hashed) + n, err := p.decodeVersion(hashedSecret) + if err != nil { + return nil, err + } + hashedSecret = hashedSecret[n:] + n, err = p.decodeCost(hashedSecret) + if err != nil { + return nil, err + } + hashedSecret = hashedSecret[n:] + + // The "+2" is here because we'll have to append at most 2 '=' to the salt + // when base64 decoding it in expensiveBlowfishSetup(). 
+ p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2) + copy(p.salt, hashedSecret[:encodedSaltSize]) + + hashedSecret = hashedSecret[encodedSaltSize:] + p.hash = make([]byte, len(hashedSecret)) + copy(p.hash, hashedSecret) + + return p, nil +} + +func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) { + cipherData := make([]byte, len(magicCipherData)) + copy(cipherData, magicCipherData) + + c, err := expensiveBlowfishSetup(password, uint32(cost), salt) + if err != nil { + return nil, err + } + + for i := 0; i < 24; i += 8 { + for j := 0; j < 64; j++ { + c.Encrypt(cipherData[i:i+8], cipherData[i:i+8]) + } + } + + // Bug compatibility with C bcrypt implementations. We only encode 23 of + // the 24 bytes encrypted. + hsh := base64Encode(cipherData[:maxCryptedHashSize]) + return hsh, nil +} + +func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) { + + csalt, err := base64Decode(salt) + if err != nil { + return nil, err + } + + // Bug compatibility with C bcrypt implementations. They use the trailing + // NULL in the key string during expansion. + ckey := append(key, 0) + + c, err := blowfish.NewSaltedCipher(ckey, csalt) + if err != nil { + return nil, err + } + + var i, rounds uint64 + rounds = 1 << cost + for i = 0; i < rounds; i++ { + blowfish.ExpandKey(ckey, c) + blowfish.ExpandKey(csalt, c) + } + + return c, nil +} + +func (p *hashed) Hash() []byte { + arr := make([]byte, 60) + arr[0] = '$' + arr[1] = p.major + n := 2 + if p.minor != 0 { + arr[2] = p.minor + n = 3 + } + arr[n] = '$' + n += 1 + copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost))) + n += 2 + arr[n] = '$' + n += 1 + copy(arr[n:], p.salt) + n += encodedSaltSize + copy(arr[n:], p.hash) + n += encodedHashSize + return arr[:n] +} + +func (p *hashed) decodeVersion(sbytes []byte) (int, error) { + if sbytes[0] != '$' { + return -1, InvalidHashPrefixError(sbytes[0]) + } + if sbytes[1] > majorVersion { + return -1, HashVersionTooNewError(sbytes[1]) + } + p.major = sbytes[1] + n := 3 + if sbytes[2] != '$' { + p.minor = sbytes[2] + n++ + } + return n, nil +} + +// sbytes should begin where decodeVersion left off. +func (p *hashed) decodeCost(sbytes []byte) (int, error) { + cost, err := strconv.Atoi(string(sbytes[0:2])) + if err != nil { + return -1, err + } + err = checkCost(cost) + if err != nil { + return -1, err + } + p.cost = cost + return 3, nil +} + +func (p *hashed) String() string { + return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor) +} + +func checkCost(cost int) error { + if cost < MinCost || cost > MaxCost { + return InvalidCostError(cost) + } + return nil +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b.go b/vendor/golang.org/x/crypto/blake2b/blake2b.go new file mode 100644 index 0000000..fa9e48e --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b.go @@ -0,0 +1,194 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package blake2b implements the BLAKE2b hash algorithm as +// defined in RFC 7693. +package blake2b + +import ( + "encoding/binary" + "errors" + "hash" +) + +const ( + // The blocksize of BLAKE2b in bytes. + BlockSize = 128 + // The hash size of BLAKE2b-512 in bytes. + Size = 64 + // The hash size of BLAKE2b-384 in bytes. + Size384 = 48 + // The hash size of BLAKE2b-256 in bytes. 
+ Size256 = 32 +) + +var ( + useAVX2 bool + useAVX bool + useSSE4 bool +) + +var errKeySize = errors.New("blake2b: invalid key size") + +var iv = [8]uint64{ + 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, + 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179, +} + +// Sum512 returns the BLAKE2b-512 checksum of the data. +func Sum512(data []byte) [Size]byte { + var sum [Size]byte + checkSum(&sum, Size, data) + return sum +} + +// Sum384 returns the BLAKE2b-384 checksum of the data. +func Sum384(data []byte) [Size384]byte { + var sum [Size]byte + var sum384 [Size384]byte + checkSum(&sum, Size384, data) + copy(sum384[:], sum[:Size384]) + return sum384 +} + +// Sum256 returns the BLAKE2b-256 checksum of the data. +func Sum256(data []byte) [Size256]byte { + var sum [Size]byte + var sum256 [Size256]byte + checkSum(&sum, Size256, data) + copy(sum256[:], sum[:Size256]) + return sum256 +} + +// New512 returns a new hash.Hash computing the BLAKE2b-512 checksum. A non-nil +// key turns the hash into a MAC. The key must between zero and 64 bytes long. +func New512(key []byte) (hash.Hash, error) { return newDigest(Size, key) } + +// New384 returns a new hash.Hash computing the BLAKE2b-384 checksum. A non-nil +// key turns the hash into a MAC. The key must between zero and 64 bytes long. +func New384(key []byte) (hash.Hash, error) { return newDigest(Size384, key) } + +// New256 returns a new hash.Hash computing the BLAKE2b-256 checksum. A non-nil +// key turns the hash into a MAC. The key must between zero and 64 bytes long. +func New256(key []byte) (hash.Hash, error) { return newDigest(Size256, key) } + +func newDigest(hashSize int, key []byte) (*digest, error) { + if len(key) > Size { + return nil, errKeySize + } + d := &digest{ + size: hashSize, + keyLen: len(key), + } + copy(d.key[:], key) + d.Reset() + return d, nil +} + +func checkSum(sum *[Size]byte, hashSize int, data []byte) { + h := iv + h[0] ^= uint64(hashSize) | (1 << 16) | (1 << 24) + var c [2]uint64 + + if length := len(data); length > BlockSize { + n := length &^ (BlockSize - 1) + if length == n { + n -= BlockSize + } + hashBlocks(&h, &c, 0, data[:n]) + data = data[n:] + } + + var block [BlockSize]byte + offset := copy(block[:], data) + remaining := uint64(BlockSize - offset) + if c[0] < remaining { + c[1]-- + } + c[0] -= remaining + + hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) + + for i, v := range h[:(hashSize+7)/8] { + binary.LittleEndian.PutUint64(sum[8*i:], v) + } +} + +type digest struct { + h [8]uint64 + c [2]uint64 + size int + block [BlockSize]byte + offset int + + key [BlockSize]byte + keyLen int +} + +func (d *digest) BlockSize() int { return BlockSize } + +func (d *digest) Size() int { return d.size } + +func (d *digest) Reset() { + d.h = iv + d.h[0] ^= uint64(d.size) | (uint64(d.keyLen) << 8) | (1 << 16) | (1 << 24) + d.offset, d.c[0], d.c[1] = 0, 0, 0 + if d.keyLen > 0 { + d.block = d.key + d.offset = BlockSize + } +} + +func (d *digest) Write(p []byte) (n int, err error) { + n = len(p) + + if d.offset > 0 { + remaining := BlockSize - d.offset + if n <= remaining { + d.offset += copy(d.block[d.offset:], p) + return + } + copy(d.block[d.offset:], p[:remaining]) + hashBlocks(&d.h, &d.c, 0, d.block[:]) + d.offset = 0 + p = p[remaining:] + } + + if length := len(p); length > BlockSize { + nn := length &^ (BlockSize - 1) + if length == nn { + nn -= BlockSize + } + hashBlocks(&d.h, &d.c, 0, p[:nn]) + p = p[nn:] + } + + if len(p) > 0 { + d.offset += copy(d.block[:], 
p) + } + + return +} + +func (d *digest) Sum(b []byte) []byte { + var block [BlockSize]byte + copy(block[:], d.block[:d.offset]) + remaining := uint64(BlockSize - d.offset) + + c := d.c + if c[0] < remaining { + c[1]-- + } + c[0] -= remaining + + h := d.h + hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) + + var sum [Size]byte + for i, v := range h[:(d.size+7)/8] { + binary.LittleEndian.PutUint64(sum[8*i:], v) + } + + return append(b, sum[:d.size]...) +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go new file mode 100644 index 0000000..8c41cf6 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go @@ -0,0 +1,43 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7,amd64,!gccgo,!appengine + +package blake2b + +func init() { + useAVX2 = supportsAVX2() + useAVX = supportsAVX() + useSSE4 = supportsSSE4() +} + +//go:noescape +func supportsSSE4() bool + +//go:noescape +func supportsAVX() bool + +//go:noescape +func supportsAVX2() bool + +//go:noescape +func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +//go:noescape +func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +//go:noescape +func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + if useAVX2 { + hashBlocksAVX2(h, c, flag, blocks) + } else if useAVX { + hashBlocksAVX(h, c, flag, blocks) + } else if useSSE4 { + hashBlocksSSE4(h, c, flag, blocks) + } else { + hashBlocksGeneric(h, c, flag, blocks) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s new file mode 100644 index 0000000..784bce6 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s @@ -0,0 +1,762 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
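+
+// Hand-written AVX2 and AVX implementations of the BLAKE2b compression
+// function; hashBlocks in blake2bAVX2_amd64.go selects one of them at runtime
+// and falls back to the SSE4 or generic implementation otherwise.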
+ +// +build go1.7,amd64,!gccgo,!appengine + +#include "textflag.h" + +DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16 + +#define VPERMQ_0x39_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39 +#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93 +#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e +#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93 +#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39 + +#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \ + VPADDQ m0, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFD $-79, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPSHUFB c40, Y1, Y1; \ + VPADDQ m1, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFB c48, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPADDQ Y1, Y1, t; \ + VPSRLQ $63, Y1, Y1; \ + VPXOR t, Y1, Y1; \ + VPERMQ_0x39_Y1_Y1; \ + VPERMQ_0x4E_Y2_Y2; \ + VPERMQ_0x93_Y3_Y3; \ + VPADDQ m2, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFD $-79, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPSHUFB c40, Y1, Y1; \ + VPADDQ m3, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFB c48, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPADDQ Y1, Y1, t; \ + VPSRLQ $63, Y1, Y1; \ + VPXOR t, Y1, Y1; \ + VPERMQ_0x39_Y3_Y3; \ + VPERMQ_0x4E_Y2_Y2; \ + VPERMQ_0x93_Y1_Y1 + +#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E +#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26 +#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x2E +#define 
VMOVQ_SI_X14_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36 +#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E + +#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n +#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE $n +#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n +#define VMOVQ_SI_X14(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x76; BYTE $n +#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n + +#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01 +#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01 +#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01 +#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01 +#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01 + +#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01 + +#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8 +#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01 + +// load msg: Y12 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \ + VMOVQ_SI_X12(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X12(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y12, Y12 + +// load msg: Y13 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \ + VMOVQ_SI_X13(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X13(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y13, Y13 + +// load msg: Y14 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \ + VMOVQ_SI_X14(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X14(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y14, Y14 + +// load msg: Y15 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \ + VMOVQ_SI_X15(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X15(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \ + VMOVQ_SI_X12_0; \ + VMOVQ_SI_X11(4*8); \ + VPINSRQ_1_SI_X12(2*8); \ + VPINSRQ_1_SI_X11(6*8); \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(1, 3, 5, 7); \ + LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \ + LOAD_MSG_AVX2_Y15(9, 11, 13, 15) + +#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \ + LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \ + LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \ + VMOVQ_SI_X11(11*8); \ + VPSHUFD $0x4E, 0*8(SI), X14; \ + VPINSRQ_1_SI_X11(5*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + LOAD_MSG_AVX2_Y15(12, 2, 7, 3) + +#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \ + VMOVQ_SI_X11(5*8); \ + VMOVDQU 11*8(SI), X12; \ + VPINSRQ_1_SI_X11(15*8); \ + VINSERTI128 $1, X11, Y12, Y12; 
\ + VMOVQ_SI_X13(8*8); \ + VMOVQ_SI_X11(2*8); \ + VPINSRQ_1_SI_X13_0; \ + VPINSRQ_1_SI_X11(13*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \ + LOAD_MSG_AVX2_Y15(14, 6, 1, 4) + +#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \ + LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \ + LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \ + LOAD_MSG_AVX2_Y14(2, 5, 4, 15); \ + VMOVQ_SI_X15(6*8); \ + VMOVQ_SI_X11_0; \ + VPINSRQ_1_SI_X15(10*8); \ + VPINSRQ_1_SI_X11(8*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \ + LOAD_MSG_AVX2_Y12(9, 5, 2, 10); \ + VMOVQ_SI_X13_0; \ + VMOVQ_SI_X11(4*8); \ + VPINSRQ_1_SI_X13(7*8); \ + VPINSRQ_1_SI_X11(15*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \ + LOAD_MSG_AVX2_Y15(1, 12, 8, 13) + +#define LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X11_0; \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X11(8*8); \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \ + LOAD_MSG_AVX2_Y14(4, 7, 15, 1); \ + LOAD_MSG_AVX2_Y15(13, 5, 14, 9) + +#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \ + LOAD_MSG_AVX2_Y12(12, 1, 14, 4); \ + LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \ + VMOVQ_SI_X14_0; \ + VPSHUFD $0x4E, 8*8(SI), X11; \ + VPINSRQ_1_SI_X14(6*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + LOAD_MSG_AVX2_Y15(7, 3, 2, 11) + +#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \ + LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \ + LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \ + LOAD_MSG_AVX2_Y14(5, 15, 8, 2); \ + VMOVQ_SI_X15_0; \ + VMOVQ_SI_X11(6*8); \ + VPINSRQ_1_SI_X15(4*8); \ + VPINSRQ_1_SI_X11(10*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \ + VMOVQ_SI_X12(6*8); \ + VMOVQ_SI_X11(11*8); \ + VPINSRQ_1_SI_X12(14*8); \ + VPINSRQ_1_SI_X11_0; \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \ + VMOVQ_SI_X11(1*8); \ + VMOVDQU 12*8(SI), X14; \ + VPINSRQ_1_SI_X11(10*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + VMOVQ_SI_X15(2*8); \ + VMOVDQU 4*8(SI), X11; \ + VPINSRQ_1_SI_X15(7*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \ + LOAD_MSG_AVX2_Y12(10, 8, 7, 1); \ + VMOVQ_SI_X13(2*8); \ + VPSHUFD $0x4E, 5*8(SI), X11; \ + VPINSRQ_1_SI_X13(4*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \ + VMOVQ_SI_X15(11*8); \ + VMOVQ_SI_X11(12*8); \ + VPINSRQ_1_SI_X15(14*8); \ + VPINSRQ_1_SI_X11_0; \ + VINSERTI128 $1, X11, Y15, Y15 + +// func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, DX + MOVQ SP, R9 + ADDQ $31, R9 + ANDQ $~31, R9 + MOVQ R9, SP + + MOVQ CX, 16(SP) + XORQ CX, CX + MOVQ CX, 24(SP) + + VMOVDQU ·AVX2_c40<>(SB), Y4 + VMOVDQU ·AVX2_c48<>(SB), Y5 + + VMOVDQU 0(AX), Y8 + VMOVDQU 32(AX), Y9 + VMOVDQU ·AVX2_iv0<>(SB), Y6 + VMOVDQU ·AVX2_iv1<>(SB), Y7 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + MOVQ R9, 8(SP) + +loop: + ADDQ $128, R8 + MOVQ R8, 0(SP) + CMPQ R8, $128 + JGE noinc + INCQ R9 + MOVQ R9, 8(SP) + +noinc: + VMOVDQA Y8, Y0 + VMOVDQA Y9, Y1 + VMOVDQA Y6, Y2 + VPXOR 0(SP), Y7, Y3 + + LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() + VMOVDQA Y12, 32(SP) + VMOVDQA Y13, 64(SP) + VMOVDQA Y14, 96(SP) + VMOVDQA Y15, 128(SP) + ROUND_AVX2(Y12, Y13, Y14, Y15, 
Y10, Y4, Y5) + LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() + VMOVDQA Y12, 160(SP) + VMOVDQA Y13, 192(SP) + VMOVDQA Y14, 224(SP) + VMOVDQA Y15, 256(SP) + + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + + ROUND_AVX2(32(SP), 64(SP), 96(SP), 128(SP), Y10, Y4, Y5) + ROUND_AVX2(160(SP), 192(SP), 224(SP), 256(SP), Y10, Y4, Y5) + + VPXOR Y0, Y8, Y8 + VPXOR Y1, Y9, Y9 + VPXOR Y2, Y8, Y8 + VPXOR Y3, Y9, Y9 + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + + VMOVDQU Y8, 0(AX) + VMOVDQU Y9, 32(AX) + VZEROUPPER + + MOVQ DX, SP + RET + +#define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA +#define VPUNPCKLQDQ_X3_X3_X15 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xFB +#define VPUNPCKLQDQ_X7_X7_X15 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xFF +#define VPUNPCKLQDQ_X13_X13_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x11; BYTE $0x6C; BYTE $0xFD +#define VPUNPCKLQDQ_X14_X14_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x09; BYTE $0x6C; BYTE $0xFE + +#define VPUNPCKHQDQ_X15_X2_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD7 +#define VPUNPCKHQDQ_X15_X3_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDF +#define VPUNPCKHQDQ_X15_X6_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF7 +#define VPUNPCKHQDQ_X15_X7_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFF +#define VPUNPCKHQDQ_X15_X3_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD7 +#define VPUNPCKHQDQ_X15_X7_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF7 +#define VPUNPCKHQDQ_X15_X13_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xDF +#define VPUNPCKHQDQ_X15_X13_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xFF + +#define SHUFFLE_AVX() \ + VMOVDQA X6, X13; \ + VMOVDQA X2, X14; \ + VMOVDQA X4, X6; \ + VPUNPCKLQDQ_X13_X13_X15; \ + VMOVDQA X5, X4; \ + VMOVDQA X6, X5; \ + VPUNPCKHQDQ_X15_X7_X6; \ + VPUNPCKLQDQ_X7_X7_X15; \ + VPUNPCKHQDQ_X15_X13_X7; \ + VPUNPCKLQDQ_X3_X3_X15; \ + VPUNPCKHQDQ_X15_X2_X2; \ + VPUNPCKLQDQ_X14_X14_X15; \ + VPUNPCKHQDQ_X15_X3_X3; \ + +#define SHUFFLE_AVX_INV() \ + VMOVDQA X2, X13; \ + VMOVDQA X4, X14; \ + VPUNPCKLQDQ_X2_X2_X15; \ + VMOVDQA X5, X4; \ + VPUNPCKHQDQ_X15_X3_X2; \ + VMOVDQA X14, X5; \ + VPUNPCKLQDQ_X3_X3_X15; \ + VMOVDQA X6, X14; \ + VPUNPCKHQDQ_X15_X13_X3; \ + VPUNPCKLQDQ_X7_X7_X15; \ + VPUNPCKHQDQ_X15_X6_X6; \ + VPUNPCKLQDQ_X14_X14_X15; \ + VPUNPCKHQDQ_X15_X7_X7; \ + +#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ + VPADDQ m0, v0, v0; \ + VPADDQ v2, v0, v0; \ + VPADDQ m1, v1, v1; \ + VPADDQ v3, v1, v1; \ + VPXOR v0, v6, v6; \ + VPXOR v1, v7, v7; \ + VPSHUFD $-79, v6, v6; \ + VPSHUFD $-79, v7, v7; \ + VPADDQ v6, v4, v4; \ + VPADDQ v7, v5, 
v5; \ + VPXOR v4, v2, v2; \ + VPXOR v5, v3, v3; \ + VPSHUFB c40, v2, v2; \ + VPSHUFB c40, v3, v3; \ + VPADDQ m2, v0, v0; \ + VPADDQ v2, v0, v0; \ + VPADDQ m3, v1, v1; \ + VPADDQ v3, v1, v1; \ + VPXOR v0, v6, v6; \ + VPXOR v1, v7, v7; \ + VPSHUFB c48, v6, v6; \ + VPSHUFB c48, v7, v7; \ + VPADDQ v6, v4, v4; \ + VPADDQ v7, v5, v5; \ + VPXOR v4, v2, v2; \ + VPXOR v5, v3, v3; \ + VPADDQ v2, v2, t0; \ + VPSRLQ $63, v2, v2; \ + VPXOR t0, v2, v2; \ + VPADDQ v3, v3, t0; \ + VPSRLQ $63, v3, v3; \ + VPXOR t0, v3, v3 + +// load msg: X12 = (i0, i1), X13 = (i2, i3), X14 = (i4, i5), X15 = (i6, i7) +// i0, i1, i2, i3, i4, i5, i6, i7 must not be 0 +#define LOAD_MSG_AVX(i0, i1, i2, i3, i4, i5, i6, i7) \ + VMOVQ_SI_X12(i0*8); \ + VMOVQ_SI_X13(i2*8); \ + VMOVQ_SI_X14(i4*8); \ + VMOVQ_SI_X15(i6*8); \ + VPINSRQ_1_SI_X12(i1*8); \ + VPINSRQ_1_SI_X13(i3*8); \ + VPINSRQ_1_SI_X14(i5*8); \ + VPINSRQ_1_SI_X15(i7*8) + +// load msg: X12 = (0, 2), X13 = (4, 6), X14 = (1, 3), X15 = (5, 7) +#define LOAD_MSG_AVX_0_2_4_6_1_3_5_7() \ + VMOVQ_SI_X12_0; \ + VMOVQ_SI_X13(4*8); \ + VMOVQ_SI_X14(1*8); \ + VMOVQ_SI_X15(5*8); \ + VPINSRQ_1_SI_X12(2*8); \ + VPINSRQ_1_SI_X13(6*8); \ + VPINSRQ_1_SI_X14(3*8); \ + VPINSRQ_1_SI_X15(7*8) + +// load msg: X12 = (1, 0), X13 = (11, 5), X14 = (12, 2), X15 = (7, 3) +#define LOAD_MSG_AVX_1_0_11_5_12_2_7_3() \ + VPSHUFD $0x4E, 0*8(SI), X12; \ + VMOVQ_SI_X13(11*8); \ + VMOVQ_SI_X14(12*8); \ + VMOVQ_SI_X15(7*8); \ + VPINSRQ_1_SI_X13(5*8); \ + VPINSRQ_1_SI_X14(2*8); \ + VPINSRQ_1_SI_X15(3*8) + +// load msg: X12 = (11, 12), X13 = (5, 15), X14 = (8, 0), X15 = (2, 13) +#define LOAD_MSG_AVX_11_12_5_15_8_0_2_13() \ + VMOVDQU 11*8(SI), X12; \ + VMOVQ_SI_X13(5*8); \ + VMOVQ_SI_X14(8*8); \ + VMOVQ_SI_X15(2*8); \ + VPINSRQ_1_SI_X13(15*8); \ + VPINSRQ_1_SI_X14_0; \ + VPINSRQ_1_SI_X15(13*8) + +// load msg: X12 = (2, 5), X13 = (4, 15), X14 = (6, 10), X15 = (0, 8) +#define LOAD_MSG_AVX_2_5_4_15_6_10_0_8() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X13(4*8); \ + VMOVQ_SI_X14(6*8); \ + VMOVQ_SI_X15_0; \ + VPINSRQ_1_SI_X12(5*8); \ + VPINSRQ_1_SI_X13(15*8); \ + VPINSRQ_1_SI_X14(10*8); \ + VPINSRQ_1_SI_X15(8*8) + +// load msg: X12 = (9, 5), X13 = (2, 10), X14 = (0, 7), X15 = (4, 15) +#define LOAD_MSG_AVX_9_5_2_10_0_7_4_15() \ + VMOVQ_SI_X12(9*8); \ + VMOVQ_SI_X13(2*8); \ + VMOVQ_SI_X14_0; \ + VMOVQ_SI_X15(4*8); \ + VPINSRQ_1_SI_X12(5*8); \ + VPINSRQ_1_SI_X13(10*8); \ + VPINSRQ_1_SI_X14(7*8); \ + VPINSRQ_1_SI_X15(15*8) + +// load msg: X12 = (2, 6), X13 = (0, 8), X14 = (12, 10), X15 = (11, 3) +#define LOAD_MSG_AVX_2_6_0_8_12_10_11_3() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X13_0; \ + VMOVQ_SI_X14(12*8); \ + VMOVQ_SI_X15(11*8); \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X13(8*8); \ + VPINSRQ_1_SI_X14(10*8); \ + VPINSRQ_1_SI_X15(3*8) + +// load msg: X12 = (0, 6), X13 = (9, 8), X14 = (7, 3), X15 = (2, 11) +#define LOAD_MSG_AVX_0_6_9_8_7_3_2_11() \ + MOVQ 0*8(SI), X12; \ + VPSHUFD $0x4E, 8*8(SI), X13; \ + MOVQ 7*8(SI), X14; \ + MOVQ 2*8(SI), X15; \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X14(3*8); \ + VPINSRQ_1_SI_X15(11*8) + +// load msg: X12 = (6, 14), X13 = (11, 0), X14 = (15, 9), X15 = (3, 8) +#define LOAD_MSG_AVX_6_14_11_0_15_9_3_8() \ + MOVQ 6*8(SI), X12; \ + MOVQ 11*8(SI), X13; \ + MOVQ 15*8(SI), X14; \ + MOVQ 3*8(SI), X15; \ + VPINSRQ_1_SI_X12(14*8); \ + VPINSRQ_1_SI_X13_0; \ + VPINSRQ_1_SI_X14(9*8); \ + VPINSRQ_1_SI_X15(8*8) + +// load msg: X12 = (5, 15), X13 = (8, 2), X14 = (0, 4), X15 = (6, 10) +#define LOAD_MSG_AVX_5_15_8_2_0_4_6_10() \ + MOVQ 5*8(SI), X12; \ + MOVQ 8*8(SI), X13; \ + MOVQ 0*8(SI), X14; \ + MOVQ 
6*8(SI), X15; \ + VPINSRQ_1_SI_X12(15*8); \ + VPINSRQ_1_SI_X13(2*8); \ + VPINSRQ_1_SI_X14(4*8); \ + VPINSRQ_1_SI_X15(10*8) + +// load msg: X12 = (12, 13), X13 = (1, 10), X14 = (2, 7), X15 = (4, 5) +#define LOAD_MSG_AVX_12_13_1_10_2_7_4_5() \ + VMOVDQU 12*8(SI), X12; \ + MOVQ 1*8(SI), X13; \ + MOVQ 2*8(SI), X14; \ + VPINSRQ_1_SI_X13(10*8); \ + VPINSRQ_1_SI_X14(7*8); \ + VMOVDQU 4*8(SI), X15 + +// load msg: X12 = (15, 9), X13 = (3, 13), X14 = (11, 14), X15 = (12, 0) +#define LOAD_MSG_AVX_15_9_3_13_11_14_12_0() \ + MOVQ 15*8(SI), X12; \ + MOVQ 3*8(SI), X13; \ + MOVQ 11*8(SI), X14; \ + MOVQ 12*8(SI), X15; \ + VPINSRQ_1_SI_X12(9*8); \ + VPINSRQ_1_SI_X13(13*8); \ + VPINSRQ_1_SI_X14(14*8); \ + VPINSRQ_1_SI_X15_0 + +// func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, BP + MOVQ SP, R9 + ADDQ $15, R9 + ANDQ $~15, R9 + MOVQ R9, SP + + VMOVDQU ·AVX_c40<>(SB), X0 + VMOVDQU ·AVX_c48<>(SB), X1 + VMOVDQA X0, X8 + VMOVDQA X1, X9 + + VMOVDQU ·AVX_iv3<>(SB), X0 + VMOVDQA X0, 0(SP) + XORQ CX, 0(SP) // 0(SP) = ·AVX_iv3 ^ (CX || 0) + + VMOVDQU 0(AX), X10 + VMOVDQU 16(AX), X11 + VMOVDQU 32(AX), X2 + VMOVDQU 48(AX), X3 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + +loop: + ADDQ $128, R8 + CMPQ R8, $128 + JGE noinc + INCQ R9 + +noinc: + VMOVQ_R8_X15 + VPINSRQ_1_R9_X15 + + VMOVDQA X10, X0 + VMOVDQA X11, X1 + VMOVDQU ·AVX_iv0<>(SB), X4 + VMOVDQU ·AVX_iv1<>(SB), X5 + VMOVDQU ·AVX_iv2<>(SB), X6 + + VPXOR X15, X6, X6 + VMOVDQA 0(SP), X7 + + LOAD_MSG_AVX_0_2_4_6_1_3_5_7() + VMOVDQA X12, 16(SP) + VMOVDQA X13, 32(SP) + VMOVDQA X14, 48(SP) + VMOVDQA X15, 64(SP) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15) + VMOVDQA X12, 80(SP) + VMOVDQA X13, 96(SP) + VMOVDQA X14, 112(SP) + VMOVDQA X15, 128(SP) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6) + VMOVDQA X12, 144(SP) + VMOVDQA X13, 160(SP) + VMOVDQA X14, 176(SP) + VMOVDQA X15, 192(SP) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_1_0_11_5_12_2_7_3() + VMOVDQA X12, 208(SP) + VMOVDQA X13, 224(SP) + VMOVDQA X14, 240(SP) + VMOVDQA X15, 256(SP) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_11_12_5_15_8_0_2_13() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(10, 3, 7, 9, 14, 6, 1, 4) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(7, 3, 13, 11, 9, 1, 12, 14) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_2_5_4_15_6_10_0_8() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_9_5_2_10_0_7_4_15() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(14, 11, 6, 3, 1, 12, 8, 13) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_2_6_0_8_12_10_11_3() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + 
SHUFFLE_AVX() + LOAD_MSG_AVX(4, 7, 15, 1, 13, 5, 14, 9) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(12, 1, 14, 4, 5, 15, 13, 10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_0_6_9_8_7_3_2_11() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(13, 7, 12, 3, 11, 14, 1, 9) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_5_15_8_2_0_4_6_10() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_6_14_11_0_15_9_3_8() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_12_13_1_10_2_7_4_5() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(10, 8, 7, 1, 2, 4, 6, 5) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_15_9_3_13_11_14_12_0() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X15, X8, X9) + SHUFFLE_AVX() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X15, X8, X9) + SHUFFLE_AVX_INV() + + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X15, X8, X9) + SHUFFLE_AVX() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X15, X8, X9) + SHUFFLE_AVX_INV() + + VMOVDQU 32(AX), X14 + VMOVDQU 48(AX), X15 + VPXOR X0, X10, X10 + VPXOR X1, X11, X11 + VPXOR X2, X14, X14 + VPXOR X3, X15, X15 + VPXOR X4, X10, X10 + VPXOR X5, X11, X11 + VPXOR X6, X14, X2 + VPXOR X7, X15, X3 + VMOVDQU X2, 32(AX) + VMOVDQU X3, 48(AX) + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + VMOVDQU X10, 0(AX) + VMOVDQU X11, 16(AX) + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + VZEROUPPER + + MOVQ BP, SP + RET + +// func supportsAVX2() bool +TEXT ·supportsAVX2(SB), 4, $0-1 + MOVQ runtime·support_avx2(SB), AX + MOVB AX, ret+0(FP) + RET + +// func supportsAVX() bool +TEXT ·supportsAVX(SB), 4, $0-1 + MOVQ runtime·support_avx(SB), AX + MOVB AX, ret+0(FP) + RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go new file mode 100644 index 0000000..2ab7c30 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go @@ -0,0 +1,25 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7,amd64,!gccgo,!appengine + +package blake2b + +func init() { + useSSE4 = supportsSSE4() +} + +//go:noescape +func supportsSSE4() bool + +//go:noescape +func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + if useSSE4 { + hashBlocksSSE4(h, c, flag, blocks) + } else { + hashBlocksGeneric(h, c, flag, blocks) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s new file mode 100644 index 0000000..6453074 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s @@ -0,0 +1,290 @@ +// Copyright 2016 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!gccgo,!appengine + +#include "textflag.h" + +DATA ·iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +GLOBL ·iv0<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b +DATA ·iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·iv1<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv2<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·iv2<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b +DATA ·iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 +GLOBL ·iv3<>(SB), (NOPTR+RODATA), $16 + +DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 + +#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v6, t1; \ + PUNPCKLQDQ v6, t2; \ + PUNPCKHQDQ v7, v6; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ v7, t2; \ + MOVO t1, v7; \ + MOVO v2, t1; \ + PUNPCKHQDQ t2, v7; \ + PUNPCKLQDQ v3, t2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v3 + +#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v2, t1; \ + PUNPCKLQDQ v2, t2; \ + PUNPCKHQDQ v3, v2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ v3, t2; \ + MOVO t1, v3; \ + MOVO v6, t1; \ + PUNPCKHQDQ t2, v3; \ + PUNPCKLQDQ v7, t2; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v7 + +#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ + PADDQ m0, v0; \ + PADDQ m1, v1; \ + PADDQ v2, v0; \ + PADDQ v3, v1; \ + PXOR v0, v6; \ + PXOR v1, v7; \ + PSHUFD $0xB1, v6, v6; \ + PSHUFD $0xB1, v7, v7; \ + PADDQ v6, v4; \ + PADDQ v7, v5; \ + PXOR v4, v2; \ + PXOR v5, v3; \ + PSHUFB c40, v2; \ + PSHUFB c40, v3; \ + PADDQ m2, v0; \ + PADDQ m3, v1; \ + PADDQ v2, v0; \ + PADDQ v3, v1; \ + PXOR v0, v6; \ + PXOR v1, v7; \ + PSHUFB c48, v6; \ + PSHUFB c48, v7; \ + PADDQ v6, v4; \ + PADDQ v7, v5; \ + PXOR v4, v2; \ + PXOR v5, v3; \ + MOVOU v2, t0; \ + PADDQ v2, t0; \ + PSRLQ $63, v2; \ + PXOR t0, v2; \ + MOVOU v3, t0; \ + PADDQ v3, t0; \ + PSRLQ $63, v3; \ + PXOR t0, v3 + +#define LOAD_MSG(m0, m1, m2, m3, src, i0, i1, i2, i3, i4, i5, i6, i7) \ + MOVQ i0*8(src), m0; \ + PINSRQ $1, i1*8(src), m0; \ + MOVQ i2*8(src), m1; \ + PINSRQ $1, i3*8(src), m1; \ + MOVQ i4*8(src), m2; \ + PINSRQ $1, i5*8(src), m2; \ + MOVQ i6*8(src), m3; \ + PINSRQ $1, i7*8(src), m3 + +// func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksSSE4(SB), 4, $288-48 // frame size = 272 + 16 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, BP + MOVQ SP, R9 + ADDQ $15, R9 + ANDQ $~15, R9 + MOVQ R9, SP + + MOVOU ·iv3<>(SB), X0 + MOVO X0, 0(SP) + XORQ CX, 0(SP) // 0(SP) = ·iv3 ^ (CX || 0) + + MOVOU ·c40<>(SB), X13 + MOVOU ·c48<>(SB), X14 + + MOVOU 0(AX), X12 + MOVOU 16(AX), X15 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + +loop: + ADDQ $128, R8 + CMPQ R8, $128 + JGE noinc + INCQ R9 + +noinc: + MOVQ R8, X8 + PINSRQ $1, R9, X8 + + MOVO X12, X0 + MOVO X15, X1 + MOVOU 32(AX), X2 + MOVOU 48(AX), X3 + MOVOU ·iv0<>(SB), X4 + MOVOU ·iv1<>(SB), X5 + MOVOU ·iv2<>(SB), X6 + + PXOR X8, X6 + MOVO 0(SP), X7 + + 
LOAD_MSG(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7) + MOVO X8, 16(SP) + MOVO X9, 32(SP) + MOVO X10, 48(SP) + MOVO X11, 64(SP) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 8, 10, 12, 14, 9, 11, 13, 15) + MOVO X8, 80(SP) + MOVO X9, 96(SP) + MOVO X10, 112(SP) + MOVO X11, 128(SP) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6) + MOVO X8, 144(SP) + MOVO X9, 160(SP) + MOVO X10, 176(SP) + MOVO X11, 192(SP) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 1, 0, 11, 5, 12, 2, 7, 3) + MOVO X8, 208(SP) + MOVO X9, 224(SP) + MOVO X10, 240(SP) + MOVO X11, 256(SP) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 11, 12, 5, 15, 8, 0, 2, 13) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 10, 3, 7, 9, 14, 6, 1, 4) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 7, 3, 13, 11, 9, 1, 12, 14) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 2, 5, 4, 15, 6, 10, 0, 8) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 9, 5, 2, 10, 0, 7, 4, 15) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 14, 11, 6, 3, 1, 12, 8, 13) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 2, 6, 0, 8, 12, 10, 11, 3) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 4, 7, 15, 1, 13, 5, 14, 9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 12, 1, 14, 4, 5, 15, 13, 10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 0, 6, 9, 8, 7, 3, 2, 11) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 13, 7, 12, 3, 11, 14, 1, 9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 5, 15, 8, 2, 0, 4, 6, 10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 6, 14, 11, 0, 15, 9, 3, 8) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 12, 13, 1, 10, 2, 7, 4, 5) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + 
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 10, 8, 7, 1, 2, 4, 6, 5) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 15, 9, 3, 13, 11, 14, 12, 0) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + MOVOU 32(AX), X10 + MOVOU 48(AX), X11 + PXOR X0, X12 + PXOR X1, X15 + PXOR X2, X10 + PXOR X3, X11 + PXOR X4, X12 + PXOR X5, X15 + PXOR X6, X10 + PXOR X7, X11 + MOVOU X10, 32(AX) + MOVOU X11, 48(AX) + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + MOVOU X12, 0(AX) + MOVOU X15, 16(AX) + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + + MOVQ BP, SP + RET + +// func supportsSSE4() bool +TEXT ·supportsSSE4(SB), 4, $0-1 + MOVL $1, AX + CPUID + SHRL $19, CX // Bit 19 indicates SSE4 support + ANDL $1, CX // CX != 0 if support SSE4 + MOVB CX, ret+0(FP) + RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go new file mode 100644 index 0000000..4bd2abc --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go @@ -0,0 +1,179 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blake2b + +import "encoding/binary" + +// the precomputed values for BLAKE2b +// there are 12 16-byte arrays - one for each round +// the entries are calculated from the sigma constants. 
+var precomputed = [12][16]byte{ + {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, + {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, + {11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4}, + {7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8}, + {9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13}, + {2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9}, + {12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11}, + {13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10}, + {6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5}, + {10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0}, + {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, // equal to the first + {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, // equal to the second +} + +func hashBlocksGeneric(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + var m [16]uint64 + c0, c1 := c[0], c[1] + + for i := 0; i < len(blocks); { + c0 += BlockSize + if c0 < BlockSize { + c1++ + } + + v0, v1, v2, v3, v4, v5, v6, v7 := h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7] + v8, v9, v10, v11, v12, v13, v14, v15 := iv[0], iv[1], iv[2], iv[3], iv[4], iv[5], iv[6], iv[7] + v12 ^= c0 + v13 ^= c1 + v14 ^= flag + + for j := range m { + m[j] = binary.LittleEndian.Uint64(blocks[i:]) + i += 8 + } + + for j := range precomputed { + s := &(precomputed[j]) + + v0 += m[s[0]] + v0 += v4 + v12 ^= v0 + v12 = v12<<(64-32) | v12>>32 + v8 += v12 + v4 ^= v8 + v4 = v4<<(64-24) | v4>>24 + v1 += m[s[1]] + v1 += v5 + v13 ^= v1 + v13 = v13<<(64-32) | v13>>32 + v9 += v13 + v5 ^= v9 + v5 = v5<<(64-24) | v5>>24 + v2 += m[s[2]] + v2 += v6 + v14 ^= v2 + v14 = v14<<(64-32) | v14>>32 + v10 += v14 + v6 ^= v10 + v6 = v6<<(64-24) | v6>>24 + v3 += m[s[3]] + v3 += v7 + v15 ^= v3 + v15 = v15<<(64-32) | v15>>32 + v11 += v15 + v7 ^= v11 + v7 = v7<<(64-24) | v7>>24 + + v0 += m[s[4]] + v0 += v4 + v12 ^= v0 + v12 = v12<<(64-16) | v12>>16 + v8 += v12 + v4 ^= v8 + v4 = v4<<(64-63) | v4>>63 + v1 += m[s[5]] + v1 += v5 + v13 ^= v1 + v13 = v13<<(64-16) | v13>>16 + v9 += v13 + v5 ^= v9 + v5 = v5<<(64-63) | v5>>63 + v2 += m[s[6]] + v2 += v6 + v14 ^= v2 + v14 = v14<<(64-16) | v14>>16 + v10 += v14 + v6 ^= v10 + v6 = v6<<(64-63) | v6>>63 + v3 += m[s[7]] + v3 += v7 + v15 ^= v3 + v15 = v15<<(64-16) | v15>>16 + v11 += v15 + v7 ^= v11 + v7 = v7<<(64-63) | v7>>63 + + v0 += m[s[8]] + v0 += v5 + v15 ^= v0 + v15 = v15<<(64-32) | v15>>32 + v10 += v15 + v5 ^= v10 + v5 = v5<<(64-24) | v5>>24 + v1 += m[s[9]] + v1 += v6 + v12 ^= v1 + v12 = v12<<(64-32) | v12>>32 + v11 += v12 + v6 ^= v11 + v6 = v6<<(64-24) | v6>>24 + v2 += m[s[10]] + v2 += v7 + v13 ^= v2 + v13 = v13<<(64-32) | v13>>32 + v8 += v13 + v7 ^= v8 + v7 = v7<<(64-24) | v7>>24 + v3 += m[s[11]] + v3 += v4 + v14 ^= v3 + v14 = v14<<(64-32) | v14>>32 + v9 += v14 + v4 ^= v9 + v4 = v4<<(64-24) | v4>>24 + + v0 += m[s[12]] + v0 += v5 + v15 ^= v0 + v15 = v15<<(64-16) | v15>>16 + v10 += v15 + v5 ^= v10 + v5 = v5<<(64-63) | v5>>63 + v1 += m[s[13]] + v1 += v6 + v12 ^= v1 + v12 = v12<<(64-16) | v12>>16 + v11 += v12 + v6 ^= v11 + v6 = v6<<(64-63) | v6>>63 + v2 += m[s[14]] + v2 += v7 + v13 ^= v2 + v13 = v13<<(64-16) | v13>>16 + v8 += v13 + v7 ^= v8 + v7 = v7<<(64-63) | v7>>63 + v3 += m[s[15]] + v3 += v4 + v14 ^= v3 + v14 = v14<<(64-16) | v14>>16 + v9 += v14 + v4 ^= v9 + v4 = v4<<(64-63) | v4>>63 + + } + + h[0] ^= v0 ^ v8 + h[1] ^= v1 ^ v9 + h[2] ^= v2 ^ v10 + h[3] ^= v3 ^ v11 + h[4] ^= v4 ^ v12 + h[5] ^= v5 ^ v13 + h[6] ^= v6 ^ v14 + h[7] ^= v7 ^ v15 + } + c[0], c[1] = c0, c1 +} diff --git 
a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go new file mode 100644 index 0000000..da156a1 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go @@ -0,0 +1,11 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine gccgo + +package blake2b + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + hashBlocksGeneric(h, c, flag, blocks) +} diff --git a/vendor/golang.org/x/crypto/blake2b/register.go b/vendor/golang.org/x/crypto/blake2b/register.go new file mode 100644 index 0000000..efd689a --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/register.go @@ -0,0 +1,32 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package blake2b + +import ( + "crypto" + "hash" +) + +func init() { + newHash256 := func() hash.Hash { + h, _ := New256(nil) + return h + } + newHash384 := func() hash.Hash { + h, _ := New384(nil) + return h + } + + newHash512 := func() hash.Hash { + h, _ := New512(nil) + return h + } + + crypto.RegisterHash(crypto.BLAKE2b_256, newHash256) + crypto.RegisterHash(crypto.BLAKE2b_384, newHash384) + crypto.RegisterHash(crypto.BLAKE2b_512, newHash512) +} diff --git a/vendor/golang.org/x/crypto/blowfish/block.go b/vendor/golang.org/x/crypto/blowfish/block.go new file mode 100644 index 0000000..9d80f19 --- /dev/null +++ b/vendor/golang.org/x/crypto/blowfish/block.go @@ -0,0 +1,159 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blowfish + +// getNextWord returns the next big-endian uint32 value from the byte slice +// at the given position in a circular manner, updating the position. +func getNextWord(b []byte, pos *int) uint32 { + var w uint32 + j := *pos + for i := 0; i < 4; i++ { + w = w<<8 | uint32(b[j]) + j++ + if j >= len(b) { + j = 0 + } + } + *pos = j + return w +} + +// ExpandKey performs a key expansion on the given *Cipher. Specifically, it +// performs the Blowfish algorithm's key schedule which sets up the *Cipher's +// pi and substitution tables for calls to Encrypt. This is used, primarily, +// by the bcrypt package to reuse the Blowfish key schedule during its +// set up. It's unlikely that you need to use this directly. +func ExpandKey(key []byte, c *Cipher) { + j := 0 + for i := 0; i < 18; i++ { + // Using inlined getNextWord for performance. + var d uint32 + for k := 0; k < 4; k++ { + d = d<<8 | uint32(key[j]) + j++ + if j >= len(key) { + j = 0 + } + } + c.p[i] ^= d + } + + var l, r uint32 + for i := 0; i < 18; i += 2 { + l, r = encryptBlock(l, r, c) + c.p[i], c.p[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s0[i], c.s0[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s1[i], c.s1[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s2[i], c.s2[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s3[i], c.s3[i+1] = l, r + } +} + +// This is similar to ExpandKey, but folds the salt during the key +// schedule. 
While ExpandKey is essentially expandKeyWithSalt with an all-zero +// salt passed in, reusing ExpandKey turns out to be a place of inefficiency +// and specializing it here is useful. +func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) { + j := 0 + for i := 0; i < 18; i++ { + c.p[i] ^= getNextWord(key, &j) + } + + j = 0 + var l, r uint32 + for i := 0; i < 18; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.p[i], c.p[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s0[i], c.s0[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s1[i], c.s1[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s2[i], c.s2[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s3[i], c.s3[i+1] = l, r + } +} + +func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { + xl, xr := l, r + xl ^= c.p[0] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16] + xr ^= c.p[17] + return xr, xl +} + +func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { + xl, xr := l, r + xl ^= c.p[17] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11] + xr ^= ((c.s0[byte(xl>>24)] + 
c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1] + xr ^= c.p[0] + return xr, xl +} diff --git a/vendor/golang.org/x/crypto/blowfish/cipher.go b/vendor/golang.org/x/crypto/blowfish/cipher.go new file mode 100644 index 0000000..a73954f --- /dev/null +++ b/vendor/golang.org/x/crypto/blowfish/cipher.go @@ -0,0 +1,91 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm. +package blowfish // import "golang.org/x/crypto/blowfish" + +// The code is a port of Bruce Schneier's C implementation. +// See http://www.schneier.com/blowfish.html. + +import "strconv" + +// The Blowfish block size in bytes. +const BlockSize = 8 + +// A Cipher is an instance of Blowfish encryption using a particular key. +type Cipher struct { + p [18]uint32 + s0, s1, s2, s3 [256]uint32 +} + +type KeySizeError int + +func (k KeySizeError) Error() string { + return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k)) +} + +// NewCipher creates and returns a Cipher. +// The key argument should be the Blowfish key, from 1 to 56 bytes. +func NewCipher(key []byte) (*Cipher, error) { + var result Cipher + if k := len(key); k < 1 || k > 56 { + return nil, KeySizeError(k) + } + initCipher(&result) + ExpandKey(key, &result) + return &result, nil +} + +// NewSaltedCipher creates a returns a Cipher that folds a salt into its key +// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is +// sufficient and desirable. For bcrypt compatibility, the key can be over 56 +// bytes. +func NewSaltedCipher(key, salt []byte) (*Cipher, error) { + if len(salt) == 0 { + return NewCipher(key) + } + var result Cipher + if k := len(key); k < 1 { + return nil, KeySizeError(k) + } + initCipher(&result) + expandKeyWithSalt(key, salt, &result) + return &result, nil +} + +// BlockSize returns the Blowfish block size, 8 bytes. +// It is necessary to satisfy the Block interface in the +// package "crypto/cipher". +func (c *Cipher) BlockSize() int { return BlockSize } + +// Encrypt encrypts the 8-byte buffer src using the key k +// and stores the result in dst. +// Note that for amounts of data larger than a block, +// it is not safe to just call Encrypt on successive blocks; +// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go). 
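+//
+// A minimal single-block sketch, assuming key and src come from the caller
+// and src holds at least BlockSize bytes:
+//
+//	c, err := NewCipher(key)
+//	if err != nil {
+//		// handle KeySizeError (key must be 1 to 56 bytes)
+//	}
+//	var dst [BlockSize]byte
+//	c.Encrypt(dst[:], src[:BlockSize])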
+func (c *Cipher) Encrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + l, r = encryptBlock(l, r, c) + dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) + dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) +} + +// Decrypt decrypts the 8-byte buffer src using the key k +// and stores the result in dst. +func (c *Cipher) Decrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + l, r = decryptBlock(l, r, c) + dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) + dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) +} + +func initCipher(c *Cipher) { + copy(c.p[0:], p[0:]) + copy(c.s0[0:], s0[0:]) + copy(c.s1[0:], s1[0:]) + copy(c.s2[0:], s2[0:]) + copy(c.s3[0:], s3[0:]) +} diff --git a/vendor/golang.org/x/crypto/blowfish/const.go b/vendor/golang.org/x/crypto/blowfish/const.go new file mode 100644 index 0000000..8c5ee4c --- /dev/null +++ b/vendor/golang.org/x/crypto/blowfish/const.go @@ -0,0 +1,199 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The startup permutation array and substitution boxes. +// They are the hexadecimal digits of PI; see: +// http://www.schneier.com/code/constants.txt. + +package blowfish + +var s0 = [256]uint32{ + 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96, + 0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16, + 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658, + 0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013, + 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e, + 0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60, + 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6, + 0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a, + 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c, + 0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193, + 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1, + 0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239, + 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a, + 0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3, + 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176, + 0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe, + 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706, + 0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b, + 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b, + 0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463, + 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c, + 0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3, + 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a, + 0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8, + 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760, + 0x53317b48, 0x3e00df82, 0x9e5c57bb, 
0xca6f8ca0, 0x1a87562e, 0xdf1769db, + 0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8, + 0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b, + 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33, + 0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4, + 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0, + 0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c, + 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777, + 0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299, + 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705, + 0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf, + 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e, + 0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa, + 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9, + 0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915, + 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f, + 0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664, + 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a, +} + +var s1 = [256]uint32{ + 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d, + 0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1, + 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65, + 0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1, + 0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9, + 0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737, + 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d, + 0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd, + 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc, + 0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41, + 0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908, + 0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af, + 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124, + 0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c, + 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908, + 0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd, + 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b, + 0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e, + 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa, + 0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a, + 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d, + 0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66, + 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5, + 0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84, + 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96, + 0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14, + 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca, + 0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7, + 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77, + 0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99, + 0x57f584a5, 0x1b227263, 
0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054, + 0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73, + 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea, + 0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105, + 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646, + 0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285, + 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea, + 0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb, + 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e, + 0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc, + 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd, + 0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20, + 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7, +} + +var s2 = [256]uint32{ + 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7, + 0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af, + 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af, + 0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504, + 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4, + 0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee, + 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec, + 0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b, + 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332, + 0x6841e7f7, 0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527, + 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58, + 0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c, + 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22, + 0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17, + 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60, + 0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115, + 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99, + 0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0, + 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74, + 0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d, + 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3, + 0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3, + 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979, + 0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c, + 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa, + 0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a, + 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086, + 0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc, + 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24, + 0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2, + 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84, + 0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c, + 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09, + 0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10, + 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe, + 0xa1e2ce9b, 
0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027, + 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0, + 0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634, + 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188, + 0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc, + 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8, + 0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837, + 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0, +} + +var s3 = [256]uint32{ + 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742, + 0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b, + 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79, + 0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6, + 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a, + 0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4, + 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1, + 0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59, + 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797, + 0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28, + 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6, + 0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28, + 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba, + 0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a, + 0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5, + 0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f, + 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce, + 0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680, + 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd, + 0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb, + 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb, + 0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370, + 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc, + 0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048, + 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc, + 0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9, + 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a, + 0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f, + 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a, + 0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1, + 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b, + 0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e, + 0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e, + 0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f, + 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623, + 0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc, + 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a, + 0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6, + 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3, + 0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060, + 
0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c, + 0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f, + 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6, +} + +var p = [18]uint32{ + 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0, + 0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c, + 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b, +} diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go new file mode 100644 index 0000000..593f653 --- /dev/null +++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go @@ -0,0 +1,77 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC +2898 / PKCS #5 v2.0. + +A key derivation function is useful when encrypting data based on a password +or any other not-fully-random data. It uses a pseudorandom function to derive +a secure encryption key based on the password. + +While v2.0 of the standard defines only one pseudorandom function to use, +HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved +Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To +choose, you can pass the `New` functions from the different SHA packages to +pbkdf2.Key. +*/ +package pbkdf2 // import "golang.org/x/crypto/pbkdf2" + +import ( + "crypto/hmac" + "hash" +) + +// Key derives a key from the password, salt and iteration count, returning a +// []byte of length keylen that can be used as cryptographic key. The key is +// derived based on the method described as PBKDF2 with the HMAC variant using +// the supplied hash function. +// +// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you +// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by +// doing: +// +// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) +// +// Remember to get a good random salt. At least 8 bytes is recommended by the +// RFC. +// +// Using a higher iteration count will increase the cost of an exhaustive +// search but will also make derivation proportionally slower. +func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte { + prf := hmac.New(h, password) + hashLen := prf.Size() + numBlocks := (keyLen + hashLen - 1) / hashLen + + var buf [4]byte + dk := make([]byte, 0, numBlocks*hashLen) + U := make([]byte, hashLen) + for block := 1; block <= numBlocks; block++ { + // N.B.: || means concatenation, ^ means XOR + // for each block T_i = U_1 ^ U_2 ^ ... ^ U_iter + // U_1 = PRF(password, salt || uint(i)) + prf.Reset() + prf.Write(salt) + buf[0] = byte(block >> 24) + buf[1] = byte(block >> 16) + buf[2] = byte(block >> 8) + buf[3] = byte(block) + prf.Write(buf[:4]) + dk = prf.Sum(dk) + T := dk[len(dk)-hashLen:] + copy(U, T) + + // U_n = PRF(password, U_(n-1)) + for n := 2; n <= iter; n++ { + prf.Reset() + prf.Write(U) + U = U[:0] + U = prf.Sum(U) + for x := range U { + T[x] ^= U[x] + } + } + } + return dk[:keyLen] +} diff --git a/vendor/golang.org/x/crypto/scrypt/scrypt.go b/vendor/golang.org/x/crypto/scrypt/scrypt.go new file mode 100644 index 0000000..7455395 --- /dev/null +++ b/vendor/golang.org/x/crypto/scrypt/scrypt.go @@ -0,0 +1,243 @@ +// Copyright 2012 The Go Authors. All rights reserved. 
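Editorial aside on the pbkdf2 package added above (the scrypt file continues below): a hedged sketch of deriving a 32-byte key with HMAC-SHA-256 and a random salt. The helper name and the iteration count are illustrative only, not taken from this repository:

```go
package example

import (
	"crypto/rand"
	"crypto/sha256"

	"golang.org/x/crypto/pbkdf2"
)

// deriveKey derives a 32-byte key from a password; the salt is returned so
// it can be stored alongside the derived key. 100000 iterations is only an
// example value.
func deriveKey(password string) (key, salt []byte, err error) {
	salt = make([]byte, 16)
	if _, err = rand.Read(salt); err != nil {
		return nil, nil, err
	}
	key = pbkdf2.Key([]byte(password), salt, 100000, 32, sha256.New)
	return key, salt, nil
}
```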
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package scrypt implements the scrypt key derivation function as defined in +// Colin Percival's paper "Stronger Key Derivation via Sequential Memory-Hard +// Functions" (http://www.tarsnap.com/scrypt/scrypt.pdf). +package scrypt // import "golang.org/x/crypto/scrypt" + +import ( + "crypto/sha256" + "errors" + + "golang.org/x/crypto/pbkdf2" +) + +const maxInt = int(^uint(0) >> 1) + +// blockCopy copies n numbers from src into dst. +func blockCopy(dst, src []uint32, n int) { + copy(dst, src[:n]) +} + +// blockXOR XORs numbers from dst with n numbers from src. +func blockXOR(dst, src []uint32, n int) { + for i, v := range src[:n] { + dst[i] ^= v + } +} + +// salsaXOR applies Salsa20/8 to the XOR of 16 numbers from tmp and in, +// and puts the result into both both tmp and out. +func salsaXOR(tmp *[16]uint32, in, out []uint32) { + w0 := tmp[0] ^ in[0] + w1 := tmp[1] ^ in[1] + w2 := tmp[2] ^ in[2] + w3 := tmp[3] ^ in[3] + w4 := tmp[4] ^ in[4] + w5 := tmp[5] ^ in[5] + w6 := tmp[6] ^ in[6] + w7 := tmp[7] ^ in[7] + w8 := tmp[8] ^ in[8] + w9 := tmp[9] ^ in[9] + w10 := tmp[10] ^ in[10] + w11 := tmp[11] ^ in[11] + w12 := tmp[12] ^ in[12] + w13 := tmp[13] ^ in[13] + w14 := tmp[14] ^ in[14] + w15 := tmp[15] ^ in[15] + + x0, x1, x2, x3, x4, x5, x6, x7, x8 := w0, w1, w2, w3, w4, w5, w6, w7, w8 + x9, x10, x11, x12, x13, x14, x15 := w9, w10, w11, w12, w13, w14, w15 + + for i := 0; i < 8; i += 2 { + u := x0 + x12 + x4 ^= u<<7 | u>>(32-7) + u = x4 + x0 + x8 ^= u<<9 | u>>(32-9) + u = x8 + x4 + x12 ^= u<<13 | u>>(32-13) + u = x12 + x8 + x0 ^= u<<18 | u>>(32-18) + + u = x5 + x1 + x9 ^= u<<7 | u>>(32-7) + u = x9 + x5 + x13 ^= u<<9 | u>>(32-9) + u = x13 + x9 + x1 ^= u<<13 | u>>(32-13) + u = x1 + x13 + x5 ^= u<<18 | u>>(32-18) + + u = x10 + x6 + x14 ^= u<<7 | u>>(32-7) + u = x14 + x10 + x2 ^= u<<9 | u>>(32-9) + u = x2 + x14 + x6 ^= u<<13 | u>>(32-13) + u = x6 + x2 + x10 ^= u<<18 | u>>(32-18) + + u = x15 + x11 + x3 ^= u<<7 | u>>(32-7) + u = x3 + x15 + x7 ^= u<<9 | u>>(32-9) + u = x7 + x3 + x11 ^= u<<13 | u>>(32-13) + u = x11 + x7 + x15 ^= u<<18 | u>>(32-18) + + u = x0 + x3 + x1 ^= u<<7 | u>>(32-7) + u = x1 + x0 + x2 ^= u<<9 | u>>(32-9) + u = x2 + x1 + x3 ^= u<<13 | u>>(32-13) + u = x3 + x2 + x0 ^= u<<18 | u>>(32-18) + + u = x5 + x4 + x6 ^= u<<7 | u>>(32-7) + u = x6 + x5 + x7 ^= u<<9 | u>>(32-9) + u = x7 + x6 + x4 ^= u<<13 | u>>(32-13) + u = x4 + x7 + x5 ^= u<<18 | u>>(32-18) + + u = x10 + x9 + x11 ^= u<<7 | u>>(32-7) + u = x11 + x10 + x8 ^= u<<9 | u>>(32-9) + u = x8 + x11 + x9 ^= u<<13 | u>>(32-13) + u = x9 + x8 + x10 ^= u<<18 | u>>(32-18) + + u = x15 + x14 + x12 ^= u<<7 | u>>(32-7) + u = x12 + x15 + x13 ^= u<<9 | u>>(32-9) + u = x13 + x12 + x14 ^= u<<13 | u>>(32-13) + u = x14 + x13 + x15 ^= u<<18 | u>>(32-18) + } + x0 += w0 + x1 += w1 + x2 += w2 + x3 += w3 + x4 += w4 + x5 += w5 + x6 += w6 + x7 += w7 + x8 += w8 + x9 += w9 + x10 += w10 + x11 += w11 + x12 += w12 + x13 += w13 + x14 += w14 + x15 += w15 + + out[0], tmp[0] = x0, x0 + out[1], tmp[1] = x1, x1 + out[2], tmp[2] = x2, x2 + out[3], tmp[3] = x3, x3 + out[4], tmp[4] = x4, x4 + out[5], tmp[5] = x5, x5 + out[6], tmp[6] = x6, x6 + out[7], tmp[7] = x7, x7 + out[8], tmp[8] = x8, x8 + out[9], tmp[9] = x9, x9 + out[10], tmp[10] = x10, x10 + out[11], tmp[11] = x11, x11 + out[12], tmp[12] = x12, x12 + out[13], tmp[13] = x13, x13 + out[14], tmp[14] = x14, x14 + out[15], tmp[15] = x15, x15 +} + +func blockMix(tmp *[16]uint32, in, out []uint32, r int) { + 
blockCopy(tmp[:], in[(2*r-1)*16:], 16) + for i := 0; i < 2*r; i += 2 { + salsaXOR(tmp, in[i*16:], out[i*8:]) + salsaXOR(tmp, in[i*16+16:], out[i*8+r*16:]) + } +} + +func integer(b []uint32, r int) uint64 { + j := (2*r - 1) * 16 + return uint64(b[j]) | uint64(b[j+1])<<32 +} + +func smix(b []byte, r, N int, v, xy []uint32) { + var tmp [16]uint32 + x := xy + y := xy[32*r:] + + j := 0 + for i := 0; i < 32*r; i++ { + x[i] = uint32(b[j]) | uint32(b[j+1])<<8 | uint32(b[j+2])<<16 | uint32(b[j+3])<<24 + j += 4 + } + for i := 0; i < N; i += 2 { + blockCopy(v[i*(32*r):], x, 32*r) + blockMix(&tmp, x, y, r) + + blockCopy(v[(i+1)*(32*r):], y, 32*r) + blockMix(&tmp, y, x, r) + } + for i := 0; i < N; i += 2 { + j := int(integer(x, r) & uint64(N-1)) + blockXOR(x, v[j*(32*r):], 32*r) + blockMix(&tmp, x, y, r) + + j = int(integer(y, r) & uint64(N-1)) + blockXOR(y, v[j*(32*r):], 32*r) + blockMix(&tmp, y, x, r) + } + j = 0 + for _, v := range x[:32*r] { + b[j+0] = byte(v >> 0) + b[j+1] = byte(v >> 8) + b[j+2] = byte(v >> 16) + b[j+3] = byte(v >> 24) + j += 4 + } +} + +// Key derives a key from the password, salt, and cost parameters, returning +// a byte slice of length keyLen that can be used as cryptographic key. +// +// N is a CPU/memory cost parameter, which must be a power of two greater than 1. +// r and p must satisfy r * p < 2³⁰. If the parameters do not satisfy the +// limits, the function returns a nil byte slice and an error. +// +// For example, you can get a derived key for e.g. AES-256 (which needs a +// 32-byte key) by doing: +// +// dk, err := scrypt.Key([]byte("some password"), salt, 16384, 8, 1, 32) +// +// The recommended parameters for interactive logins as of 2009 are N=16384, +// r=8, p=1. They should be increased as memory latency and CPU parallelism +// increases. Remember to get a good random salt. +func Key(password, salt []byte, N, r, p, keyLen int) ([]byte, error) { + if N <= 1 || N&(N-1) != 0 { + return nil, errors.New("scrypt: N must be > 1 and a power of 2") + } + if uint64(r)*uint64(p) >= 1<<30 || r > maxInt/128/p || r > maxInt/256 || N > maxInt/128/r { + return nil, errors.New("scrypt: parameters are too large") + } + + xy := make([]uint32, 64*r) + v := make([]uint32, 32*N*r) + b := pbkdf2.Key(password, salt, 1, p*128*r, sha256.New) + + for i := 0; i < p; i++ { + smix(b[i*128*r:], r, N, v, xy) + } + + return pbkdf2.Key(password, b, 1, keyLen, sha256.New), nil +} diff --git a/vendor/gopkg.in/hlandau/easymetric.v1/cexp/cexp.go b/vendor/gopkg.in/hlandau/easymetric.v1/cexp/cexp.go new file mode 100644 index 0000000..fd6d16a --- /dev/null +++ b/vendor/gopkg.in/hlandau/easymetric.v1/cexp/cexp.go @@ -0,0 +1,97 @@ +package cexp + +import "gopkg.in/hlandau/measurable.v1" +import "sync/atomic" + +// Counter + +type Counter struct { + name string + value int64 +} + +func (c *Counter) MsName() string { + return c.name +} + +func (c *Counter) MsInt64() int64 { + return atomic.LoadInt64(&c.value) +} + +func (c *Counter) Get() int64 { + return c.MsInt64() +} + +// v must be non-negative. 
+func (c *Counter) Add(v int64) { + atomic.AddInt64(&c.value, v) +} + +func (c *Counter) Inc() { + c.Add(1) +} + +func (c *Counter) MsType() measurable.Type { + return measurable.CounterType +} + +func NewCounter(name string) *Counter { + c := &Counter{ + name: name, + } + + measurable.Register(c) + return c +} + +// Gauge + +type Gauge struct { + name string + value int64 +} + +func (c *Gauge) MsName() string { + return c.name +} + +func (c *Gauge) MsInt64() int64 { + return atomic.LoadInt64(&c.value) +} + +func (c *Gauge) Add(v int64) { + atomic.AddInt64(&c.value, v) +} + +func (c *Gauge) Sub(v int64) { + c.Add(-v) +} + +func (c *Gauge) Set(v int64) { + atomic.StoreInt64(&c.value, v) +} + +func (c *Gauge) Get() int64 { + return c.MsInt64() +} + +func (c *Gauge) Inc() { + c.Add(1) +} + +func (c *Gauge) Dec() { + c.Add(-1) +} + +func (c *Gauge) MsType() measurable.Type { + return measurable.GaugeType +} + +func NewGauge(name string) *Gauge { + c := &Gauge{ + name: name, + } + + measurable.Register(c) + return c +} diff --git a/vendor/gopkg.in/hlandau/measurable.v1/README.md b/vendor/gopkg.in/hlandau/measurable.v1/README.md new file mode 100644 index 0000000..f2d582d --- /dev/null +++ b/vendor/gopkg.in/hlandau/measurable.v1/README.md @@ -0,0 +1,82 @@ +Measurable: The useless Go metric registration package that doesn't do anything +=============================================================================== + +[![GoDoc](https://godoc.org/gopkg.in/hlandau/measurable.v1?status.svg)](https://godoc.org/gopkg.in/hlandau/measurable.v1) + +Measurable is a Go library for managing the registration of metrics such as +counters and gauges, no matter how that metric data is eventually consumed. + +The most noteworthy feature of measurable is that it doesn't do anything. It +contains no functionality for providing metric data to any external service, +and it contains no actual metric implementations. + +The purpose of measurable is to act as an [integration +nexus](https://www.devever.net/~hl/nexuses), essentially a matchmaker between +metric sources and metric consumers. This creates the important feature that +your application's metrics can be expressed completely independently of *how* +those metrics are exported. + +Measurable doesn't implement any metric or metric export logic because it +strives to be a neutral intermediary, which abstracts the interface between +metrics and metric exporters. + +**Import as:** `gopkg.in/hlandau/measurable.v1` + +Measurable +---------- + +A Measurable is an object that represents some metric. It is obliged only to +implement the following interface: + +```go +type Measurable interface { + MsName() string + MsType() Type +} +``` + +Measurable is designed around interface upgrades. If you want to actually +do anything with a Measurable, you must attempt to cast it to an interface +with the methods you need. A Measurable is not obliged to implement any +interface besides Measurable, but almost always will. + +Here are some common interfaces implemented by Measurables, in descending order +of importance: + + - `MsName() string` — get the Measurable name. + - `MsType() Type` — get the Measurable type. + - `MsInt64() int64` — get the Measurable as an int64. + - `String() string` — the standard Go `String()` interface. + +All Measurables should implement `MsName() string` and `MsType() Type`. + +Measurable-specific methods should always be prefixed by `Ms` so it is clear +they are intended for consumption by Measurable consumers. 
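As an editorial illustration of the interface described above, a minimal counter satisfying Measurable plus the `MsInt64` upgrade might look like the sketch below; the type and metric name are invented for the example:

```go
package example

import (
	"sync/atomic"

	"gopkg.in/hlandau/measurable.v1"
)

// requestCount implements Measurable and the MsInt64 interface upgrade.
type requestCount struct{ n int64 }

func (c *requestCount) MsName() string          { return "example.http.request-count" }
func (c *requestCount) MsType() measurable.Type { return measurable.CounterType }
func (c *requestCount) MsInt64() int64          { return atomic.LoadInt64(&c.n) }

// Inc is the application-facing side; consumers only see the Ms* methods.
func (c *requestCount) Inc() { atomic.AddInt64(&c.n, 1) }
```

Registering it (`measurable.Register(&requestCount{})`) then makes it visible to any consumer that walks the registry via `Visit` and upgrades to `MsInt64`.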
+ +`MsName`, `MsType` and `MsInt64` should suffice for most consumers of Counter +and Gauge metric types. + +Metrics should be named in lowercase using dots to create a hierarchy and +dashes to separate words, e.g. `someserver.http.request-count`. These metric +names may be transmuted by consumers as necessary for some graphing systems, +such as Prometheus (which allows only underscores). + +Standard Bindings +----------------- + +For a package which makes it easy to register and consume measurables, see the +[easymetric](https://github.com/hlandau/easymetric) package. + +Of course, nothing requires you to use the easymetric package. You are free to escew it and make your own. + +Background Reading +------------------ + + - [On Nexuses](https://www.devever.net/~hl/nexuses) + - See also: [Configurable](https://github.com/hlandau/configurable) + +Licence +------- + + © 2015 Hugo Landau MIT License + diff --git a/vendor/gopkg.in/hlandau/measurable.v1/measurable.go b/vendor/gopkg.in/hlandau/measurable.v1/measurable.go new file mode 100644 index 0000000..4399d0a --- /dev/null +++ b/vendor/gopkg.in/hlandau/measurable.v1/measurable.go @@ -0,0 +1,189 @@ +// Package measurable provides a functionality-free integration nexus for +// metric registration. +// +// Measurable is a Go package for connecting service metrics and metric consumers. +// +// The most noteworthy feature of measurable is that it doesn't do anything. +// It contains no functionality for defining or exporting metrics. +// +// The purpose of measurable is to act as an integration nexus +// (https://www.devever.net/~hl/nexuses), essentially a matchmaker between +// application metrics and metric consumers. This creates the important feature +// that your application's metrics can be defined completely independently of +// *how* those metrics are defined. +// +// Measurable doesn't implement any metric definition or export logic because it +// strives to be a neutral intermediary, which abstracts the interface between +// measurables and measurable consumers +// +// Pursuant to this, package measurable is this and only this: an interface +// Measurable which all metrics must implement, and a facility for registering +// Measurables and visiting them. +package measurable // import "gopkg.in/hlandau/measurable.v1" + +import "sync" +import "fmt" + +// Measurable is the interface which must be implemented by any metric item to +// be used with package measurable. In the current version, v1, it contains +// only the MsName() and MsType() methods. All other functionality must be +// obtained by interface upgrades. +type Measurable interface { + // Returns the name of the metric. Names should be in the style + // "alpha.beta.gamma-delta", for example "foo.http.requests.count". That is, + // names should be lowercase, should express a hierarchy separated by dots, + // and have words separated by dashes. + // + // Some Measurable consumers may mutate these names to satisfy naming + // restrictions applied by some graphing systems. + MsName() string + + // Return the Measurable type. You can, of course, invent your own Measurable + // types, though consumers won't necessarily know what to do with them. + MsType() Type +} + +var measurablesMutex sync.RWMutex +var measurables = map[string]Measurable{} + +// Registers a top-level Configurable. 
+func Register(measurable Measurable) { + measurablesMutex.Lock() + defer measurablesMutex.Unlock() + + if measurable == nil { + panic("cannot register nil measurable") + } + + name := measurable.MsName() + if name == "" { + panic("measurable cannot have empty name") + } + + _, exists := measurables[name] + if exists { + panic(fmt.Sprintf("A measurable with the same name already exists: %s", name)) + } + + measurables[name] = measurable + callRegistrationHooks(measurable, RegisterEvent) +} + +func Unregister(measurableName string) { + measurablesMutex.Lock() + defer measurablesMutex.Unlock() + + measurable, ok := measurables[measurableName] + if !ok { + return + } + + callRegistrationHooks(measurable, UnregisterEvent) + delete(measurables, measurableName) +} + +func Get(measurableName string) Measurable { + measurablesMutex.RLock() + defer measurablesMutex.RUnlock() + + return measurables[measurableName] +} + +// Visits all registered top-level Measurables. +// +// Returning a non-nil error short-circuits the iteration process and returns +// that error. +func Visit(do func(measurable Measurable) error) error { + measurablesMutex.Lock() + defer measurablesMutex.Unlock() + + for _, measurable := range measurables { + err := do(measurable) + if err != nil { + return err + } + } + + return nil +} + +// Represents a measurable type. +type Type uint32 + +const ( + // A CounterType Measurable represents a non-negative integral value + // that monotonously increases. It must implement `MsInt64() int64`. + CounterType Type = 0x436E7472 + + // A GaugeType Measurable represents an integral value that varies over + // time. It must implement `MsInt64() int64`. + GaugeType = 0x47617567 +) + +// Registration hooks. +type HookEvent int + +const ( + // This event is issued when a measurable is registered. + RegisterEvent HookEvent = iota + + // This event is issued when a registration hook is registered. It is issued + // for every measurable which has already been registered. + RegisterCatchupEvent + + // This event is issued when a measurable is unregistered. + UnregisterEvent +) + +type HookFunc func(measurable Measurable, hookEvent HookEvent) + +var hooksMutex sync.RWMutex +var hooks = map[interface{}]HookFunc{} + +// Register for notifications on metric registration. The key must be usable as +// a key in a map and identifies the hook. No other hook with the same key must +// already exist. +// +// NOTE: The hook will be called for all registrations which already exist. +// This ensures that no registrations are missed in a threadsafe manner. +// For these calls, the event will be EventRegisterCatchup. +// +// The hook must not register or unregister registration hooks or metrics. +func RegisterHook(key interface{}, hook HookFunc) { + measurablesMutex.RLock() + defer measurablesMutex.RUnlock() + + registerHook(key, hook) + + for _, m := range measurables { + hook(m, RegisterCatchupEvent) + } +} + +func registerHook(key interface{}, hook HookFunc) { + hooksMutex.Lock() + defer hooksMutex.Unlock() + + _, exists := hooks[key] + if exists { + panic(fmt.Sprintf("A metric registration hook with the same key already exists: %+v", key)) + } + + hooks[key] = hook +} + +// Unregister an existing hook. 
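A brief editorial sketch of the hook mechanism described above (the key type and log output are invented): the hook fires with RegisterCatchupEvent for metrics that were already registered and with RegisterEvent or UnregisterEvent afterwards.

```go
package example

import (
	"log"

	"gopkg.in/hlandau/measurable.v1"
)

// hookKey is an unexported key type so no other package can collide with it.
type hookKey struct{}

func watchRegistrations() {
	measurable.RegisterHook(hookKey{}, func(m measurable.Measurable, ev measurable.HookEvent) {
		switch ev {
		case measurable.RegisterEvent, measurable.RegisterCatchupEvent:
			log.Printf("metric registered: %s", m.MsName())
		case measurable.UnregisterEvent:
			log.Printf("metric unregistered: %s", m.MsName())
		}
	})
}
```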
+func UnregisterHook(key interface{}) { + hooksMutex.Lock() + defer hooksMutex.Unlock() + delete(hooks, key) +} + +func callRegistrationHooks(measurable Measurable, event HookEvent) { + hooksMutex.RLock() + defer hooksMutex.RUnlock() + + for _, v := range hooks { + v(measurable, event) + } +} diff --git a/vendor/gopkg.in/hlandau/passlib.v1/COPYING b/vendor/gopkg.in/hlandau/passlib.v1/COPYING new file mode 100644 index 0000000..d2aa62a --- /dev/null +++ b/vendor/gopkg.in/hlandau/passlib.v1/COPYING @@ -0,0 +1,39 @@ +passlib is a Golang password verification library strongly inspired by and +derived from Python passlib (). The BSD +license is preserved and extended to all new code. + +License for Passlib +=================== +Passlib is (c) `Assurance Technologies `_, +and is released under the `BSD license `_:: + + Passlib + Copyright (c) 2008-2012 Assurance Technologies, LLC. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + * Neither the name of Assurance Technologies, nor the names of the + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/gopkg.in/hlandau/passlib.v1/abstract/compare.go b/vendor/gopkg.in/hlandau/passlib.v1/abstract/compare.go new file mode 100644 index 0000000..d336f13 --- /dev/null +++ b/vendor/gopkg.in/hlandau/passlib.v1/abstract/compare.go @@ -0,0 +1,11 @@ +package abstract + +import "crypto/subtle" + +// Compares two strings (typicaly password hashes) in a secure, constant-time +// fashion. Returns true iff they are equal. +func SecureCompare(a, b string) bool { + ab := []byte(a) + bb := []byte(b) + return subtle.ConstantTimeCompare(ab, bb) == 1 +} diff --git a/vendor/gopkg.in/hlandau/passlib.v1/abstract/errors.go b/vendor/gopkg.in/hlandau/passlib.v1/abstract/errors.go new file mode 100644 index 0000000..3298f82 --- /dev/null +++ b/vendor/gopkg.in/hlandau/passlib.v1/abstract/errors.go @@ -0,0 +1,15 @@ +// Package abstract contains the abstract description of the Scheme interface, +// plus supporting error definitions. +package abstract + +import "fmt" + +// Indicates that password verification failed because the provided password +// does not match the provided hash. 
+var ErrInvalidPassword = fmt.Errorf("invalid password") + +// Indicates that password verification is not possible because the hashing +// scheme used by the hash provided is not supported. +var ErrUnsupportedScheme = fmt.Errorf("unsupported scheme") + +// © 2014 Hugo Landau MIT License diff --git a/vendor/gopkg.in/hlandau/passlib.v1/abstract/scheme.go b/vendor/gopkg.in/hlandau/passlib.v1/abstract/scheme.go new file mode 100644 index 0000000..3589022 --- /dev/null +++ b/vendor/gopkg.in/hlandau/passlib.v1/abstract/scheme.go @@ -0,0 +1,34 @@ +package abstract + +// The Scheme interface provides an abstract interface to an implementation +// of a particular password hashing scheme. The Scheme generates password +// hashes from passwords, verifies passwords using password hashes, randomly +// generates new stubs and can determines whether it recognises a given +// stub or hash. It may also decide to issue upgrades. +type Scheme interface { + // Hashes a plaintext UTF-8 password using a modular crypt stub. Returns the + // hashed password in modular crypt format. + // + // A modular crypt stub is a prefix of a hash in modular crypt format which + // expresses all necessary configuration information, such as salt and + // iteration count. For example, for sha256-crypt, a valid stub would be: + // + // $5$rounds=6000$salt + // + // A full modular crypt hash may also be passed as the stub, in which case + // the hash is ignored. + Hash(password string) (string, error) + + // Verifies a plaintext UTF-8 password using a modular crypt hash. Returns + // an error if the inputs are malformed or the password does not match. + Verify(password, hash string) (err error) + + // Returns true iff this crypter supports the given stub. + SupportsStub(stub string) bool + + // Returns true iff this stub needs an update. + NeedsUpdate(stub string) bool + + // Make a stub with the configured defaults. The salt is generated randomly. + //MakeStub() (string, error) +} diff --git a/vendor/gopkg.in/hlandau/passlib.v1/hash/bcrypt/bcrypt.go b/vendor/gopkg.in/hlandau/passlib.v1/hash/bcrypt/bcrypt.go new file mode 100644 index 0000000..fbcc38d --- /dev/null +++ b/vendor/gopkg.in/hlandau/passlib.v1/hash/bcrypt/bcrypt.go @@ -0,0 +1,72 @@ +// Package bcrypt implements the bcrypt password hashing mechanism. +// +// Please note that bcrypt truncates passwords to 72 characters in length. Consider using +// a more modern hashing scheme such as scrypt or sha-crypt. If you must use bcrypt, +// consider using bcrypt-sha256 instead. +package bcrypt + +import "golang.org/x/crypto/bcrypt" +import "gopkg.in/hlandau/passlib.v1/abstract" +import "fmt" + +// An implementation of Scheme implementing bcrypt. +// +// Uses RecommendedCost. +var Crypter abstract.Scheme + +// The recommended cost for bcrypt. This may change with subsequent releases. +const RecommendedCost = 12 + +// bcrypt.DefaultCost is a bit low (10), so use 12 instead. + +func init() { + Crypter = New(RecommendedCost) +} + +// Create a new scheme implementing bcrypt. The recommended cost is RecommendedCost. 
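To illustrate how the Scheme interface above is consumed, here is a hedged sketch using the bcrypt scheme from this diff; the helper name is invented, and real callers may well go through a higher-level passlib helper rather than a single scheme:

```go
package example

import (
	"gopkg.in/hlandau/passlib.v1/abstract"
	"gopkg.in/hlandau/passlib.v1/hash/bcrypt"
)

// hashAndVerify shows the Scheme contract: Hash emits a modular-crypt
// string, Verify returns nil only when the password matches the hash.
func hashAndVerify(password string) error {
	var s abstract.Scheme = bcrypt.Crypter // package default, cost 12
	h, err := s.Hash(password)
	if err != nil {
		return err
	}
	return s.Verify(password, h) // abstract.ErrInvalidPassword on mismatch
}
```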
+func New(cost int) abstract.Scheme { + return &scheme{ + Cost: cost, + } +} + +type scheme struct { + Cost int +} + +func (s *scheme) SupportsStub(stub string) bool { + return len(stub) >= 3 && stub[0] == '$' && stub[1] == '2' && + (stub[2] == '$' || (len(stub) >= 4 && stub[3] == '$' && + (stub[2] == 'a' || stub[2] == 'b' || stub[2] == 'y'))) +} + +func (s *scheme) Hash(password string) (string, error) { + h, err := bcrypt.GenerateFromPassword([]byte(password), s.Cost) + if err != nil { + return "", err + } + + return string(h), nil +} + +func (s *scheme) Verify(password, hash string) error { + err := bcrypt.CompareHashAndPassword([]byte(hash), []byte(password)) + if err == bcrypt.ErrMismatchedHashAndPassword { + err = abstract.ErrInvalidPassword + } + + return err +} + +func (s *scheme) NeedsUpdate(stub string) bool { + cost, err := bcrypt.Cost([]byte(stub)) + if err != nil { + return false + } + + return cost < s.Cost +} + +func (s *scheme) String() string { + return fmt.Sprintf("bcrypt(%d)", s.Cost) +} diff --git a/vendor/gopkg.in/hlandau/passlib.v1/hash/bcryptsha256/bcryptsha256.go b/vendor/gopkg.in/hlandau/passlib.v1/hash/bcryptsha256/bcryptsha256.go new file mode 100644 index 0000000..e11d083 --- /dev/null +++ b/vendor/gopkg.in/hlandau/passlib.v1/hash/bcryptsha256/bcryptsha256.go @@ -0,0 +1,96 @@ +// Package bcryptsha256 implements bcrypt with a SHA256 prehash in a format that is compatible with Python passlib's equivalent bcrypt-sha256 scheme. +// +// This is preferred over bcrypt because the prehash essentially renders bcrypt's password length +// limitation irrelevant; although of course it is less compatible. +package bcryptsha256 + +import "gopkg.in/hlandau/passlib.v1/abstract" +import "gopkg.in/hlandau/passlib.v1/hash/bcrypt" +import "encoding/base64" +import "crypto/sha256" +import "strings" +import "fmt" + +type scheme struct { + underlying abstract.Scheme + cost int +} + +// An implementation of Scheme implementing Python passlib's `$bcrypt-sha256$` +// bcrypt variant. This is bcrypt with a SHA256 prehash, which removes bcrypt's +// password length limitation. +var Crypter abstract.Scheme + +// The recommended cost for bcrypt-sha256. This may change with subsequent releases. +const RecommendedCost = bcrypt.RecommendedCost + +func init() { + Crypter = New(bcrypt.RecommendedCost) +} + +// Instantiates a new Scheme implementing bcrypt with the given cost. +// +// The recommended cost is RecommendedCost. 
+func New(cost int) abstract.Scheme { + return &scheme{ + underlying: bcrypt.New(cost), + cost: cost, + } +} + +func (s *scheme) Hash(password string) (string, error) { + p := s.prehash(password) + h, err := s.underlying.Hash(p) + if err != nil { + return "", err + } + + return mangle(h), nil +} + +func (s *scheme) Verify(password, hash string) error { + p := s.prehash(password) + return s.underlying.Verify(p, demangle(hash)) +} + +func (s *scheme) prehash(password string) string { + h := sha256.New() + h.Write([]byte(password)) + v := base64.StdEncoding.EncodeToString(h.Sum(nil)) + return v +} + +func (s *scheme) SupportsStub(stub string) bool { + return strings.HasPrefix(stub, "$bcrypt-sha256$") && s.underlying.SupportsStub(demangle(stub)) +} + +func (s *scheme) NeedsUpdate(stub string) bool { + return s.underlying.NeedsUpdate(demangle(stub)) +} + +func (s *scheme) String() string { + return fmt.Sprintf("bcrypt-sha256(%d)", s.cost) +} + +func demangle(stub string) string { + if strings.HasPrefix(stub, "$bcrypt-sha256$2") { + parts := strings.Split(stub[15:], "$") + // 0: 2a,12 + // 1: salt + // 2: hash + parts0 := strings.Split(parts[0], ",") + return "$" + parts0[0] + "$" + fmt.Sprintf("%02s", parts0[1]) + "$" + parts[1] + parts[2] + } else { + return stub + } +} + +func mangle(hash string) string { + parts := strings.Split(hash[1:], "$") + // 0: 2a + // 1: rounds + // 2: salt + hash + salt := parts[2][0:22] + h := parts[2][22:] + return "$bcrypt-sha256$" + parts[0] + "," + parts[1] + "$" + salt + "$" + h +} diff --git a/vendor/gopkg.in/hlandau/passlib.v1/hash/scrypt/raw/scrypt.go b/vendor/gopkg.in/hlandau/passlib.v1/hash/scrypt/raw/scrypt.go new file mode 100644 index 0000000..d2718b2 --- /dev/null +++ b/vendor/gopkg.in/hlandau/passlib.v1/hash/scrypt/raw/scrypt.go @@ -0,0 +1,95 @@ +// Package raw provides a raw implementation of the modular-crypt-wrapped scrypt primitive. +package raw + +import "golang.org/x/crypto/scrypt" +import "encoding/base64" +import "strings" +import "strconv" +import "fmt" + +// The current recommended N value for interactive logins. +const RecommendedN = 16384 + +// The current recommended r value for interactive logins. +const Recommendedr = 8 + +// The current recommended p value for interactive logins. +const Recommendedp = 1 + +// Wrapper for golang.org/x/crypto/scrypt implementing a sensible +// modular crypt interface. +// +// password should be a UTF-8 plaintext password. +// salt should be a random salt value in binary form. +// +// N, r and p are parameters to scrypt. +// +// Returns a modular crypt hash. +func ScryptSHA256(password string, salt []byte, N, r, p int) string { + passwordb := []byte(password) + + hash, err := scrypt.Key(passwordb, salt, N, r, p, 32) + if err != nil { + panic(err) + } + + hstr := base64.StdEncoding.EncodeToString(hash) + sstr := base64.StdEncoding.EncodeToString(salt) + + return fmt.Sprintf("$s2$%d$%d$%d$%s$%s", N, r, p, sstr, hstr) +} + +// Indicates that a password hash or stub is invalid. +var ErrInvalidStub = fmt.Errorf("invalid scrypt password stub") + +// Parses an scrypt modular hash or stub string. 
+// +// The format is as follows: +// +// $s2$N$r$p$salt$hash // hash +// $s2$N$r$p$salt // stub +// +func Parse(stub string) (salt, hash []byte, N, r, p int, err error) { + if len(stub) < 10 || !strings.HasPrefix(stub, "$s2$") { + err = ErrInvalidStub + return + } + + // $s2$ N$r$p$salt-base64$hash-base64 + parts := strings.Split(stub[4:], "$") + + if len(parts) < 4 { + err = ErrInvalidStub + return + } + + var Ni, ri, pi uint64 + + Ni, err = strconv.ParseUint(parts[0], 10, 31) + if err != nil { + return + } + + ri, err = strconv.ParseUint(parts[1], 10, 31) + if err != nil { + return + } + + pi, err = strconv.ParseUint(parts[2], 10, 31) + if err != nil { + return + } + + N, r, p = int(Ni), int(ri), int(pi) + + salt, err = base64.StdEncoding.DecodeString(parts[3]) + if err != nil { + return + } + + if len(parts) >= 5 { + hash, err = base64.StdEncoding.DecodeString(parts[4]) + } + + return +} diff --git a/vendor/gopkg.in/hlandau/passlib.v1/hash/scrypt/scrypt.go b/vendor/gopkg.in/hlandau/passlib.v1/hash/scrypt/scrypt.go new file mode 100644 index 0000000..039182c --- /dev/null +++ b/vendor/gopkg.in/hlandau/passlib.v1/hash/scrypt/scrypt.go @@ -0,0 +1,113 @@ +// Package scrypt implements the scrypt password hashing mechanism, wrapped in +// the modular crypt format. +package scrypt + +import "fmt" +import "expvar" +import "strings" +import "crypto/rand" +import "encoding/base64" +import "gopkg.in/hlandau/passlib.v1/hash/scrypt/raw" +import "gopkg.in/hlandau/passlib.v1/abstract" + +var cScryptSHA256HashCalls = expvar.NewInt("passlib.scryptsha256.hashCalls") +var cScryptSHA256VerifyCalls = expvar.NewInt("passlib.scryptsha256.verifyCalls") + +// An implementation of Scheme performing scrypt-sha256. +// +// Uses the recommended values for N,r,p defined in raw. +var SHA256Crypter abstract.Scheme + +func init() { + SHA256Crypter = NewSHA256( + raw.RecommendedN, + raw.Recommendedr, + raw.Recommendedp, + ) +} + +// Returns an implementation of Scheme implementing scrypt-sha256 +// with the specified parameters. +func NewSHA256(N, r, p int) abstract.Scheme { + return &scryptSHA256Crypter{ + nN: N, + r: r, + p: p, + } +} + +type scryptSHA256Crypter struct { + nN, r, p int +} + +func (c *scryptSHA256Crypter) SetParams(N, r, p int) error { + c.nN = N + c.r = r + c.p = p + return nil +} + +func (c *scryptSHA256Crypter) SupportsStub(stub string) bool { + return strings.HasPrefix(stub, "$s2$") +} + +func (c *scryptSHA256Crypter) Hash(password string) (string, error) { + cScryptSHA256HashCalls.Add(1) + + stub, err := c.makeStub() + if err != nil { + return "", err + } + + _, newHash, _, _, _, _, err := c.hash(password, stub) + return newHash, err +} + +func (c *scryptSHA256Crypter) Verify(password, hash string) (err error) { + cScryptSHA256VerifyCalls.Add(1) + + _, newHash, _, _, _, _, err := c.hash(password, hash) + if err == nil && !abstract.SecureCompare(hash, newHash) { + err = abstract.ErrInvalidPassword + } + + return +} + +func (c *scryptSHA256Crypter) NeedsUpdate(stub string) bool { + salt, _, N, r, p, err := raw.Parse(stub) + if err != nil { + return false // ... 
+ } + + return c.needsUpdate(salt, N, r, p) +} + +func (c *scryptSHA256Crypter) needsUpdate(salt []byte, N, r, p int) bool { + return len(salt) < 18 || N < c.nN || r < c.r || p < c.p +} + +func (c *scryptSHA256Crypter) hash(password, stub string) (oldHashRaw []byte, newHash string, salt []byte, N, r, p int, err error) { + salt, oldHashRaw, N, r, p, err = raw.Parse(stub) + if err != nil { + return + } + + return oldHashRaw, raw.ScryptSHA256(password, salt, N, r, p), salt, N, r, p, nil +} + +func (c *scryptSHA256Crypter) makeStub() (string, error) { + buf := make([]byte, 18) + _, err := rand.Read(buf) + if err != nil { + return "", err + } + + salt := base64.StdEncoding.EncodeToString(buf) + + return fmt.Sprintf("$s2$%d$%d$%d$%s", c.nN, c.r, c.p, salt), nil +} + +func (c *scryptSHA256Crypter) String() string { + return fmt.Sprintf("scrypt-sha256(%d,%d,%d)", c.nN, c.r, c.p) +} diff --git a/vendor/gopkg.in/hlandau/passlib.v1/hash/sha2crypt/raw/base64.go b/vendor/gopkg.in/hlandau/passlib.v1/hash/sha2crypt/raw/base64.go new file mode 100644 index 0000000..2f96e6e --- /dev/null +++ b/vendor/gopkg.in/hlandau/passlib.v1/hash/sha2crypt/raw/base64.go @@ -0,0 +1,34 @@ +package raw + +const bmap = "./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + +// Encodes a byte string using the sha2-crypt base64 variant. +func EncodeBase64(b []byte) string { + o := make([]byte, len(b)/3*4+4) + + for i, j := 0, 0; i < len(b); { + b1 := b[i] + b2 := byte(0) + b3 := byte(0) + + if (i + 1) < len(b) { + b2 = b[i+1] + } + if (i + 2) < len(b) { + b3 = b[i+2] + } + + o[j] = bmap[(b1 & 0x3F)] + o[j+1] = bmap[((b1&0xC0)>>6)|((b2&0x0F)<<2)] + o[j+2] = bmap[((b2&0xF0)>>4)|((b3&0x03)<<4)] + o[j+3] = bmap[(b3&0xFC)>>2] + i += 3 + j += 4 + } + + s := string(o) + return s[0 : len(b)*4/3-(len(b)%4)+1] +} + +// © 2008-2012 Assurance Technologies LLC. (Python passlib) BSD License +// © 2014 Hugo Landau BSD License diff --git a/vendor/gopkg.in/hlandau/passlib.v1/hash/sha2crypt/raw/parse.go b/vendor/gopkg.in/hlandau/passlib.v1/hash/sha2crypt/raw/parse.go new file mode 100644 index 0000000..9e1fcfc --- /dev/null +++ b/vendor/gopkg.in/hlandau/passlib.v1/hash/sha2crypt/raw/parse.go @@ -0,0 +1,82 @@ +package raw + +import "fmt" +import "strings" +import "strconv" + +// Indicates that a password hash or stub is invalid. +var ErrInvalidStub = fmt.Errorf("invalid stub") + +// Indicates that the number of rounds specified is not in the valid range. +var ErrInvalidRounds = fmt.Errorf("invalid number of rounds") + +// Scans a sha256-crypt or sha512-crypt modular crypt stub or modular crypt hash +// to determine configuration parameters. 
+func Parse(stub string) (isSHA512 bool, salt, hash string, rounds int, err error) { + // $5$ + if len(stub) < 3 || stub[0] != '$' || stub[2] != '$' { + err = ErrInvalidStub + return + } + + if stub[1] == '6' { + isSHA512 = true + } else if stub[1] != '5' { + err = ErrInvalidStub + return + } + + rest := stub[3:] + parts := strings.Split(rest, "$") + roundsStr := "" + + switch len(parts) { + case 1: + // $5$ + // $5$salt + salt = parts[0] + case 2: + // $5$salt$hash + // $5$rounds=1000$salt + if strings.HasPrefix(parts[0], "rounds=") { + roundsStr = parts[0] + salt = parts[1] + } else { + salt = parts[0] + hash = parts[1] + } + case 3: + // $5$rounds=1000$salt$hash + roundsStr = parts[0] + salt = parts[1] + hash = parts[2] + default: + err = ErrInvalidStub + } + + if roundsStr != "" { + if !strings.HasPrefix(roundsStr, "rounds=") { + err = ErrInvalidStub + return + } + + roundsStr = roundsStr[7:] + var n uint64 + n, err = strconv.ParseUint(roundsStr, 10, 31) + if err != nil { + err = ErrInvalidStub + return + } + + rounds = int(n) + + if rounds < MinimumRounds || rounds > MaximumRounds { + err = ErrInvalidRounds + return + } + } else { + rounds = DefaultRounds + } + + return +} diff --git a/vendor/gopkg.in/hlandau/passlib.v1/hash/sha2crypt/raw/sha2crypt.go b/vendor/gopkg.in/hlandau/passlib.v1/hash/sha2crypt/raw/sha2crypt.go new file mode 100644 index 0000000..4b256fd --- /dev/null +++ b/vendor/gopkg.in/hlandau/passlib.v1/hash/sha2crypt/raw/sha2crypt.go @@ -0,0 +1,187 @@ +// Package raw provides a raw implementation of the sha256-crypt and sha512-crypt primitives. +package raw + +import "io" +import "fmt" +import "hash" +import "crypto/sha256" +import "crypto/sha512" + +// The minimum number of rounds permissible for sha256-crypt and sha512-crypt. +const MinimumRounds = 1000 + +// The maximum number of rounds permissible for sha256-crypt and sha512-crypt. +// Don't use this! +const MaximumRounds = 999999999 + +// This is the 'default' number of rounds for sha256-crypt and sha512-crypt. If +// this rounds value is used the number of rounds is not explicitly specified +// in the modular crypt format, as it is the default. +const DefaultRounds = 5000 + +// This is the recommended number of rounds for sha256-crypt and sha512-crypt. +// This may change with subsequent releases of this package. It is recommended +// that you invoke sha256-crypt or sha512-crypt with this value, or a value +// proportional to it. +const RecommendedRounds = 10000 + +// Calculates sha256-crypt. The password must be in plaintext and be a UTF-8 +// string. +// +// The salt must be a valid ASCII between 0 and 16 characters in length +// inclusive. +// +// See the constants in this package for suggested values for rounds. +// +// Rounds must be in the range 1000 <= rounds <= 999999999. The function panics +// if this is not the case. +// +// The output is in modular crypt format. +func Crypt256(password, salt string, rounds int) string { + return "$5" + shaCrypt(password, salt, rounds, sha256.New, transpose256) +} + +// Calculates sha256-crypt. The password must be in plaintext and be a UTF-8 +// string. +// +// The salt must be a valid ASCII between 0 and 16 characters in length +// inclusive. +// +// See the constants in this package for suggested values for rounds. +// +// Rounds must be in the range 1000 <= rounds <= 999999999. The function panics +// if this is not the case. +// +// The output is in modular crypt format. 
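An editorial sketch of calling the sha256-crypt primitive documented above (password and salt are invented): the result is a modular-crypt string, and the rounds field is omitted when the default of 5000 rounds is used.

```go
package example

import "gopkg.in/hlandau/passlib.v1/hash/sha2crypt/raw"

// crypt256Demo returns a hash of the form
// "$5$rounds=10000$examplesalt$<43-char base64 hash>".
func crypt256Demo() string {
	return raw.Crypt256("correct horse battery staple", "examplesalt", raw.RecommendedRounds)
}
```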
+func Crypt512(password, salt string, rounds int) string { + return "$6" + shaCrypt(password, salt, rounds, sha512.New, transpose512) +} + +func shaCrypt(password, salt string, rounds int, newHash func() hash.Hash, transpose func(b []byte)) string { + if rounds < MinimumRounds || rounds > MaximumRounds { + panic("sha256-crypt rounds must be in 1000 <= rounds <= 999999999") + } + + passwordb := []byte(password) + saltb := []byte(salt) + if len(saltb) > 16 { + panic("salt must not exceed 16 bytes") + } + + // B + b := newHash() + b.Write(passwordb) + b.Write(saltb) + b.Write(passwordb) + bsum := b.Sum(nil) + + // A + a := newHash() + a.Write(passwordb) + a.Write(saltb) + repeat(a, bsum, len(passwordb)) + + plen := len(passwordb) + for plen != 0 { + if (plen & 1) != 0 { + a.Write(bsum) + } else { + a.Write(passwordb) + } + plen = plen >> 1 + } + + asum := a.Sum(nil) + + // DP + dp := newHash() + for i := 0; i < len(passwordb); i++ { + dp.Write(passwordb) + } + + dpsum := dp.Sum(nil) + + // P + p := make([]byte, len(passwordb)) + repeatTo(p, dpsum) + + // DS + ds := newHash() + for i := 0; i < (16 + int(asum[0])); i++ { + ds.Write(saltb) + } + + dssum := ds.Sum(nil)[0:len(saltb)] + + // S + s := make([]byte, len(saltb)) + repeatTo(s, dssum) + + // C + cur := asum[:] + for i := 0; i < rounds; i++ { + c := newHash() + if (i & 1) != 0 { + c.Write(p) + } else { + c.Write(cur) + } + if (i % 3) != 0 { + c.Write(s) + } + if (i % 7) != 0 { + c.Write(p) + } + if (i & 1) == 0 { + c.Write(p) + } else { + c.Write(cur) + } + cur = c.Sum(nil)[:] + } + + // Transposition + transpose(cur) + + // Hash + hstr := EncodeBase64(cur) + + if rounds == DefaultRounds { + return fmt.Sprintf("$%s$%s", salt, hstr) + } + + return fmt.Sprintf("$rounds=%d$%s$%s", rounds, salt, hstr) +} + +func repeat(w io.Writer, b []byte, sz int) { + var i int + for i = 0; (i + len(b)) <= sz; i += len(b) { + w.Write(b) + } + w.Write(b[0 : sz-i]) +} + +func repeatTo(out []byte, b []byte) { + if len(b) == 0 { + return + } + + var i int + for i = 0; (i + len(b)) <= len(out); i += len(b) { + copy(out[i:], b) + } + copy(out[i:], b) +} + +func transpose256(b []byte) { + b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7], b[8], b[9], b[10], b[11], b[12], b[13], b[14], b[15], b[16], b[17], b[18], b[19], b[20], b[21], b[22], b[23], b[24], b[25], b[26], b[27], b[28], b[29], b[30], b[31] = + b[20], b[10], b[0], b[11], b[1], b[21], b[2], b[22], b[12], b[23], b[13], b[3], b[14], b[4], b[24], b[5], b[25], b[15], b[26], b[16], b[6], b[17], b[7], b[27], b[8], b[28], b[18], b[29], b[19], b[9], b[30], b[31] +} + +func transpose512(b []byte) { + b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7], b[8], b[9], b[10], b[11], b[12], b[13], b[14], b[15], b[16], b[17], b[18], b[19], b[20], b[21], b[22], b[23], b[24], b[25], b[26], b[27], b[28], b[29], b[30], b[31], b[32], b[33], b[34], b[35], b[36], b[37], b[38], b[39], b[40], b[41], b[42], b[43], b[44], b[45], b[46], b[47], b[48], b[49], b[50], b[51], b[52], b[53], b[54], b[55], b[56], b[57], b[58], b[59], b[60], b[61], b[62], b[63] = + b[42], b[21], b[0], b[1], b[43], b[22], b[23], b[2], b[44], b[45], b[24], b[3], b[4], b[46], b[25], b[26], b[5], b[47], b[48], b[27], b[6], b[7], b[49], b[28], b[29], b[8], b[50], b[51], b[30], b[9], b[10], b[52], b[31], b[32], b[11], b[53], b[54], b[33], b[12], b[13], b[55], b[34], b[35], b[14], b[56], b[57], b[36], b[15], b[16], b[58], b[37], b[38], b[17], b[59], b[60], b[39], b[18], b[19], b[61], b[40], b[41], b[20], b[62], b[63] +} + +// © 2008-2012 Assurance Technologies LLC. 
(Python passlib) BSD License +// © 2014 Hugo Landau BSD License diff --git a/vendor/gopkg.in/hlandau/passlib.v1/hash/sha2crypt/sha2crypt.go b/vendor/gopkg.in/hlandau/passlib.v1/hash/sha2crypt/sha2crypt.go new file mode 100644 index 0000000..c936469 --- /dev/null +++ b/vendor/gopkg.in/hlandau/passlib.v1/hash/sha2crypt/sha2crypt.go @@ -0,0 +1,147 @@ +// Package sha2crypt implements sha256-crypt and sha512-crypt. +package sha2crypt + +import "fmt" +import "expvar" +import "crypto/rand" +import "gopkg.in/hlandau/passlib.v1/hash/sha2crypt/raw" +import "gopkg.in/hlandau/passlib.v1/abstract" + +var cSHA2CryptHashCalls = expvar.NewInt("passlib.sha2crypt.hashCalls") +var cSHA2CryptVerifyCalls = expvar.NewInt("passlib.sha2crypt.verifyCalls") + +// An implementation of Scheme performing sha256-crypt. +// +// The number of rounds is raw.RecommendedRounds. +var Crypter256 abstract.Scheme + +// An implementation of Scheme performing sha512-crypt. +// +// The number of rounds is raw.RecommendedRounds. +var Crypter512 abstract.Scheme + +func init() { + Crypter256 = NewCrypter256(raw.RecommendedRounds) + Crypter512 = NewCrypter512(raw.RecommendedRounds) +} + +// Returns a Scheme implementing sha256-crypt using the number of rounds +// specified. +func NewCrypter256(rounds int) abstract.Scheme { + return &sha2Crypter{false, rounds} +} + +// Returns a Scheme implementing sha512-crypt using the number of rounds +// specified. +func NewCrypter512(rounds int) abstract.Scheme { + return &sha2Crypter{true, rounds} +} + +type sha2Crypter struct { + sha512 bool + rounds int +} + +// Changes the default rounds for the crypter. Be warned that this +// is a global setting. The default default value is RecommendedRounds. +func (c *sha2Crypter) SetRounds(rounds int) error { + if rounds < raw.MinimumRounds || rounds > raw.MaximumRounds { + return raw.ErrInvalidRounds + } + + c.rounds = rounds + return nil +} + +func (c *sha2Crypter) SupportsStub(stub string) bool { + if len(stub) < 3 || stub[0] != '$' || stub[2] != '$' { + return false + } + return (stub[1] == '5' && !c.sha512) || (stub[1] == '6' && c.sha512) +} + +func (c *sha2Crypter) Hash(password string) (string, error) { + cSHA2CryptHashCalls.Add(1) + + stub, err := c.makeStub() + if err != nil { + return "", err + } + + _, newHash, _, _, err := c.hash(password, stub) + return newHash, err +} + +func (c *sha2Crypter) Verify(password, hash string) (err error) { + cSHA2CryptVerifyCalls.Add(1) + + _, newHash, _, _, err := c.hash(password, hash) + if err == nil && !abstract.SecureCompare(hash, newHash) { + err = abstract.ErrInvalidPassword + } + + return +} + +func (c *sha2Crypter) NeedsUpdate(stub string) bool { + _, salt, _, rounds, err := raw.Parse(stub) + if err != nil { + return false // ... 
+ } + + return c.needsUpdate(salt, rounds) +} + +func (c *sha2Crypter) needsUpdate(salt string, rounds int) bool { + return rounds < c.rounds || len(salt) < 16 +} + +var errInvalidStub = fmt.Errorf("invalid sha2 password stub") + +func (c *sha2Crypter) hash(password, stub string) (oldHash, newHash, salt string, rounds int, err error) { + isSHA512, salt, oldHash, rounds, err := raw.Parse(stub) + if err != nil { + return "", "", "", 0, err + } + + if isSHA512 != c.sha512 { + return "", "", "", 0, errInvalidStub + } + + if c.sha512 { + return oldHash, raw.Crypt512(password, salt, rounds), salt, rounds, nil + } + + return oldHash, raw.Crypt256(password, salt, rounds), salt, rounds, nil +} + +func (c *sha2Crypter) makeStub() (string, error) { + ch := "5" + if c.sha512 { + ch = "6" + } + + buf := make([]byte, 12) + _, err := rand.Read(buf) + if err != nil { + return "", err + } + + salt := raw.EncodeBase64(buf)[0:16] + + if c.rounds == raw.DefaultRounds { + return fmt.Sprintf("$%s$%s", ch, salt), nil + } + + return fmt.Sprintf("$%s$rounds=%d$%s", ch, c.rounds, salt), nil +} + +func (c *sha2Crypter) String() string { + if c.sha512 { + return fmt.Sprintf("sha512-crypt(%d)", c.rounds) + } else { + return fmt.Sprintf("sha256-crypt(%d)", c.rounds) + } +} + +// © 2014 Hugo Landau BSD License diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/gopkg.in/yaml.v2/LICENSE new file mode 100644 index 0000000..866d74a --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/LICENSE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml new file mode 100644 index 0000000..8da58fb --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml @@ -0,0 +1,31 @@ +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md new file mode 100644 index 0000000..1884de6 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/README.md @@ -0,0 +1,131 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *gopkg.in/yaml.v2*. + +To install it, run: + + go get gopkg.in/yaml.v2 + +API documentation +----------------- + +If opened in a browser, the import path itself leads to the API documentation: + + * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v2" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go new file mode 100644 index 0000000..95ec014 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/apic.go @@ -0,0 +1,742 @@ +package yaml + +import ( + "io" + "os" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. 
+ if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// File read handler. +func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_file.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_file_read_handler + parser.input_file = file +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) bool { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } + return true +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// File write handler. +func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_file.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_file_write_handler + emitter.output_file = file +} + +// Set the output encoding. 
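[Editor's note, not part of the diff: a hedged, in-package sketch of how the initialization and input-handler functions above fit together. These identifiers are unexported, so the snippet would only compile inside package yaml; decode.go's parser wrapper further down does the same thing with proper error reporting.]

```go
// countEvents drains the low-level parser for a given input and returns how
// many events it produced, stopping at the stream-end event.
func countEvents(input []byte) int {
	var parser yaml_parser_t
	if !yaml_parser_initialize(&parser) {
		panic("failed to initialize parser")
	}
	defer yaml_parser_delete(&parser)

	// The input source may only be set once; here we use the string handler.
	yaml_parser_set_input_string(&parser, input)

	n := 0
	for {
		var event yaml_event_t
		if !yaml_parser_parse(&parser, &event) {
			panic(parser.problem)
		}
		typ := event.typ
		yaml_event_delete(&event)
		n++
		if typ == yaml_STREAM_END_EVENT {
			return n
		}
	}
}
```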
+func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +//// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. +// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } + return true +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } + return true +} + +// Create DOCUMENT-START. 
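[Editor's note, not part of the diff: a hedged, in-package sketch of driving the emitter by hand with the event constructors in this file, together with yaml_emitter_emit from emitterc.go later in the diff. yaml_PLAIN_SCALAR_STYLE is assumed to be the plain scalar style constant from yamlh.go; encode.go is the authoritative user of this API.]

```go
// emitHello builds the minimal event sequence for a one-scalar document and
// collects the emitter's output into a byte slice.
func emitHello() string {
	var out []byte
	var emitter yaml_emitter_t
	if !yaml_emitter_initialize(&emitter) {
		panic("failed to initialize emitter")
	}
	defer yaml_emitter_delete(&emitter)
	yaml_emitter_set_output_string(&emitter, &out)

	// emit panics on failure; real code checks emitter.problem properly.
	emit := func(ok bool, event *yaml_event_t) {
		if !ok || !yaml_emitter_emit(&emitter, event) {
			panic(emitter.problem)
		}
	}

	var event yaml_event_t
	emit(yaml_stream_start_event_initialize(&event, yaml_UTF8_ENCODING), &event)
	emit(yaml_document_start_event_initialize(&event, nil, nil, true), &event)
	emit(yaml_scalar_event_initialize(&event, nil, nil, []byte("hello"),
		true, true, yaml_PLAIN_SCALAR_STYLE), &event) // style constant assumed
	emit(yaml_document_end_event_initialize(&event, true), &event)
	emit(yaml_stream_end_event_initialize(&event), &event)

	return string(out) // roughly "hello\n"
}
```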
+func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, implicit bool) bool { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } + return true +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } + return true +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. +func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } + return true +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. 
+// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. +// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compliler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. 
+// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go new file mode 100644 index 0000000..b13ab9f --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/decode.go @@ -0,0 +1,683 @@ +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "strconv" + "time" +) + +const ( + documentNode = 1 << iota + mappingNode + sequenceNode + scalarNode + aliasNode +) + +type node struct { + kind int + line, column int + tag string + value string + implicit bool + children []*node + anchors map[string]*node +} + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
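[Editor's note, not part of the diff: a hedged, in-package sketch of the node tree that the parser defined just below produces for a small document. Field and constant names are taken from the node struct and kind constants above; the input document is made up and the assertions are illustrative rather than a real test.]

```go
// nodeTreeExample walks the tree built for a two-key mapping, one plain
// scalar value and one flow sequence value.
func nodeTreeExample() {
	p := newParser([]byte("a: 1\nb: [x, y]\n"))
	defer p.destroy()

	doc := p.parse()        // documentNode with a single child
	m := doc.children[0]    // mappingNode; children alternate key, value, key, value
	_ = m.children[0].value // "a" (scalarNode)
	_ = m.children[1].value // "1" (scalarNode)
	_ = m.children[3].kind  // sequenceNode holding two scalar children, "x" and "y"
}
```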
+ +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *node +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + + if len(b) == 0 { + b = []byte{'\n'} + } + + yaml_parser_set_input_string(&p.parser, b) + + p.skip() + if p.event.typ != yaml_STREAM_START_EVENT { + panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ))) + } + p.skip() + return &p +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +func (p *parser) skip() { + if p.event.typ != yaml_NO_EVENT { + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + yaml_event_delete(&p.event) + } + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + } else if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *node, anchor []byte) { + if anchor != nil { + p.doc.anchors[string(anchor)] = n + } +} + +func (p *parser) parse() *node { + switch p.event.typ { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. + return nil + default: + panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ))) + } + panic("unreachable") +} + +func (p *parser) node(kind int) *node { + return &node{ + kind: kind, + line: p.event.start_mark.line, + column: p.event.start_mark.column, + } +} + +func (p *parser) document() *node { + n := p.node(documentNode) + n.anchors = make(map[string]*node) + p.doc = n + p.skip() + n.children = append(n.children, p.parse()) + if p.event.typ != yaml_DOCUMENT_END_EVENT { + panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ))) + } + p.skip() + return n +} + +func (p *parser) alias() *node { + n := p.node(aliasNode) + n.value = string(p.event.anchor) + p.skip() + return n +} + +func (p *parser) scalar() *node { + n := p.node(scalarNode) + n.value = string(p.event.value) + n.tag = string(p.event.tag) + n.implicit = p.event.implicit + p.anchor(n, p.event.anchor) + p.skip() + return n +} + +func (p *parser) sequence() *node { + n := p.node(sequenceNode) + p.anchor(n, p.event.anchor) + p.skip() + for p.event.typ != yaml_SEQUENCE_END_EVENT { + n.children = append(n.children, p.parse()) + } + p.skip() + return n +} + +func (p *parser) mapping() *node { + n := p.node(mappingNode) + p.anchor(n, p.event.anchor) + p.skip() + for p.event.typ != yaml_MAPPING_END_EVENT { + n.children = append(n.children, p.parse(), p.parse()) + } + p.skip() + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. 
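[Editor's note, not part of the diff: the decoder below routes nodes through callUnmarshaler whenever the target implements yaml.Unmarshaler, so a type can take over its own decoding. A hedged example of that hook from the public API; the IPAddr type and the input document are made up.]

```go
package main

import (
	"fmt"
	"net"

	"gopkg.in/yaml.v2"
)

type IPAddr struct{ net.IP }

// UnmarshalYAML receives a closure that decodes the underlying node into any
// Go value; here we ask for a string and parse it ourselves.
func (a *IPAddr) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var s string
	if err := unmarshal(&s); err != nil {
		return err
	}
	ip := net.ParseIP(s)
	if ip == nil {
		return fmt.Errorf("invalid IP address: %q", s)
	}
	a.IP = ip
	return nil
}

func main() {
	var cfg struct {
		Listen IPAddr `yaml:"listen"`
	}
	if err := yaml.Unmarshal([]byte("listen: 127.0.0.1\n"), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Listen) // 127.0.0.1
}
```

The same decoder also builds yaml.MapSlice values out of the MapItem entries referenced above when callers need to preserve mapping key order instead of decoding into an unordered Go map.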
+ +type decoder struct { + doc *node + aliases map[string]bool + mapType reflect.Type + terrors []string +} + +var ( + mapItemType = reflect.TypeOf(MapItem{}) + durationType = reflect.TypeOf(time.Duration(0)) + defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = defaultMapType.Elem() +) + +func newDecoder() *decoder { + d := &decoder{mapType: defaultMapType} + d.aliases = make(map[string]bool) + return d +} + +func (d *decoder) terror(n *node, tag string, out reflect.Value) { + if n.tag != "" { + tag = n.tag + } + value := n.value + if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. +func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "" && n.implicit) { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + if u, ok := out.Addr().Interface().(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + switch n.kind { + case documentNode: + return d.document(n, out) + case aliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.kind { + case scalarNode: + good = d.scalar(n, out) + case mappingNode: + good = d.mapping(n, out) + case sequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + } + return good +} + +func (d *decoder) document(n *node, out reflect.Value) (good bool) { + if len(n.children) == 1 { + d.doc = n + d.unmarshal(n.children[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *node, out reflect.Value) (good bool) { + an, ok := d.doc.anchors[n.value] + if !ok { + failf("unknown anchor '%s' referenced", n.value) + } + if d.aliases[n.value] { + failf("anchor '%s' value contains itself", n.value) + } + d.aliases[n.value] = true + good = d.unmarshal(an, out) + delete(d.aliases, n.value) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + 
out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { + var tag string + var resolved interface{} + if n.tag == "" && !n.implicit { + tag = yaml_STR_TAG + resolved = n.value + } else { + tag, resolved = resolve(n.tag, n.value) + if tag == yaml_BINARY_TAG { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + if out.Kind() == reflect.Map && !out.CanAddr() { + resetMap(out) + } else { + out.Set(reflect.Zero(out.Type())) + } + return true + } + if s, ok := resolved.(string); ok && out.CanAddr() { + if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok { + err := u.UnmarshalText([]byte(s)) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == yaml_BINARY_TAG { + out.SetString(resolved.(string)) + good = true + } else if resolved != nil { + out.SetString(n.value) + good = true + } + case reflect.Interface: + if resolved == nil { + out.Set(reflect.Zero(out.Type())) + } else { + out.Set(reflect.ValueOf(resolved)) + } + good = true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch resolved := resolved.(type) { + case int: + if !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + good = true + } + case int64: + if !out.OverflowInt(resolved) { + out.SetInt(resolved) + good = true + } + case uint64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + good = true + } + case float64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + good = true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + good = true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + good = true + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + good = true + case int64: + out.SetFloat(float64(resolved)) + good = true + case uint64: + out.SetFloat(float64(resolved)) + good = true + case float64: + out.SetFloat(resolved) + good = true + } + case reflect.Ptr: + if out.Type().Elem() == reflect.TypeOf(resolved) { + // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? 
+ elem := reflect.New(out.Type().Elem()) + elem.Elem().Set(reflect.ValueOf(resolved)) + out.Set(elem) + good = true + } + } + if !good { + d.terror(n, tag, out) + } + return good +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { + l := len(n.children) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Interface: + // No type hints. Will have to use a generic sequence. + iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, yaml_SEQ_TAG, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.children[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + out.Set(out.Slice(0, j)) + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Slice: + return d.mappingSlice(n, out) + case reflect.Map: + // okay + case reflect.Interface: + if d.mapType.Kind() == reflect.Map { + iface := out + out = reflect.MakeMap(d.mapType) + iface.Set(out) + } else { + slicev := reflect.New(d.mapType).Elem() + if !d.mappingSlice(n, slicev) { + return false + } + out.Set(slicev) + return true + } + default: + d.terror(n, yaml_MAP_TAG, out) + return false + } + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + mapType := d.mapType + if outt.Key() == ifaceType && outt.Elem() == ifaceType { + d.mapType = outt + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + l := len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.children[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.children[i+1], e) { + out.SetMapIndex(k, e) + } + } + } + d.mapType = mapType + return true +} + +func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { + outt := out.Type() + if outt.Elem() != mapItemType { + d.terror(n, yaml_MAP_TAG, out) + return false + } + + mapType := d.mapType + d.mapType = outt + + var slice []MapItem + var l = len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + item := MapItem{} + k := reflect.ValueOf(&item.Key).Elem() + if d.unmarshal(n.children[i], k) { + v := reflect.ValueOf(&item.Value).Elem() + if d.unmarshal(n.children[i+1], v) { + slice = append(slice, item) + } + } + } + out.Set(reflect.ValueOf(slice)) + d.mapType = mapType + return true +} + +func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + name := settableValueOf("") + l := len(n.children) + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + for i := 0; i < l; i += 2 { + ni := n.children[i] + if isMerge(ni) { + d.merge(n.children[i+1], out) 
+ continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = out.FieldByIndex(info.Inline) + } + d.unmarshal(n.children[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.children[i+1], value) + inlineMap.SetMapIndex(name, value) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *node, out reflect.Value) { + switch n.kind { + case mappingNode: + d.unmarshal(n, out) + case aliasNode: + an, ok := d.doc.anchors[n.value] + if ok && an.kind != mappingNode { + failWantMap() + } + d.unmarshal(n, out) + case sequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(n.children) - 1; i >= 0; i-- { + ni := n.children[i] + if ni.kind == aliasNode { + an, ok := d.doc.anchors[ni.value] + if ok && an.kind != mappingNode { + failWantMap() + } + } else if ni.kind != mappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *node) bool { + return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +} diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go new file mode 100644 index 0000000..2befd55 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/emitterc.go @@ -0,0 +1,1685 @@ +package yaml + +import ( + "bytes" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. 
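[Editor's note, not part of the diff: the merge and isMerge handling a little further up is what makes the YAML "<<" merge key work end to end. A hedged example through the public API; the document and types are made up.]

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

var doc = []byte(`
defaults: &defaults
  adapter: postgres
  host: localhost

development:
  <<: *defaults
  database: dev_db
`)

func main() {
	var cfg map[string]map[string]string
	if err := yaml.Unmarshal(doc, &cfg); err != nil {
		panic(err)
	}
	// Keys merged in from the anchored mapping sit alongside the explicit one.
	fmt.Println(cfg["development"]["adapter"])  // postgres
	fmt.Println(cfg["development"]["database"]) // dev_db
}
```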
+func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. 
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. +func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. 
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + return yaml_emitter_emit_node(emitter, event, true, false, false, false) +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. 
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. +func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. +func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. 
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. +func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. 
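The two code paths above (flow vs. block mapping and sequence emission) are what the public API's ",flow" struct tag toggles; encode.go later in this patch wires that through info.Flow. A small, hedged usage example follows; the type names are invented and the commented output is indicative only.

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

type point struct {
	X int `yaml:"x"`
	Y int `yaml:"y"`
}

type doc struct {
	Block point `yaml:"block"`     // emitted via the block mapping states
	Flow  point `yaml:"flow,flow"` // ",flow" requests the flow mapping states
}

func main() {
	out, err := yaml.Marshal(doc{Block: point{1, 2}, Flow: point{3, 4}})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Expected shape (indicative):
	//   block:
	//     x: 1
	//     y: 2
	//   flow: {x: 3, y: 4}
}
```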
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS") + } + return false +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. +func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. 
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. +func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an achor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. 
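yaml_emitter_select_scalar_style above is where plain scalars get downgraded to quoted or block styles. Through the public Marshal API the visible effect is roughly the following; the exact key order and chomping indicator are indicative, not guaranteed by this sketch.

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	out, err := yaml.Marshal(map[string]string{
		"plain":     "just words",         // stays a plain scalar
		"ambiguous": "true",               // resolves as a bool, so it must be quoted
		"multi":     "first line\nsecond", // newline forces a block (literal) scalar
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Roughly:
	//   ambiguous: "true"
	//   multi: |-
	//     first line
	//     second
	//   plain: just words
}
```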
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. +func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. 
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. +func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceeded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceeded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceeded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceeded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
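The flags computed by yaml_emitter_analyze_scalar above (leading/trailing whitespace, indicator characters, line breaks) are what later disqualify the plain style. A hedged round-trip through Marshal shows the typical effect; the exact quoting style chosen may differ.

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	// Each of these trips one of the analysis flags, so plain style is refused
	// and the emitter falls back to a quoted form.
	for _, s := range []string{" leading space", "trailing space ", "has: colon", "ends with #"} {
		out, err := yaml.Marshal(s)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-20q -> %s", s, out)
	}
	// Expected (roughly): each value comes back single-quoted, e.g. 'has: colon'.
}
```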
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. +func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } 
else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, 
rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if 
!yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go new file mode 100644 index 0000000..84f8499 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/encode.go @@ -0,0 +1,306 @@ +package yaml + +import ( + "encoding" + "fmt" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" +) + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool +} + +func newEncoder() (e *encoder) { + e = &encoder{} + e.must(yaml_emitter_initialize(&e.emitter)) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)) + e.emit() + e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true)) + e.emit() + return e +} + +func (e *encoder) finish() { + e.must(yaml_document_end_event_initialize(&e.event, true)) + e.emit() + e.emitter.open_ended = false + e.must(yaml_stream_end_event_initialize(&e.event)) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. 
+ if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT { + e.must(false) + } +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + if !in.IsValid() { + e.nilv() + return + } + iface := in.Interface() + if m, ok := iface.(Marshaler); ok { + v, err := m.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + in = reflect.ValueOf(v) + } else if m, ok := iface.(encoding.TextMarshaler); ok { + text, err := m.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + } + switch in.Kind() { + case reflect.Interface: + if in.IsNil() { + e.nilv() + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + if in.IsNil() { + e.nilv() + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Struct: + e.structv(tag, in) + case reflect.Slice: + if in.Type().Elem() == mapItemType { + e.itemsv(tag, in) + } else { + e.slicev(tag, in) + } + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if in.Type() == durationType { + e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) + } else { + e.intv(tag, in) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) itemsv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) + for _, item := range slice { + e.marshal("", reflect.ValueOf(item.Key)) + e.marshal("", reflect.ValueOf(item.Value)) + } + }) +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = in.FieldByIndex(info.Inline) + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + f() + e.must(yaml_mapping_end_event_initialize(&e.event)) + e.emit() +} + +func (e *encoder) slicev(tag 
string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + rtag, rs := resolve("", s) + if rtag == yaml_BINARY_TAG { + if tag == "" || tag == yaml_STR_TAG { + tag = rtag + s = rs.(string) + } else if tag == yaml_BINARY_TAG { + failf("explicitly tagged !!binary data must be base64-encoded") + } else { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + } + if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } else if strings.Contains(s, "\n") { + style = yaml_LITERAL_SCALAR_STYLE + } else { + style = yaml_PLAIN_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // FIXME: Handle 64 bits here. + s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { + implicit := tag == "" + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.emit() +} diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v2/parserc.go new file mode 100644 index 0000000..0a7037a --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/parserc.go @@ -0,0 +1,1096 @@ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
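The grammar comment above maps onto a stream of events produced by yaml_parser_parse. Here is a rough in-package sketch of walking that stream; it assumes the usual apic.go helpers (yaml_parser_initialize, yaml_parser_set_input_string, yaml_parser_delete) are present in this vendored copy, and `dumpEvents` is an invented name.

```go
package yaml

import "fmt"

// dumpEvents prints the raw event stream for a YAML document, stopping at
// STREAM-END or on the first parser error.
func dumpEvents(src string) error {
	var parser yaml_parser_t
	if !yaml_parser_initialize(&parser) {
		return fmt.Errorf("cannot initialize parser")
	}
	defer yaml_parser_delete(&parser)
	yaml_parser_set_input_string(&parser, []byte(src))

	for {
		var event yaml_event_t
		if !yaml_parser_parse(&parser, &event) {
			return fmt.Errorf("parse error: %s", parser.problem)
		}
		// For scalar events, value holds the scalar text; otherwise it is empty.
		fmt.Printf("event type=%d value=%q\n", event.typ, event.value)
		if event.typ == yaml_STREAM_END_EVENT {
			return nil
		}
	}
}

// dumpEvents("a: 1\nb: [2, 3]\n") should print, in order: stream start,
// document start, mapping start, key/value scalars, a flow sequence
// start/end for the list, mapping end, document end, stream end.
```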
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } + return false +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END +// ************ +func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_STREAM_START_TOKEN { + return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark) + } + parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + encoding: token.encoding, + } + skip_token(parser) + return true +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// * +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// ************************* +func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { + + token := peek_token(parser) + if token == nil { + return false + } + + // Parse extra document end indicators. + if !implicit { + for token.typ == yaml_DOCUMENT_END_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && + token.typ != yaml_TAG_DIRECTIVE_TOKEN && + token.typ != yaml_DOCUMENT_START_TOKEN && + token.typ != yaml_STREAM_END_TOKEN { + // Parse an implicit document. + if !yaml_parser_process_directives(parser, nil, nil) { + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_BLOCK_NODE_STATE + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + } else if token.typ != yaml_STREAM_END_TOKEN { + // Parse an explicit document. + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + start_mark := token.start_mark + if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { + return false + } + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_DOCUMENT_START_TOKEN { + yaml_parser_set_parser_error(parser, + "did not find expected ", token.start_mark) + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE + end_mark := token.end_mark + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: false, + } + skip_token(parser) + + } else { + // Parse the stream end. + parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? 
+// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. 
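+// An empty scalar stands in for a node that is syntactically allowed but
+// absent, e.g. the missing value in "key:" or the missing entry after a
+// lone '-' indicator; it is emitted as a plain, implicit SCALAR event.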
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. +func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go new file mode 100644 index 0000000..f450791 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/readerc.go @@ -0,0 +1,394 @@ +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. 
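+// If the input starts with one of these sequences, the matching encoding is
+// selected and the BOM bytes are skipped; otherwise UTF-8 is assumed (see
+// yaml_parser_determine_encoding below).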
+const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. +func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. + if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. 
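+ // Note that parser.buffer always holds UTF-8: the loop below decodes
+ // UTF-8 or UTF-16 input into code points, re-encodes them as UTF-8, and
+ // counts decoded characters (not bytes) in parser.unread.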
+ parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. + switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). 
+ // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. + parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 
11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go new file mode 100644 index 0000000..232313c --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/resolve.go @@ -0,0 +1,208 @@ +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "unicode/utf8" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' // Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, + {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, + {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, + {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, + {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, + {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, + {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", yaml_MERGE_TAG, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + // TODO This can easily be made faster and produce less garbage. + if strings.HasPrefix(tag, longTagPrefix) { + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out interface{}) { + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: + return + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { + // Handle things we can lookup in a map. 
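+ // A few informal examples of what falls out of this branch:
+ //   "yes"  -> (yaml_BOOL_TAG, true)   via the map lookup just below
+ //   ".5"   -> (yaml_FLOAT_TAG, 0.5)   via the '.' hint
+ //   "0x1A" -> (yaml_INT_TAG, 26)      via the 'D'/'S' hint and base-0 ParseInt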
+ if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. + + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt(plain[3:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, -int(intv) + } else { + return yaml_INT_TAG, -intv + } + } + } + // XXX Handle timestamps here. + + default: + panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") + } + } + if tag == yaml_BINARY_TAG { + return yaml_BINARY_TAG, in + } + if utf8.ValidString(in) { + return yaml_STR_TAG, in + } + return yaml_BINARY_TAG, encodeBase64(in) +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. +func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go new file mode 100644 index 0000000..2c9d511 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/scannerc.go @@ -0,0 +1,2710 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Introduction +// ************ +// +// The following notes assume that you are familiar with the YAML specification +// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in +// some cases we are less restrictive that it requires. +// +// The process of transforming a YAML stream into a sequence of events is +// divided on two steps: Scanning and Parsing. +// +// The Scanner transforms the input stream into a sequence of tokens, while the +// parser transform the sequence of tokens produced by the Scanner into a +// sequence of parsing events. +// +// The Scanner is rather clever and complicated. The Parser, on the contrary, +// is a straightforward implementation of a recursive-descendant parser (or, +// LL(1) parser, as it is usually called). 
+// +// Actually there are two issues of Scanning that might be called "clever", the +// rest is quite straightforward. The issues are "block collection start" and +// "simple keys". Both issues are explained below in details. +// +// Here the Scanning step is explained and implemented. We start with the list +// of all the tokens produced by the Scanner together with short descriptions. +// +// Now, tokens: +// +// STREAM-START(encoding) # The stream start. +// STREAM-END # The stream end. +// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. +// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. +// DOCUMENT-START # '---' +// DOCUMENT-END # '...' +// BLOCK-SEQUENCE-START # Indentation increase denoting a block +// BLOCK-MAPPING-START # sequence or a block mapping. +// BLOCK-END # Indentation decrease. +// FLOW-SEQUENCE-START # '[' +// FLOW-SEQUENCE-END # ']' +// BLOCK-SEQUENCE-START # '{' +// BLOCK-SEQUENCE-END # '}' +// BLOCK-ENTRY # '-' +// FLOW-ENTRY # ',' +// KEY # '?' or nothing (simple keys). +// VALUE # ':' +// ALIAS(anchor) # '*anchor' +// ANCHOR(anchor) # '&anchor' +// TAG(handle,suffix) # '!handle!suffix' +// SCALAR(value,style) # A scalar. +// +// The following two tokens are "virtual" tokens denoting the beginning and the +// end of the stream: +// +// STREAM-START(encoding) +// STREAM-END +// +// We pass the information about the input stream encoding with the +// STREAM-START token. +// +// The next two tokens are responsible for tags: +// +// VERSION-DIRECTIVE(major,minor) +// TAG-DIRECTIVE(handle,prefix) +// +// Example: +// +// %YAML 1.1 +// %TAG ! !foo +// %TAG !yaml! tag:yaml.org,2002: +// --- +// +// The correspoding sequence of tokens: +// +// STREAM-START(utf-8) +// VERSION-DIRECTIVE(1,1) +// TAG-DIRECTIVE("!","!foo") +// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") +// DOCUMENT-START +// STREAM-END +// +// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole +// line. +// +// The document start and end indicators are represented by: +// +// DOCUMENT-START +// DOCUMENT-END +// +// Note that if a YAML stream contains an implicit document (without '---' +// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be +// produced. +// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tag, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrate the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. 
A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. +// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? a complex key: another value, +// } +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// FLOW-ENTRY +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// FLOW-ENTRY +// FLOW-MAPPING-END +// STREAM-END +// +// A simple key is a key which is not denoted by the '?' indicator. Note that +// the Scanner still produce the KEY token whenever it encounters a simple key. +// +// For scanning block collections, the following tokens are used (note that we +// repeat KEY and VALUE here): +// +// BLOCK-SEQUENCE-START +// BLOCK-MAPPING-START +// BLOCK-END +// BLOCK-ENTRY +// KEY +// VALUE +// +// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation +// increase that precedes a block collection (cf. the INDENT token in Python). +// The token BLOCK-END denote indentation decrease that ends a block collection +// (cf. the DEDENT token in Python). However YAML has some syntax pecularities +// that makes detections of these tokens more complex. +// +// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators +// '-', '?', and ':' correspondingly. +// +// The following examples show how the tokens BLOCK-SEQUENCE-START, +// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: +// +// 1. 
Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. 
+// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. 
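+// The context names the enclosing construct being scanned (for example
+// "while scanning a simple key") and the problem describes what went wrong;
+// both carry marks pointing at the offending positions in the input.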
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, "did not find URI escaped octet") +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + // Check if we really need to fetch more tokens. + need_more_tokens := false + + if parser.tokens_head == len(parser.tokens) { + // Queue is empty. + need_more_tokens = true + } else { + // Check if any potential simple key may occupy the head position. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + if simple_key.possible && simple_key.token_number == parser.tokens_parsed { + need_more_tokens = true + break + } + } + } + + // We are finished. + if !need_more_tokens { + break + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + // Eat whitespaces and comments until we reach the next token. + if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // Remove obsolete potential simple keys. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' 
&& is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? + if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' 
|| parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +// Check the list of potential simple keys and remove the positions that +// cannot contain simple keys anymore. +func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { + // Check for a potential simple key for each flow level. + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + + // The specification requires that a simple key + // + // - is limited to a single line, + // - is shorter than 1024 characters. + if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { + + // Check if the potential simple key to be removed is required. + if simple_key.required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + } + } + return true +} + +// Check if a simple key may start at the current position and add it if +// needed. +func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // A simple key is required only when it is the first token in the current + // line. Therefore it is always allowed. But we add a check anyway. + if required && !parser.simple_key_allowed { + panic("should not happen") + } + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + } + simple_key.mark = parser.mark + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. 
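+ // A required simple key is one that sits at the indentation column in the
+ // block context (e.g. a mapping key such as "foo" in "foo: bar"); it must
+ // be followed by ':' on the same line, so it cannot be silently dropped.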
+ if parser.simple_keys[i].required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", parser.simple_keys[i].mark, + "could not find expected ':'") + } + } + // Remove the key from the stack. + parser.simple_keys[i].possible = false + return true +} + +// Increase the flow level and resize the simple key list if needed. +func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { + // Reset the simple key on the next level. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + // Increase the flow level. + parser.flow_level++ + return true +} + +// Decrease the flow level. +func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { + if parser.flow_level > 0 { + parser.flow_level-- + parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1] + } + return true +} + +// Push the current indentation level to the stack and set the new level +// the current column is greater than the indentation level. In this case, +// append or insert the specified token into the token queue. +func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + if parser.indent < column { + // Push the current indentation level to the stack and set the new + // indentation level. + parser.indents = append(parser.indents, parser.indent) + parser.indent = column + + // Create a token and insert it into the queue. + token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + // Loop through the indentation levels in the stack. + for parser.indent > column { + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. 
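+ // (Unrolling to column -1 pops every indentation level that is still open
+ // and appends one BLOCK-END token per level; e.g. at the end of the stream
+ // "a:\n  b: 1\n" two block mappings are still open, so two BLOCK-END tokens
+ // are emitted here.)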
+ if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. + start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. 
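+// For instance, each ',' separating the items of "[a, b, c]" or the pairs of
+// "{x: 1, y: 2}" becomes one FLOW-ENTRY token.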
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. + if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. + if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not nessesary simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if simple_key.possible { + // Create the KEY token and insert it into the queue. 
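+ // (The key text was already scanned as an ordinary scalar; only now, on
+ // seeing ':', do we know it was a key. For "foo: 1" the KEY token is thus
+ // inserted retroactively at the position recorded when "foo" was saved as a
+ // potential simple key.)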
+ token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. 
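+ // (single selects '...'-style scalars, whose only escape is the doubled
+ // quote, so e.g. 'it''s' scans to the value it's; double-quoted scalars
+ // support backslash escapes instead.)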
+ var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. 
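+ // (e.g. "%TAG !e! tag:example.com,2000:app/" yields the handle "!e!" and
+ // the prefix "tag:example.com,2000:app/".)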
+ *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. 
+// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' 
|| + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. 
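+ // (For "!!str" the loop below finds no alphanumerics and the trailing '!'
+ // check completes the handle as "!!"; for "!e!tag" it collects "e", giving
+ // the handle "!e!".)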
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && !(s[0] == '!' && s[1] == 0) { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the tag is non-empty. + if len(s) == 0 { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. 
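+ // (e.g. "%C3%A9" decodes to the two UTF-8 octets 0xC3 0xA9, i.e. 'é'; the
+ // width of the leading octet tells the loop how many escaped octets to
+ // expect in total.)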
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. 
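+ // (The block scalar header has now been consumed, e.g. "|2-" for a literal
+ // scalar with explicit indentation 2 and strip chomping, or ">+" for a
+ // folded scalar keeping trailing breaks; only a comment or a line break may
+ // follow it on the same line.)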
+ if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. 
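+ // (While *indent is still 0 the scalar's indentation has not been fixed
+ // yet, so leading spaces are consumed freely; max_indent below records the
+ // deepest column seen, and that value, at least parser.indent+1, becomes
+ // the scalar's indentation.)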
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. + if is_z(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected end of stream") + return false + } + + // Consume non-blank characters. + leading_blanks := false + for !is_blankz(parser.buffer, parser.buffer_pos) { + if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { + // Is is an escaped single quote. + s = append(s, '\'') + skip(parser) + skip(parser) + + } else if single && parser.buffer[parser.buffer_pos] == '\'' { + // It is a right single quote. + break + } else if !single && parser.buffer[parser.buffer_pos] == '"' { + // It is a right double quote. + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { + // It is an escaped line break. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + skip(parser) + skip_line(parser) + leading_blanks = true + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' { + // It is an escape sequence. + code_length := 0 + + // Check the escape character. 
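+ // (e.g. "\n" appends a line feed, "\x41" appends 'A', and "\u00E9" appends
+ // the UTF-8 bytes for 'é'; the numeric forms only set code_length here and
+ // are decoded after the switch.)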
+ switch parser.buffer[parser.buffer_pos+1] { + case '0': + s = append(s, 0) + case 'a': + s = append(s, '\x07') + case 'b': + s = append(s, '\x08') + case 't', '\t': + s = append(s, '\x09') + case 'n': + s = append(s, '\x0A') + case 'v': + s = append(s, '\x0B') + case 'f': + s = append(s, '\x0C') + case 'r': + s = append(s, '\x0D') + case 'e': + s = append(s, '\x1B') + case ' ': + s = append(s, '\x20') + case '"': + s = append(s, '"') + case '\'': + s = append(s, '\'') + case '\\': + s = append(s, '\\') + case 'N': // NEL (#x85) + s = append(s, '\xC2') + s = append(s, '\x85') + case '_': // #xA0 + s = append(s, '\xC2') + s = append(s, '\xA0') + case 'L': // LS (#x2028) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA8') + case 'P': // PS (#x2029) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA9') + case 'x': + code_length = 2 + case 'u': + code_length = 4 + case 'U': + code_length = 8 + default: + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found unknown escape character") + return false + } + + skip(parser) + skip(parser) + + // Consume an arbitrary escape code. + if code_length > 0 { + var value int + + // Scan the character value. + if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { + return false + } + for k := 0; k < code_length; k++ { + if !is_hex(parser.buffer, parser.buffer_pos+k) { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "did not find expected hexdecimal number") + return false + } + value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) + } + + // Check the value and write the character. + if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. + s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. 
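+ // (The first break is kept separately in leading_break so that the folding
+ // step further down can turn e.g. "a\n  b" inside double quotes into "a b",
+ // while any additional breaks are preserved via trailing_breaks.)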
+ if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13". + if parser.flow_level > 0 && + parser.buffer[parser.buffer_pos] == ':' && + !is_blankz(parser.buffer, parser.buffer_pos+1) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found unexpected ':'") + return false + } + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) 
+ whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab character that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violate indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
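+ // (If the plain scalar ended by running into a line break, a simple key may
+ // start on the following line; e.g. after scanning the plain scalar "1" in
+ // "a: 1\nb: 2", the "b" on the next line may begin a new key.)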
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go new file mode 100644 index 0000000..5958822 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/sorter.go @@ -0,0 +1,104 @@ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + return bl + } + var ai, bi int + var an, bn int64 + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go new file mode 100644 index 0000000..190362f --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/writerc.go @@ -0,0 +1,89 @@ +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + // If the output encoding is UTF-8, we don't need to recode the buffer. 
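+ // (For UTF-16 outputs the loop below re-encodes the buffer: code points up
+ // to U+FFFF are written as one 16-bit unit in the chosen byte order, larger
+ // ones as a surrogate pair.)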
+ if emitter.encoding == yaml_UTF8_ENCODING { + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true + } + + // Recode the buffer into the raw buffer. + var low, high int + if emitter.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + high, low = 1, 0 + } + + pos := 0 + for pos < emitter.buffer_pos { + // See the "reader.c" code for more details on UTF-8 encoding. Note + // that we assume that the buffer contains a valid UTF-8 sequence. + + // Read the next UTF-8 character. + octet := emitter.buffer[pos] + + var w int + var value rune + switch { + case octet&0x80 == 0x00: + w, value = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, value = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, value = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, value = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = emitter.buffer[pos+k] + value = (value << 6) + (rune(octet) & 0x3F) + } + pos += w + + // Write the character. + if value < 0x10000 { + var b [2]byte + b[high] = byte(value >> 8) + b[low] = byte(value & 0xFF) + emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1]) + } else { + // Write the character using a surrogate pair (check "reader.c"). + var b [4]byte + value -= 0x10000 + b[high] = byte(0xD8 + (value >> 18)) + b[low] = byte((value >> 10) & 0xFF) + b[high+2] = byte(0xDC + ((value >> 8) & 0xFF)) + b[low+2] = byte(value & 0xFF) + emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3]) + } + } + + // Write the raw buffer. + if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + emitter.raw_buffer = emitter.raw_buffer[:0] + return true +} diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go new file mode 100644 index 0000000..36d6b88 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/yaml.go @@ -0,0 +1,346 @@ +// Package yaml implements YAML support for the Go language. +// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/go-yaml/yaml +// +package yaml + +import ( + "errors" + "fmt" + "reflect" + "strings" + "sync" +) + +// MapSlice encodes and decodes as a YAML map. +// The order of keys is preserved when encoding and decoding. +type MapSlice []MapItem + +// MapItem is an item in a MapSlice. +type MapItem struct { + Key, Value interface{} +} + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a YAML document. The UnmarshalYAML +// method receives a function that may be called to unmarshal the original +// YAML value into a field or variable. It is safe to call the unmarshal +// function parameter more than once if necessary. +type Unmarshaler interface { + UnmarshalYAML(unmarshal func(interface{}) error) error +} + +// The Marshaler interface may be implemented by types to customize their +// behavior when being marshaled into a YAML document. The returned value +// is marshaled in place of the original value implementing Marshaler. +// +// If an error is returned by MarshalYAML, the marshaling procedure stops +// and returns with the provided error. 
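+//
+// For example, a hypothetical Version type with a String method could choose
+// to marshal itself as a plain string:
+//
+//     func (v Version) MarshalYAML() (interface{}, error) {
+//         return v.String(), nil
+//     }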
+type Marshaler interface { + MarshalYAML() (interface{}, error) +} + +// Unmarshal decodes the first document found within the in byte slice +// and assigns decoded values into the out value. +// +// Maps and pointers (to a struct, string, int, etc) are accepted as out +// values. If an internal pointer within a struct is not initialized, +// the yaml package will initialize it if necessary for unmarshalling +// the provided data. The out parameter must not be nil. +// +// The type of the decoded values should be compatible with the respective +// values in out. If one or more values cannot be decoded due to a type +// mismatches, decoding continues partially until the end of the YAML +// content, and a *yaml.TypeError is returned with details for all +// missed values. +// +// Struct fields are only unmarshalled if they are exported (have an +// upper case first letter), and are unmarshalled using the field name +// lowercased as the default key. Custom keys may be defined via the +// "yaml" name in the field tag: the content preceding the first comma +// is used as the key, and the following comma-separated options are +// used to tweak the marshalling process (see Marshal). +// Conflicting names result in a runtime error. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// var t T +// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) +// +// See the documentation of Marshal for the format of tags and a list of +// supported tag options. +// +func Unmarshal(in []byte, out interface{}) (err error) { + defer handleErr(&err) + d := newDecoder() + p := newParser(in) + defer p.destroy() + node := p.parse() + if node != nil { + v := reflect.ValueOf(out) + if v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + d.unmarshal(node, v) + } + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +// Marshal serializes the value provided into a YAML document. The structure +// of the generated document will reflect the structure of the value itself. +// Maps and pointers (to struct, string, int, etc) are accepted as the in value. +// +// Struct fields are only unmarshalled if they are exported (have an upper case +// first letter), and are unmarshalled using the field name lowercased as the +// default key. Custom keys may be defined via the "yaml" name in the field +// tag: the content preceding the first comma is used as the key, and the +// following comma-separated options are used to tweak the marshalling process. +// Conflicting names result in a runtime error. +// +// The field tag format accepted is: +// +// `(...) yaml:"[][,[,]]" (...)` +// +// The following flags are currently supported: +// +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// Does not apply to zero valued structs. +// +// flow Marshal using a flow style (useful for structs, +// sequences and maps). +// +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the yaml keys of other struct fields. +// +// In addition, if the key is "-", the field is ignored. 
+// +// For example: +// +// type T struct { +// F int "a,omitempty" +// B int +// } +// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" +// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" +// +func Marshal(in interface{}) (out []byte, err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshal("", reflect.ValueOf(in)) + e.finish() + out = e.out + return +} + +func handleErr(err *error) { + if v := recover(); v != nil { + if e, ok := v.(yamlError); ok { + *err = e.err + } else { + panic(v) + } + } +} + +type yamlError struct { + err error +} + +func fail(err error) { + panic(yamlError{err}) +} + +func failf(format string, args ...interface{}) { + panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) +} + +// A TypeError is returned by Unmarshal when one or more fields in +// the YAML document cannot be properly decoded into the requested +// types. When this error is returned, the value is still +// unmarshaled partially. +type TypeError struct { + Errors []string +} + +func (e *TypeError) Error() string { + return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + + // Inline holds the field index if the field is part of an inlined struct. 
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) 
+ } + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{fieldsMap, fieldsList, inlineMap} + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go new file mode 100644 index 0000000..d60a6b6 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/yamlh.go @@ -0,0 +1,716 @@ +package yaml + +import ( + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. 
+} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota + + yaml_PLAIN_SCALAR_STYLE // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. +) + +type yaml_sequence_style_t yaml_style_t + +// Sequence styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota + + yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. + yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type yaml_mapping_style_t yaml_style_t + +// Mapping styles. +const ( + // Let the emitter choose the style. + yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota + + yaml_BLOCK_MAPPING_STYLE // The block mapping style. + yaml_FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type yaml_token_type_t int + +// Token types. +const ( + // An empty token. + yaml_NO_TOKEN yaml_token_type_t = iota + + yaml_STREAM_START_TOKEN // A STREAM-START token. + yaml_STREAM_END_TOKEN // A STREAM-END token. + + yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. + yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. + yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. + yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. + + yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. + yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. + yaml_BLOCK_END_TOKEN // A BLOCK-END token. + + yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. + yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. + yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. + yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. + + yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. + yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. + yaml_KEY_TOKEN // A KEY token. + yaml_VALUE_TOKEN // A VALUE token. + + yaml_ALIAS_TOKEN // An ALIAS token. + yaml_ANCHOR_TOKEN // An ANCHOR token. + yaml_TAG_TOKEN // A TAG token. + yaml_SCALAR_TOKEN // A SCALAR token. 
+) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. +) + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). 
+ anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. 
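The tag constants listed a few lines up are the YAML 1.1 core-schema tags the resolver assigns to plain scalars, and they determine which Go types an untyped decode produces. A short sketch, with document contents chosen purely for illustration:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	var v interface{}
	doc := []byte("count: 7\nratio: 2.5\nenabled: true\nname: demo\nnothing: null\n")
	if err := yaml.Unmarshal(doc, &v); err != nil {
		panic(err)
	}
	// Untyped documents decode to map[interface{}]interface{}; each value
	// arrives as the Go type matching its resolved tag (!!int, !!float,
	// !!bool, !!str, !!null).
	m := v.(map[interface{}]interface{})
	for _, k := range []string{"count", "ratio", "enabled", "name", "nothing"} {
		fmt.Printf("%s: %T %v\n", k, m[k], m[k])
	}
}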
+type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. +} + +// The states of the parser. +type yaml_parser_state_t int + +const ( + yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota + + yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. + yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. + yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. + yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. + yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. + yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. + yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. + yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. + yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. + yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. + yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. + yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. 
+ yaml_PARSE_END_STATE // Expect nothing. +) + +func (ps yaml_parser_state_t) String() string { + switch ps { + case yaml_PARSE_STREAM_START_STATE: + return "yaml_PARSE_STREAM_START_STATE" + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_START_STATE: + return "yaml_PARSE_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return "yaml_PARSE_DOCUMENT_CONTENT_STATE" + case yaml_PARSE_DOCUMENT_END_STATE: + return "yaml_PARSE_DOCUMENT_END_STATE" + case yaml_PARSE_BLOCK_NODE_STATE: + return "yaml_PARSE_BLOCK_NODE_STATE" + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" + case yaml_PARSE_FLOW_NODE_STATE: + return "yaml_PARSE_FLOW_NODE_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occured. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_file io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The input encoding. 
+ + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. 
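The write handler contract described above is deliberately io.Writer-shaped: the emitter hands over the accumulated bytes and expects nil only if all of them were accepted. A conforming handler can therefore be a closure over any io.Writer; the helper below is a hypothetical in-package sketch (yaml_write_handler_t and yaml_emitter_t are only visible inside package yaml) and is not part of the vendored file:

// Hypothetical helper, assumed to live inside package yaml.
func write_handler_for(w io.Writer) yaml_write_handler_t {
	return func(emitter *yaml_emitter_t, buffer []byte) error {
		// io.Writer treats a short write as an error, so a nil return
		// here means the whole buffer reached the output.
		_, err := w.Write(buffer)
		return err
	}
}

The recode-and-flush loop shown earlier in this diff calls exactly this hook, passing either buffer or raw_buffer depending on the stream encoding.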
+type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_file io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? + best_indent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? + line_break yaml_break_t // The preferred line break. + + state yaml_emitter_state_t // The current emitter state. + states []yaml_emitter_state_t // The stack of states. + + events []yaml_event_t // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []yaml_tag_directive_t // The list of tag directives. + + indent int // The current indentation level. + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + open_ended bool // If an explicit document end is required? + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. + tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. + scalar_data struct { + value []byte // The scalar value. + multiline bool // Does the scalar contain line breaks? + flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? + block_plain_allowed bool // Can the scalar be expressed in the block plain style? + single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? + block_allowed bool // Can the scalar be expressed in the literal or folded styles? + style yaml_scalar_style_t // The output style. + } + + // Dumper stuff + + opened bool // If the stream was already opened? + closed bool // If the stream was already closed? + + // The information associated with the document nodes. + anchors *struct { + references int // The number of references. + anchor int // The anchor id. + serialized bool // If the node has been emitted? + } + + last_anchor_id int // The last assigned anchor id. + + document *yaml_document_t // The currently emitted document. +} diff --git a/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/gopkg.in/yaml.v2/yamlprivateh.go new file mode 100644 index 0000000..8110ce3 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/yamlprivateh.go @@ -0,0 +1,173 @@ +package yaml + +const ( + // The size of the input raw buffer. + input_raw_buffer_size = 512 + + // The size of the input buffer. + // It should be possible to decode the whole raw buffer. + input_buffer_size = input_raw_buffer_size * 3 + + // The size of the output buffer. 
+ output_buffer_size = 128 + + // The size of the output raw buffer. + // It should be possible to encode the whole output buffer. + output_raw_buffer_size = (output_buffer_size*2 + 2) + + // The size of other stacks and queues. + initial_stack_size = 16 + initial_queue_size = 16 + initial_string_size = 16 +) + +// Check if the character at the specified position is an alphabetical +// character, a digit, '_', or '-'. +func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. +func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. 
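These classifiers operate on raw UTF-8 bytes plus an index rather than on decoded runes, so multi-byte line breaks such as NEL (0xC2 0x85), LS and PS are detected by peeking at the bytes that follow. A hypothetical in-package test exercising a few of them (not part of the vendored tree; it also uses the width helper defined just below):

// Hypothetical test, assumed to sit next to yamlprivateh.go in package yaml.
func TestCharClassifiers(t *testing.T) {
	buf := []byte("a\r\nb\xc2\x85c")
	if !is_break(buf, 1) || !is_crlf(buf, 1) {
		t.Fatal("expected CR LF at index 1 to count as a break")
	}
	if !is_break(buf, 4) { // 0xC2 0x85 is NEL (#x85)
		t.Fatal("expected NEL at index 4 to count as a break")
	}
	if is_break(buf, 0) || !is_alpha(buf, 0) {
		t.Fatal("'a' should be alphanumeric, not a break")
	}
	if width(buf[4]) != 2 { // lead byte 0xC2 starts a 2-byte sequence
		t.Fatal("expected width 2 for the NEL lead byte")
	}
}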
+func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/vendor.json b/vendor/vendor.json new file mode 100644 index 0000000..6fc3a85 --- /dev/null +++ b/vendor/vendor.json @@ -0,0 +1,199 @@ +{ + "comment": "", + "ignore": "test", + "package": [ + { + "path": "appengine", + "revision": "" + }, + { + "path": "appengine/datastore", + "revision": "" + }, + { + "path": "appengine/memcache", + "revision": "" + }, + { + "checksumSHA1": "6xRcrOO03m/jXeopXDvcqL3/Zz0=", + "path": "github.com/GeertJohan/go.rice", + "revision": "4bbccbfa39e784796e483270451217d3369ecfbe", + "revisionTime": "2017-01-23T13:54:25Z" + }, + { + "checksumSHA1": "xECV8VmnSwtMPugLqB1OAXwOs48=", + "path": "github.com/GeertJohan/go.rice/embedded", + "revision": "4bbccbfa39e784796e483270451217d3369ecfbe", + "revisionTime": "2017-01-23T13:54:25Z" + }, + { + "checksumSHA1": "8i+beEgcVf0q/I7lTqo2ERZM/OU=", + "path": "github.com/daaku/go.zipexe", + "revision": "a5fe2436ffcb3236e175e5149162b41cd28bd27d", + "revisionTime": "2015-03-29T02:31:25Z" + }, + { + "checksumSHA1": "6defOlYtxIqheaUEG/cWWouQnIU=", + "path": "github.com/hlandau/passlib", + "revision": "933f1c3f63ffb0d17de04c3174f875ae683a33cd", + "revisionTime": "2016-09-22T11:41:03Z" + }, + { + "checksumSHA1": "jJwSo4r54rSLoIyAXFCtGsJlcfw=", + "path": "github.com/icza/session", + "revision": "81bce62677205deaf9d559f424b7686dc9b9fe0a", + "revisionTime": "2017-02-17T09:53:04Z" + }, + { + "checksumSHA1": "ve4j+BJutjpqooYuYQectRK9zTw=", + "path": "github.com/justinas/nosurf", + "revision": "8e15682772641a1e39c431233e6a9338a32def32", + "revisionTime": "2016-10-04T08:52:51Z" + }, + { + "checksumSHA1": "eWtEV0iBNTL6DRJVqEeniS+thHA=", + "path": "github.com/kardianos/osext", + "revision": "9d302b58e975387d0b4d9be876622c86cefe64be", + "revisionTime": "2017-03-09T17:28:38Z" + }, + { + "checksumSHA1": "BvfT3pAC+s5qn5nEDf0N7lxpBNg=", + "path": "github.com/pressly/chi", + "revision": 
"57ee7612d4405274628c71d1dc9455a12646f056", + "revisionTime": "2017-02-19T07:36:56Z" + }, + { + "checksumSHA1": "9HdWzw76EiC7Hwn+Y2QgNST1Lqc=", + "path": "github.com/pressly/chi/middleware", + "revision": "57ee7612d4405274628c71d1dc9455a12646f056", + "revisionTime": "2017-02-19T07:36:56Z" + }, + { + "checksumSHA1": "dezZFwteKCEvEN0IkR6XSD6qoJU=", + "path": "github.com/tidwall/btree", + "revision": "9876f1454cf0993a53d74c27196993e345f50dd1", + "revisionTime": "2017-01-13T22:41:14Z" + }, + { + "checksumSHA1": "VGIoT8ekkhBYV4z9n1/iK9MXf7A=", + "path": "github.com/tidwall/buntdb", + "revision": "74dc10171b7549022c818bd212a9ddea151db02e", + "revisionTime": "2016-12-02T16:37:38Z" + }, + { + "checksumSHA1": "k/Xh0p5L7+tBCXAL2dOCwUf9J3Y=", + "path": "github.com/tidwall/gjson", + "revision": "09d1c5c5bc64e094394dfe2150220d906c55ac37", + "revisionTime": "2017-02-05T16:10:42Z" + }, + { + "checksumSHA1": "fB70Sk+JyVkdav2NHeNjG8Z0mkQ=", + "path": "github.com/tidwall/grect", + "revision": "ba9a043346eba55344e40d66a5e74cfda3a9d293", + "revisionTime": "2016-10-06T13:56:19Z" + }, + { + "checksumSHA1": "qmePMXEDYGwkAfT9QvtMC58JN/E=", + "path": "github.com/tidwall/match", + "revision": "173748da739a410c5b0b813b956f89ff94730b4c", + "revisionTime": "2016-08-30T17:39:30Z" + }, + { + "checksumSHA1": "lCeIrhrooBzMXbB/YH32E7I4MSU=", + "path": "github.com/tidwall/rtree", + "revision": "d4a8a3d30d5729f85edfba1745241f3a621d0359", + "revisionTime": "2016-09-03T21:37:29Z" + }, + { + "checksumSHA1": "vE43s37+4CJ2CDU6TlOUOYE0K9c=", + "path": "golang.org/x/crypto/bcrypt", + "revision": "728b753d0135da6801d45a38e6f43ff55779c5c2", + "revisionTime": "2017-01-24T01:46:54Z" + }, + { + "checksumSHA1": "KrDsWIDDeafYKOOv6UEPmtQWWb0=", + "path": "golang.org/x/crypto/blake2b", + "revision": "728b753d0135da6801d45a38e6f43ff55779c5c2", + "revisionTime": "2017-01-24T01:46:54Z" + }, + { + "checksumSHA1": "JsJdKXhz87gWenMwBeejTOeNE7k=", + "path": "golang.org/x/crypto/blowfish", + "revision": "728b753d0135da6801d45a38e6f43ff55779c5c2", + "revisionTime": "2017-01-24T01:46:54Z" + }, + { + "checksumSHA1": "1MGpGDQqnUoRpv7VEcQrXOBydXE=", + "path": "golang.org/x/crypto/pbkdf2", + "revision": "728b753d0135da6801d45a38e6f43ff55779c5c2", + "revisionTime": "2017-01-24T01:46:54Z" + }, + { + "checksumSHA1": "E8pDMGySfy5Mw+jzXOkOxo35bww=", + "path": "golang.org/x/crypto/scrypt", + "revision": "728b753d0135da6801d45a38e6f43ff55779c5c2", + "revisionTime": "2017-01-24T01:46:54Z" + }, + { + "checksumSHA1": "klJ9QeZbE0SGPA1hhLjMwMTONig=", + "path": "gopkg.in/hlandau/easymetric.v1/cexp", + "revision": "1c173222e345c2ba115e0f483821c6da2d3b0fa9", + "revisionTime": "2015-09-23T21:15:05Z" + }, + { + "checksumSHA1": "hBKoXBOEfoSVBsKCrUuB1YoLqNM=", + "path": "gopkg.in/hlandau/measurable.v1", + "revision": "d96a009bd9bda94dd1294105e67e8c1794e533d3", + "revisionTime": "2015-10-29T11:39:54Z" + }, + { + "checksumSHA1": "dLTGi5ic/+/0iXUwlNJACCUegBo=", + "path": "gopkg.in/hlandau/passlib.v1/abstract", + "revision": "933f1c3f63ffb0d17de04c3174f875ae683a33cd", + "revisionTime": "2016-09-22T11:41:03Z" + }, + { + "checksumSHA1": "WIlU/mYXRx+dOE8duG8NYhtJr2o=", + "path": "gopkg.in/hlandau/passlib.v1/hash/bcrypt", + "revision": "933f1c3f63ffb0d17de04c3174f875ae683a33cd", + "revisionTime": "2016-09-22T11:41:03Z" + }, + { + "checksumSHA1": "+KHCKMjQSiGcoBTCK/rNaDUsZjA=", + "path": "gopkg.in/hlandau/passlib.v1/hash/bcryptsha256", + "revision": "933f1c3f63ffb0d17de04c3174f875ae683a33cd", + "revisionTime": "2016-09-22T11:41:03Z" + }, + { + "checksumSHA1": 
"g9Craq/vfDfd8Mw9eAy6NFjfFz0=", + "path": "gopkg.in/hlandau/passlib.v1/hash/scrypt", + "revision": "933f1c3f63ffb0d17de04c3174f875ae683a33cd", + "revisionTime": "2016-09-22T11:41:03Z" + }, + { + "checksumSHA1": "V4W70BcLKNnb53BTJ49YVmHapHA=", + "path": "gopkg.in/hlandau/passlib.v1/hash/scrypt/raw", + "revision": "933f1c3f63ffb0d17de04c3174f875ae683a33cd", + "revisionTime": "2016-09-22T11:41:03Z" + }, + { + "checksumSHA1": "wbxXrJXpVsXVdLAv+d2zK+4ZZf4=", + "path": "gopkg.in/hlandau/passlib.v1/hash/sha2crypt", + "revision": "933f1c3f63ffb0d17de04c3174f875ae683a33cd", + "revisionTime": "2016-09-22T11:41:03Z" + }, + { + "checksumSHA1": "dn+VC/24lLLgOErHEH8KoUN/Q18=", + "path": "gopkg.in/hlandau/passlib.v1/hash/sha2crypt/raw", + "revision": "933f1c3f63ffb0d17de04c3174f875ae683a33cd", + "revisionTime": "2016-09-22T11:41:03Z" + }, + { + "checksumSHA1": "0KwOlQV1dNUh9X8t+5s7nX5bqfk=", + "path": "gopkg.in/yaml.v2", + "revision": "a3f3340b5840cee44f372bddb5880fcbc419b46a", + "revisionTime": "2017-02-08T14:18:51Z" + } + ], + "rootPath": "go.rls.moe/nyx" +}