diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..2acb044 --- /dev/null +++ b/.gitignore @@ -0,0 +1,4 @@ +config.yml +nyx-testing.db +nyx +run.sh \ No newline at end of file diff --git a/config/config.go b/config/config.go new file mode 100644 index 0000000..4a269fc --- /dev/null +++ b/config/config.go @@ -0,0 +1,93 @@ +package config + +import ( + "flag" + "gopkg.in/yaml.v2" + "io/ioutil" + "os" +) + +var configFileName = "./config.yml" + +func init() { + flag.StringVar(&configFileName, "config", "./config.yml", "Config File Location") +} + +type Config struct { + Site SiteConfig `yaml:"site"` // Site/HTML Configuration + DB DBConfig `yaml:"db"` // Database Configuration + HostnameWhiteList []string `yaml:"hosts"` // List of valid hostnames, ignored if empty + ListenOn string `yaml:"listen_on"` // Address & Port to use + MasterSecret string `yaml:"secret"` // Master Secret for keychain + DisableSecurity bool `yaml:"disable_security"` // Disables various flags to ensure non-HTTPS requests work + Captcha CaptchaConfig `yaml:"captcha"` +} + +const ( + CaptchaRecaptcha = "recaptcha" + CaptchaDisabled = "disabled" +) + +type CaptchaConfig struct { + Mode string `yaml:"mode"` // Captcha Mode + Settings map[string]string `yaml:"settings,inline"` +} + +type SiteConfig struct { + Title string `yaml:"title"` // Site Title + Description string `yaml:"description"` // Site Description + PrimaryColor string `yaml:"color"` // Primary Color for Size +} + +type DBConfig struct { + File string `yaml:"file"` + ReadOnly bool `yaml:"read_only"` +} + +func Load() (*Config, error) { + var config = &Config{ + Site: SiteConfig{ + Title: "NyxChan", + PrimaryColor: "#78909c", + Description: "NyxChan Default Configuration", + }, + DB: DBConfig{ + File: ":memory:", + ReadOnly: false, + }, + HostnameWhiteList: []string{}, + ListenOn: ":8080", + MasterSecret: "changeme", + DisableSecurity: false, + Captcha: CaptchaConfig{ + Mode: CaptchaDisabled, + }, + } + if _, err := 
os.Stat(configFileName); os.IsNotExist(err) { + return config, nil + } + dat, err := ioutil.ReadFile(configFileName) + if err != nil { + return nil, err + } + err = yaml.Unmarshal(dat, config) + if err != nil { + return nil, err + } + return config, nil +} + +func (c Config) IsHostNameValid(hostname string) bool { + if c.HostnameWhiteList == nil { + return true + } + if len(c.HostnameWhiteList) == 0 { + return true + } + for _, v := range c.HostnameWhiteList { + if v == hostname { + return true + } + } + return false +} diff --git a/http/admin/handler.go b/http/admin/handler.go new file mode 100644 index 0000000..f7ddb3b --- /dev/null +++ b/http/admin/handler.go @@ -0,0 +1,119 @@ +package admin + +import ( + "bytes" + "github.com/GeertJohan/go.rice" + "github.com/icza/session" + "github.com/pressly/chi" + "github.com/tidwall/buntdb" + "go.rls.moe/nyx/http/errw" + "go.rls.moe/nyx/http/middle" + "go.rls.moe/nyx/resources" + "html/template" + "net/http" + "time" +) + +var riceConf = rice.Config{ + LocateOrder: []rice.LocateMethod{ + rice.LocateWorkingDirectory, + rice.LocateEmbedded, + rice.LocateAppended, + }, +} + +var box = riceConf.MustFindBox("http/admin/res/") + +var ( + panelTmpl = template.New("admin/panel") + loginTmpl = template.New("admin/login") +) + +func init() { + var err error + panelTmpl, err = panelTmpl.Parse(box.MustString("panel.html")) + if err != nil { + panic(err) + } + loginTmpl, err = loginTmpl.Parse(box.MustString("index.html")) + if err != nil { + panic(err) + } +} + +// Router sets up the Administration Panel +// It **must** be setup on the /admin/ basepath +func Router(r chi.Router) { + r.Get("/", serveLogin) + r.Get("/index.html", serveLogin) + r.Get("/panel.html", servePanel) + r.Post("/new_board.sh", handleNewBoard) + r.Post("/login.sh", handleLogin) + r.Post("/logout.sh", handleLogout) +} + +func serveLogin(w http.ResponseWriter, r *http.Request) { + dat := bytes.NewBuffer([]byte{}) + err := loginTmpl.Execute(dat, middle.GetBaseCtx(r)) 
+ if err != nil { + errw.ErrorWriter(err, w, r) + return + } + http.ServeContent(w, r, "index.html", time.Now(), + bytes.NewReader(dat.Bytes())) +} + +// servePanel renders the admin panel template; requires an authenticated session. +func servePanel(w http.ResponseWriter, r *http.Request) { + sess := middle.GetSession(r) + if sess == nil { + w.WriteHeader(http.StatusUnauthorized) + w.Write([]byte("Unauthorized")) + return + } + dat := bytes.NewBuffer([]byte{}) + err := panelTmpl.Execute(dat, middle.GetBaseCtx(r)) + if err != nil { + errw.ErrorWriter(err, w, r) + return + } + http.ServeContent(w, r, "panel.html", time.Now(), + bytes.NewReader(dat.Bytes())) +} + +// handleLogout removes the current session and redirects to the login page. +// BUG(review): when sess == nil the redirect is written but the function does +// NOT return, so session.Remove(nil, w) is still invoked and a second redirect +// is written to an already-committed response. +func handleLogout(w http.ResponseWriter, r *http.Request) { + sess := middle.GetSession(r) + if sess == nil { + http.Redirect(w, r, "/admin/index.html", http.StatusSeeOther) + } + session.Remove(sess, w) + http.Redirect(w, r, "/admin/index.html", http.StatusSeeOther) +} +// handleLogin authenticates an admin and issues a session cookie. +// BUG(review): none of the three failure paths below (ParseForm error, unknown +// user, bad password) returns after writing the error page, so execution falls +// through and session.Add grants an "admin" session regardless of the outcome — +// an authentication bypass. Each errw.ErrorWriter call must be followed by return. +func handleLogin(w http.ResponseWriter, r *http.Request) { + err := r.ParseForm() + if err != nil { + errw.ErrorWriter(err, w, r) + } + db := middle.GetDB(r) + + var admin = &resources.AdminPass{} + err = db.View(func(tx *buntdb.Tx) error { + var err error + admin, err = resources.GetAdmin(tx, r.FormValue("id")) + return err + }) + if err != nil { + err = errw.MakeErrorWithTitle("Access Denied", "User or Password Invalid") + errw.ErrorWriter(err, w, r) + } + err = admin.VerifyLogin(r.FormValue("pass")) + if err != nil { + err = errw.MakeErrorWithTitle("Access Denied", "User or Password Invalid") + errw.ErrorWriter(err, w, r) + } + sess := session.NewSessionOptions(&session.SessOptions{ + CAttrs: map[string]interface{}{"mode": "admin"}, + }) + session.Add(sess, w) + + http.Redirect(w, r, "/admin/panel.html", http.StatusSeeOther) +} diff --git a/http/admin/newboard.go b/http/admin/newboard.go new file mode 100644 index 0000000..ef981bc --- /dev/null +++ b/http/admin/newboard.go @@ -0,0 +1,58 @@ +package admin + +import ( + "errors" + "github.com/tidwall/buntdb" + "go.rls.moe/nyx/http/errw" + "go.rls.moe/nyx/http/middle" + 
"go.rls.moe/nyx/resources" + "net/http" +) + +// handleNewBoard creates a new board from the admin panel form. +// Requires an authenticated session whose "mode" attribute is "admin". +func handleNewBoard(w http.ResponseWriter, r *http.Request) { + sess := middle.GetSession(r) + if sess == nil { + w.WriteHeader(http.StatusUnauthorized) + w.Write([]byte("Unauthorized")) + return + } + if sess.CAttr("mode") != "admin" { + w.WriteHeader(http.StatusUnauthorized) + w.Write([]byte("Unauthorized")) + return + } + + err := r.ParseForm() + if err != nil { + errw.ErrorWriter(err, w, r); return // fix: must not continue with an unparsed form + } + db := middle.GetDB(r) + + var board = &resources.Board{} + + board.ShortName = r.FormValue("shortname") + board.LongName = r.FormValue("longname") + + if board.ShortName == "" { + errw.ErrorWriter(errors.New("Need shortname"), w, r) + return + } + + if board.ShortName == "admin" || board.ShortName == "@" { // fix: was &&, which can never be true, making the reserved-name guard a no-op + errw.ErrorWriter(errors.New("No"), w, r); return // fix: guard also fell through and created the board anyway + } + + if board.LongName == "" || len(board.LongName) < 5 { // fix: was &&, which only rejected the impossible case "" AND len<5 + errw.ErrorWriter(errors.New("Need 5 characters for long name"), w, r) + return + } + + if err = db.Update(func(tx *buntdb.Tx) error { + return resources.NewBoard(tx, r.Host, board) + }); err != nil { + errw.ErrorWriter(err, w, r) + return + } + + http.Redirect(w, r, "/admin/panel.html", http.StatusSeeOther) +} diff --git a/http/admin/res/index.html b/http/admin/res/index.html new file mode 100644 index 0000000..ad7e0d2 --- /dev/null +++ b/http/admin/res/index.html @@ -0,0 +1,41 @@ + + +
+ + ++ {{range .GetReplies}} + {{ if ne .ID $threadrid }} ++ {{ renderText .GetReply.Text}} +
>> | +
+
+ No.{{.ID}}
+ ++ {{ renderText .Text}} + |
+
+ {{range .GetReplies}} + {{ if ne .ID $threadrid }} ++ {{ renderText .GetReply.Text}} +
>> | +
+
+ No.{{.ID}}
+ ++ {{ renderText .Text}} + |
+
Your name: {{ .name }}
+{{ end }} + + + +` +var templ = template.Must(template.New("t1").Parse(templateString)) + +func myFunc(w http.ResponseWriter, r *http.Request) { + context := make(map[string]string) + context["token"] = nosurf.Token(r) + if r.Method == "POST" { + context["name"] = r.FormValue("name") + } + + templ.Execute(w, context) +} + +func main() { + myHandler := http.HandlerFunc(myFunc) + fmt.Println("Listening on http://127.0.0.1:8000/") + http.ListenAndServe(":8000", nosurf.New(myHandler)) +} +``` + +### Manual token verification +In some cases the CSRF token may be send through a non standard way, +e.g. a body or request is a JSON encoded message with one of the fields +being a token. + +In such case the handler(path) should be excluded from an automatic +verification by using one of the exemption methods: + +```go + func (h *CSRFHandler) ExemptFunc(fn func(r *http.Request) bool) + func (h *CSRFHandler) ExemptGlob(pattern string) + func (h *CSRFHandler) ExemptGlobs(patterns ...string) + func (h *CSRFHandler) ExemptPath(path string) + func (h *CSRFHandler) ExemptPaths(paths ...string) + func (h *CSRFHandler) ExemptRegexp(re interface{}) + func (h *CSRFHandler) ExemptRegexps(res ...interface{}) +``` + +Later on, the token **must** be verified by manually getting the token from the cookie +and providing the token sent in body through: `VerifyToken(tkn, tkn2 string) bool`. + +Example: +```go +func HandleJson(w http.ResponseWriter, r *http.Request) { + d := struct{ + X,Y int + Tkn string + }{} + json.Unmarshal(ioutil.ReadAll(r.Body), &d) + if !nosurf.VerifyToken(Token(r), d.Tkn) { + http.Errorf(w, "CSRF token incorrect", http.StatusBadRequest) + return + } + // do smth cool +} +``` + +### Contributing + +0. Find an issue that bugs you / open a new one. +1. Discuss. +2. Branch off, commit, test. +3. Make a pull request / attach the commits to the issue. 
diff --git a/vendor/github.com/justinas/nosurf/context.go b/vendor/github.com/justinas/nosurf/context.go new file mode 100644 index 0000000..fb6b83d --- /dev/null +++ b/vendor/github.com/justinas/nosurf/context.go @@ -0,0 +1,60 @@ +// +build go1.7 + +package nosurf + +import "net/http" + +type ctxKey int + +const ( + nosurfKey ctxKey = iota +) + +type csrfContext struct { + // The masked, base64 encoded token + // That's suitable for use in form fields, etc. + token string + // reason for the failure of CSRF check + reason error +} + +// Token takes an HTTP request and returns +// the CSRF token for that request +// or an empty string if the token does not exist. +// +// Note that the token won't be available after +// CSRFHandler finishes +// (that is, in another handler that wraps it, +// or after the request has been served) +func Token(req *http.Request) string { + ctx := req.Context().Value(nosurfKey).(*csrfContext) + + return ctx.token +} + +// Reason takes an HTTP request and returns +// the reason of failure of the CSRF check for that request +// +// Note that the same availability restrictions apply for Reason() as for Token(). 
+func Reason(req *http.Request) error { + ctx := req.Context().Value(nosurfKey).(*csrfContext) + + return ctx.reason +} + +func ctxClear(_ *http.Request) { +} + +func ctxSetToken(req *http.Request, token []byte) { + ctx := req.Context().Value(nosurfKey).(*csrfContext) + ctx.token = b64encode(maskToken(token)) +} + +func ctxSetReason(req *http.Request, reason error) { + ctx := req.Context().Value(nosurfKey).(*csrfContext) + if ctx.token == "" { + panic("Reason should never be set when there's no token in the context yet.") + } + + ctx.reason = reason +} diff --git a/vendor/github.com/justinas/nosurf/context_legacy.go b/vendor/github.com/justinas/nosurf/context_legacy.go new file mode 100644 index 0000000..81e1b89 --- /dev/null +++ b/vendor/github.com/justinas/nosurf/context_legacy.go @@ -0,0 +1,101 @@ +// +build !go1.7 + +package nosurf + +import ( + "net/http" + "sync" +) + +// This file implements a context similar to one found +// in gorilla/context, but tailored specifically for our use case +// and not using gorilla's package just because. + +type csrfContext struct { + // The masked, base64 encoded token + // That's suitable for use in form fields, etc. + token string + // reason for the failure of CSRF check + reason error +} + +var ( + contextMap = make(map[*http.Request]*csrfContext) + cmMutex = new(sync.RWMutex) +) + +// Token() takes an HTTP request and returns +// the CSRF token for that request +// or an empty string if the token does not exist. 
+// +// Note that the token won't be available after +// CSRFHandler finishes +// (that is, in another handler that wraps it, +// or after the request has been served) +func Token(req *http.Request) string { + cmMutex.RLock() + defer cmMutex.RUnlock() + + ctx, ok := contextMap[req] + + if !ok { + return "" + } + + return ctx.token +} + +// Reason() takes an HTTP request and returns +// the reason of failure of the CSRF check for that request +// +// Note that the same availability restrictions apply for Reason() as for Token(). +func Reason(req *http.Request) error { + cmMutex.RLock() + defer cmMutex.RUnlock() + + ctx, ok := contextMap[req] + + if !ok { + return nil + } + + return ctx.reason +} + +// Takes a raw token, masks it with a per-request key, +// encodes in base64 and makes it available to the wrapped handler +func ctxSetToken(req *http.Request, token []byte) *http.Request { + cmMutex.Lock() + defer cmMutex.Unlock() + + ctx, ok := contextMap[req] + if !ok { + ctx = new(csrfContext) + contextMap[req] = ctx + } + + ctx.token = b64encode(maskToken(token)) + + return req +} + +func ctxSetReason(req *http.Request, reason error) *http.Request { + cmMutex.Lock() + defer cmMutex.Unlock() + + ctx, ok := contextMap[req] + if !ok { + panic("Reason should never be set when there's no token" + + " (context) yet.") + } + + ctx.reason = reason + return req +} + +func ctxClear(req *http.Request) { + cmMutex.Lock() + defer cmMutex.Unlock() + + delete(contextMap, req) +} diff --git a/vendor/github.com/justinas/nosurf/crypto.go b/vendor/github.com/justinas/nosurf/crypto.go new file mode 100644 index 0000000..68817f2 --- /dev/null +++ b/vendor/github.com/justinas/nosurf/crypto.go @@ -0,0 +1,54 @@ +package nosurf + +import ( + "crypto/rand" + "io" +) + +// Masks/unmasks the given data *in place* +// with the given key +// Slices must be of the same length, or oneTimePad will panic +func oneTimePad(data, key []byte) { + n := len(data) + if n != len(key) { + panic("Lengths of 
slices are not equal") + } + + for i := 0; i < n; i++ { + data[i] ^= key[i] + } +} + +func maskToken(data []byte) []byte { + if len(data) != tokenLength { + return nil + } + + // tokenLength*2 == len(enckey + token) + result := make([]byte, 2*tokenLength) + // the first half of the result is the OTP + // the second half is the masked token itself + key := result[:tokenLength] + token := result[tokenLength:] + copy(token, data) + + // generate the random token + if _, err := io.ReadFull(rand.Reader, key); err != nil { + panic(err) + } + + oneTimePad(token, key) + return result +} + +func unmaskToken(data []byte) []byte { + if len(data) != tokenLength*2 { + return nil + } + + key := data[:tokenLength] + token := data[tokenLength:] + oneTimePad(token, key) + + return token +} diff --git a/vendor/github.com/justinas/nosurf/exempt.go b/vendor/github.com/justinas/nosurf/exempt.go new file mode 100644 index 0000000..f49a444 --- /dev/null +++ b/vendor/github.com/justinas/nosurf/exempt.go @@ -0,0 +1,108 @@ +package nosurf + +import ( + "fmt" + "net/http" + pathModule "path" + "reflect" + "regexp" +) + +// Checks if the given request is exempt from CSRF checks. +// It checks the ExemptFunc first, then the exact paths, +// then the globs and finally the regexps. +func (h *CSRFHandler) IsExempt(r *http.Request) bool { + if h.exemptFunc != nil && h.exemptFunc(r) { + return true + } + + path := r.URL.Path + if sContains(h.exemptPaths, path) { + return true + } + + // then the globs + for _, glob := range h.exemptGlobs { + matched, err := pathModule.Match(glob, path) + if matched && err == nil { + return true + } + } + + // finally, the regexps + for _, re := range h.exemptRegexps { + if re.MatchString(path) { + return true + } + } + + return false +} + +// Exempts an exact path from CSRF checks +// With this (and other Exempt* methods) +// you should take note that Go's paths +// include a leading slash. 
+func (h *CSRFHandler) ExemptPath(path string) { + h.exemptPaths = append(h.exemptPaths, path) +} + +// A variadic argument version of ExemptPath() +func (h *CSRFHandler) ExemptPaths(paths ...string) { + for _, v := range paths { + h.ExemptPath(v) + } +} + +// Exempts URLs that match the specified glob pattern +// (as used by filepath.Match()) from CSRF checks +// +// Note that ExemptGlob() is unable to detect syntax errors, +// because it doesn't have a path to check it against +// and filepath.Match() doesn't report an error +// if the path is empty. +// If we find a way to check the syntax, ExemptGlob +// MIGHT PANIC on a syntax error in the future. +// ALWAYS check your globs for syntax errors. +func (h *CSRFHandler) ExemptGlob(pattern string) { + h.exemptGlobs = append(h.exemptGlobs, pattern) +} + +// A variadic argument version of ExemptGlob() +func (h *CSRFHandler) ExemptGlobs(patterns ...string) { + for _, v := range patterns { + h.ExemptGlob(v) + } +} + +// Accepts a regular expression string or a compiled *regexp.Regexp +// and exempts URLs that match it from CSRF checks. +// +// If the given argument is neither of the accepted values, +// or the given string fails to compile, ExemptRegexp() panics. 
+func (h *CSRFHandler) ExemptRegexp(re interface{}) { + var compiled *regexp.Regexp + + switch re.(type) { + case string: + compiled = regexp.MustCompile(re.(string)) + case *regexp.Regexp: + compiled = re.(*regexp.Regexp) + default: + err := fmt.Sprintf("%v isn't a valid type for ExemptRegexp()", reflect.TypeOf(re)) + panic(err) + } + + h.exemptRegexps = append(h.exemptRegexps, compiled) +} + +// A variadic argument version of ExemptRegexp() +func (h *CSRFHandler) ExemptRegexps(res ...interface{}) { + for _, v := range res { + h.ExemptRegexp(v) + } +} + +func (h *CSRFHandler) ExemptFunc(fn func(r *http.Request) bool) { + h.exemptFunc = fn +} diff --git a/vendor/github.com/justinas/nosurf/handler.go b/vendor/github.com/justinas/nosurf/handler.go new file mode 100644 index 0000000..298df5e --- /dev/null +++ b/vendor/github.com/justinas/nosurf/handler.go @@ -0,0 +1,220 @@ +// Package nosurf implements an HTTP handler that +// mitigates Cross-Site Request Forgery Attacks. +package nosurf + +import ( + "errors" + "net/http" + "net/url" + "regexp" +) + +const ( + // the name of CSRF cookie + CookieName = "csrf_token" + // the name of the form field + FormFieldName = "csrf_token" + // the name of CSRF header + HeaderName = "X-CSRF-Token" + // the HTTP status code for the default failure handler + FailureCode = 400 + + // Max-Age in seconds for the default base cookie. 365 days. + MaxAge = 365 * 24 * 60 * 60 +) + +var safeMethods = []string{"GET", "HEAD", "OPTIONS", "TRACE"} + +// reasons for CSRF check failures +var ( + ErrNoReferer = errors.New("A secure request contained no Referer or its value was malformed") + ErrBadReferer = errors.New("A secure request's Referer comes from a different Origin" + + " from the request's URL") + ErrBadToken = errors.New("The CSRF token in the cookie doesn't match the one" + + " received in a form/header.") +) + +type CSRFHandler struct { + // Handlers that CSRFHandler wraps. 
+ successHandler http.Handler + failureHandler http.Handler + + // The base cookie that CSRF cookies will be built upon. + // This should be a better solution of customizing the options + // than a bunch of methods SetCookieExpiration(), etc. + baseCookie http.Cookie + + // Slices of paths that are exempt from CSRF checks. + // They can be specified by... + // ...an exact path, + exemptPaths []string + // ...a regexp, + exemptRegexps []*regexp.Regexp + // ...or a glob (as used by path.Match()). + exemptGlobs []string + // ...or a custom matcher function + exemptFunc func(r *http.Request) bool + + // All of those will be matched against Request.URL.Path, + // So they should take the leading slash into account +} + +func defaultFailureHandler(w http.ResponseWriter, r *http.Request) { + http.Error(w, "", FailureCode) +} + +// Extracts the "sent" token from the request +// and returns an unmasked version of it +func extractToken(r *http.Request) []byte { + var sentToken string + + // Prefer the header over form value + sentToken = r.Header.Get(HeaderName) + + // Then POST values + if len(sentToken) == 0 { + sentToken = r.PostFormValue(FormFieldName) + } + + // If all else fails, try a multipart value. + // PostFormValue() will already have called ParseMultipartForm() + if len(sentToken) == 0 && r.MultipartForm != nil { + vals := r.MultipartForm.Value[FormFieldName] + if len(vals) != 0 { + sentToken = vals[0] + } + } + + return b64decode(sentToken) +} + +// Constructs a new CSRFHandler that calls +// the specified handler if the CSRF check succeeds. +func New(handler http.Handler) *CSRFHandler { + baseCookie := http.Cookie{} + baseCookie.MaxAge = MaxAge + + csrf := &CSRFHandler{successHandler: handler, + failureHandler: http.HandlerFunc(defaultFailureHandler), + baseCookie: baseCookie, + } + + return csrf +} + +// The same as New(), but has an interface return type. 
+func NewPure(handler http.Handler) http.Handler { + return New(handler) +} + +func (h *CSRFHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + r = addNosurfContext(r) + defer ctxClear(r) + w.Header().Add("Vary", "Cookie") + + var realToken []byte + + tokenCookie, err := r.Cookie(CookieName) + if err == nil { + realToken = b64decode(tokenCookie.Value) + } + + // If the length of the real token isn't what it should be, + // it has either been tampered with, + // or we're migrating onto a new algorithm for generating tokens, + // or it hasn't ever been set so far. + // In any case of those, we should regenerate it. + // + // As a consequence, CSRF check will fail when comparing the tokens later on, + // so we don't have to fail it just yet. + if len(realToken) != tokenLength { + h.RegenerateToken(w, r) + } else { + ctxSetToken(r, realToken) + } + + if sContains(safeMethods, r.Method) || h.IsExempt(r) { + // short-circuit with a success for safe methods + h.handleSuccess(w, r) + return + } + + // if the request is secure, we enforce origin check + // for referer to prevent MITM of http->https requests + if r.URL.Scheme == "https" { + referer, err := url.Parse(r.Header.Get("Referer")) + + // if we can't parse the referer or it's empty, + // we assume it's not specified + if err != nil || referer.String() == "" { + ctxSetReason(r, ErrNoReferer) + h.handleFailure(w, r) + return + } + + // if the referer doesn't share origin with the request URL, + // we have another error for that + if !sameOrigin(referer, r.URL) { + ctxSetReason(r, ErrBadReferer) + h.handleFailure(w, r) + return + } + } + + // Finally, we check the token itself. + sentToken := extractToken(r) + + if !verifyToken(realToken, sentToken) { + ctxSetReason(r, ErrBadToken) + h.handleFailure(w, r) + return + } + + // Everything else passed, handle the success. + h.handleSuccess(w, r) +} + +// handleSuccess simply calls the successHandler. 
+// Everything else, like setting a token in the context +// is taken care of by h.ServeHTTP() +func (h *CSRFHandler) handleSuccess(w http.ResponseWriter, r *http.Request) { + h.successHandler.ServeHTTP(w, r) +} + +// Same applies here: h.ServeHTTP() sets the failure reason, the token, +// and only then calls handleFailure() +func (h *CSRFHandler) handleFailure(w http.ResponseWriter, r *http.Request) { + h.failureHandler.ServeHTTP(w, r) +} + +// Generates a new token, sets it on the given request and returns it +func (h *CSRFHandler) RegenerateToken(w http.ResponseWriter, r *http.Request) string { + token := generateToken() + h.setTokenCookie(w, r, token) + + return Token(r) +} + +func (h *CSRFHandler) setTokenCookie(w http.ResponseWriter, r *http.Request, token []byte) { + // ctxSetToken() does the masking for us + ctxSetToken(r, token) + + cookie := h.baseCookie + cookie.Name = CookieName + cookie.Value = b64encode(token) + + http.SetCookie(w, &cookie) + +} + +// Sets the handler to call in case the CSRF check +// fails. By default it's defaultFailureHandler. +func (h *CSRFHandler) SetFailureHandler(handler http.Handler) { + h.failureHandler = handler +} + +// Sets the base cookie to use when building a CSRF token cookie +// This way you can specify the Domain, Path, HttpOnly, Secure, etc. 
+func (h *CSRFHandler) SetBaseCookie(cookie http.Cookie) { + h.baseCookie = cookie +} diff --git a/vendor/github.com/justinas/nosurf/handler_go17.go b/vendor/github.com/justinas/nosurf/handler_go17.go new file mode 100644 index 0000000..2d8ee9f --- /dev/null +++ b/vendor/github.com/justinas/nosurf/handler_go17.go @@ -0,0 +1,12 @@ +// +build go1.7 + +package nosurf + +import ( + "context" + "net/http" +) + +func addNosurfContext(r *http.Request) *http.Request { + return r.WithContext(context.WithValue(r.Context(), nosurfKey, &csrfContext{})) +} diff --git a/vendor/github.com/justinas/nosurf/handler_legacy.go b/vendor/github.com/justinas/nosurf/handler_legacy.go new file mode 100644 index 0000000..6f2d10a --- /dev/null +++ b/vendor/github.com/justinas/nosurf/handler_legacy.go @@ -0,0 +1,9 @@ +// +build !go1.7 + +package nosurf + +import "net/http" + +func addNosurfContext(r *http.Request) *http.Request { + return r +} diff --git a/vendor/github.com/justinas/nosurf/token.go b/vendor/github.com/justinas/nosurf/token.go new file mode 100644 index 0000000..3c86e11 --- /dev/null +++ b/vendor/github.com/justinas/nosurf/token.go @@ -0,0 +1,105 @@ +package nosurf + +import ( + "crypto/rand" + "crypto/subtle" + "encoding/base64" + "fmt" + "io" +) + +const ( + tokenLength = 32 +) + +/* +There are two types of tokens. + +* The unmasked "real" token consists of 32 random bytes. + It is stored in a cookie (base64-encoded) and it's the + "reference" value that sent tokens get compared to. + +* The masked "sent" token consists of 64 bytes: + 32 byte key used for one-time pad masking and + 32 byte "real" token masked with the said key. + It is used as a value (base64-encoded as well) + in forms and/or headers. + +Upon processing, both tokens are base64-decoded +and then treated as 32/64 byte slices. 
+*/ + +// A token is generated by returning tokenLength bytes +// from crypto/rand +func generateToken() []byte { + bytes := make([]byte, tokenLength) + + if _, err := io.ReadFull(rand.Reader, bytes); err != nil { + panic(err) + } + + return bytes +} + +func b64encode(data []byte) string { + return base64.StdEncoding.EncodeToString(data) +} + +func b64decode(data string) []byte { + decoded, err := base64.StdEncoding.DecodeString(data) + if err != nil { + return nil + } + return decoded +} + +// VerifyToken verifies the sent token equals the real one +// and returns a bool value indicating if tokens are equal. +// Supports masked tokens. realToken comes from Token(r) and +// sentToken is token sent unusual way. +func VerifyToken(realToken, sentToken string) bool { + r := b64decode(realToken) + if len(r) == 2*tokenLength { + r = unmaskToken(r) + } + s := b64decode(sentToken) + if len(s) == 2*tokenLength { + s = unmaskToken(s) + } + return subtle.ConstantTimeCompare(r, s) == 1 +} + +func verifyToken(realToken, sentToken []byte) bool { + realN := len(realToken) + sentN := len(sentToken) + + // sentN == tokenLength means the token is unmasked + // sentN == 2*tokenLength means the token is masked. + + if realN == tokenLength && sentN == 2*tokenLength { + return verifyMasked(realToken, sentToken) + } else { + return false + } +} + +// Verifies the masked token +func verifyMasked(realToken, sentToken []byte) bool { + sentPlain := unmaskToken(sentToken) + return subtle.ConstantTimeCompare(realToken, sentPlain) == 1 +} + +func checkForPRNG() { + // Check that cryptographically secure PRNG is available + // In case it's not, panic. 
+ buf := make([]byte, 1) + _, err := io.ReadFull(rand.Reader, buf) + + if err != nil { + panic(fmt.Sprintf("crypto/rand is unavailable: Read() failed with %#v", err)) + } +} + +func init() { + checkForPRNG() +} diff --git a/vendor/github.com/justinas/nosurf/utils.go b/vendor/github.com/justinas/nosurf/utils.go new file mode 100644 index 0000000..37ae6d9 --- /dev/null +++ b/vendor/github.com/justinas/nosurf/utils.go @@ -0,0 +1,25 @@ +package nosurf + +import ( + "net/url" +) + +func sContains(slice []string, s string) bool { + // checks if the given slice contains the given string + for _, v := range slice { + if v == s { + return true + } + } + return false +} + +// Checks if the given URLs have the same origin +// (that is, they share the host, the port and the scheme) +func sameOrigin(u1, u2 *url.URL) bool { + // we take pointers, as url.Parse() returns a pointer + // and http.Request.URL is a pointer as well + + // Host is either host or host:port + return (u1.Scheme == u2.Scheme && u1.Host == u2.Host) +} diff --git a/vendor/github.com/kardianos/osext/LICENSE b/vendor/github.com/kardianos/osext/LICENSE new file mode 100644 index 0000000..7448756 --- /dev/null +++ b/vendor/github.com/kardianos/osext/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/kardianos/osext/README.md b/vendor/github.com/kardianos/osext/README.md new file mode 100644 index 0000000..15cbc3d --- /dev/null +++ b/vendor/github.com/kardianos/osext/README.md @@ -0,0 +1,21 @@ +### Extensions to the "os" package. + +[![GoDoc](https://godoc.org/github.com/kardianos/osext?status.svg)](https://godoc.org/github.com/kardianos/osext) + +## Find the current Executable and ExecutableFolder. + +As of go1.8 the Executable function may be found in `os`. The Executable function +in the std lib `os` package is used if available. + +There is sometimes utility in finding the current executable file +that is running. This can be used for upgrading the current executable +or finding resources located relative to the executable file. Both +working directory and the os.Args[0] value are arbitrary and cannot +be relied on; os.Args[0] can be "faked". + +Multi-platform and supports: + * Linux + * OS X + * Windows + * Plan 9 + * BSDs. 
diff --git a/vendor/github.com/kardianos/osext/osext.go b/vendor/github.com/kardianos/osext/osext.go new file mode 100644 index 0000000..17f380f --- /dev/null +++ b/vendor/github.com/kardianos/osext/osext.go @@ -0,0 +1,33 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Extensions to the standard "os" package. +package osext // import "github.com/kardianos/osext" + +import "path/filepath" + +var cx, ce = executableClean() + +func executableClean() (string, error) { + p, err := executable() + return filepath.Clean(p), err +} + +// Executable returns an absolute path that can be used to +// re-invoke the current program. +// It may not be valid after the current program exits. +func Executable() (string, error) { + return cx, ce +} + +// Returns same path as Executable, returns just the folder +// path. Excludes the executable name and any trailing slash. +func ExecutableFolder() (string, error) { + p, err := Executable() + if err != nil { + return "", err + } + + return filepath.Dir(p), nil +} diff --git a/vendor/github.com/kardianos/osext/osext_go18.go b/vendor/github.com/kardianos/osext/osext_go18.go new file mode 100644 index 0000000..009d8a9 --- /dev/null +++ b/vendor/github.com/kardianos/osext/osext_go18.go @@ -0,0 +1,9 @@ +//+build go1.8,!openbsd + +package osext + +import "os" + +func executable() (string, error) { + return os.Executable() +} diff --git a/vendor/github.com/kardianos/osext/osext_plan9.go b/vendor/github.com/kardianos/osext/osext_plan9.go new file mode 100644 index 0000000..95e2371 --- /dev/null +++ b/vendor/github.com/kardianos/osext/osext_plan9.go @@ -0,0 +1,22 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//+build !go1.8 + +package osext + +import ( + "os" + "strconv" + "syscall" +) + +func executable() (string, error) { + f, err := os.Open("/proc/" + strconv.Itoa(os.Getpid()) + "/text") + if err != nil { + return "", err + } + defer f.Close() + return syscall.Fd2path(int(f.Fd())) +} diff --git a/vendor/github.com/kardianos/osext/osext_procfs.go b/vendor/github.com/kardianos/osext/osext_procfs.go new file mode 100644 index 0000000..7b0debb --- /dev/null +++ b/vendor/github.com/kardianos/osext/osext_procfs.go @@ -0,0 +1,36 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.8,linux !go1.8,netbsd !go1.8,solaris !go1.8,dragonfly + +package osext + +import ( + "errors" + "fmt" + "os" + "runtime" + "strings" +) + +func executable() (string, error) { + switch runtime.GOOS { + case "linux": + const deletedTag = " (deleted)" + execpath, err := os.Readlink("/proc/self/exe") + if err != nil { + return execpath, err + } + execpath = strings.TrimSuffix(execpath, deletedTag) + execpath = strings.TrimPrefix(execpath, deletedTag) + return execpath, nil + case "netbsd": + return os.Readlink("/proc/curproc/exe") + case "dragonfly": + return os.Readlink("/proc/curproc/file") + case "solaris": + return os.Readlink(fmt.Sprintf("/proc/%d/path/a.out", os.Getpid())) + } + return "", errors.New("ExecPath not implemented for " + runtime.GOOS) +} diff --git a/vendor/github.com/kardianos/osext/osext_sysctl.go b/vendor/github.com/kardianos/osext/osext_sysctl.go new file mode 100644 index 0000000..33cee25 --- /dev/null +++ b/vendor/github.com/kardianos/osext/osext_sysctl.go @@ -0,0 +1,126 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !go1.8,darwin !go1.8,freebsd openbsd + +package osext + +import ( + "os" + "os/exec" + "path/filepath" + "runtime" + "syscall" + "unsafe" +) + +var initCwd, initCwdErr = os.Getwd() + +func executable() (string, error) { + var mib [4]int32 + switch runtime.GOOS { + case "freebsd": + mib = [4]int32{1 /* CTL_KERN */, 14 /* KERN_PROC */, 12 /* KERN_PROC_PATHNAME */, -1} + case "darwin": + mib = [4]int32{1 /* CTL_KERN */, 38 /* KERN_PROCARGS */, int32(os.Getpid()), -1} + case "openbsd": + mib = [4]int32{1 /* CTL_KERN */, 55 /* KERN_PROC_ARGS */, int32(os.Getpid()), 1 /* KERN_PROC_ARGV */} + } + + n := uintptr(0) + // Get length. + _, _, errNum := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, 0, uintptr(unsafe.Pointer(&n)), 0, 0) + if errNum != 0 { + return "", errNum + } + if n == 0 { // This shouldn't happen. + return "", nil + } + buf := make([]byte, n) + _, _, errNum = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&n)), 0, 0) + if errNum != 0 { + return "", errNum + } + if n == 0 { // This shouldn't happen. + return "", nil + } + + var execPath string + switch runtime.GOOS { + case "openbsd": + // buf now contains **argv, with pointers to each of the C-style + // NULL terminated arguments. 
+ var args []string + argv := uintptr(unsafe.Pointer(&buf[0])) + Loop: + for { + argp := *(**[1 << 20]byte)(unsafe.Pointer(argv)) + if argp == nil { + break + } + for i := 0; uintptr(i) < n; i++ { + // we don't want the full arguments list + if string(argp[i]) == " " { + break Loop + } + if argp[i] != 0 { + continue + } + args = append(args, string(argp[:i])) + n -= uintptr(i) + break + } + if n < unsafe.Sizeof(argv) { + break + } + argv += unsafe.Sizeof(argv) + n -= unsafe.Sizeof(argv) + } + execPath = args[0] + // There is no canonical way to get an executable path on + // OpenBSD, so check PATH in case we are called directly + if execPath[0] != '/' && execPath[0] != '.' { + execIsInPath, err := exec.LookPath(execPath) + if err == nil { + execPath = execIsInPath + } + } + default: + for i, v := range buf { + if v == 0 { + buf = buf[:i] + break + } + } + execPath = string(buf) + } + + var err error + // execPath will not be empty due to above checks. + // Try to get the absolute path if the execPath is not rooted. + if execPath[0] != '/' { + execPath, err = getAbs(execPath) + if err != nil { + return execPath, err + } + } + // For darwin KERN_PROCARGS may return the path to a symlink rather than the + // actual executable. + if runtime.GOOS == "darwin" { + if execPath, err = filepath.EvalSymlinks(execPath); err != nil { + return execPath, err + } + } + return execPath, nil +} + +func getAbs(execPath string) (string, error) { + if initCwdErr != nil { + return execPath, initCwdErr + } + // The execPath may begin with a "../" or a "./" so clean it first. + // Join the two paths, trailing and starting slashes undetermined, so use + // the generic Join function. 
+ return filepath.Join(initCwd, filepath.Clean(execPath)), nil +} diff --git a/vendor/github.com/kardianos/osext/osext_windows.go b/vendor/github.com/kardianos/osext/osext_windows.go new file mode 100644 index 0000000..074b3b3 --- /dev/null +++ b/vendor/github.com/kardianos/osext/osext_windows.go @@ -0,0 +1,36 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !go1.8 + +package osext + +import ( + "syscall" + "unicode/utf16" + "unsafe" +) + +var ( + kernel = syscall.MustLoadDLL("kernel32.dll") + getModuleFileNameProc = kernel.MustFindProc("GetModuleFileNameW") +) + +// GetModuleFileName() with hModule = NULL +func executable() (exePath string, err error) { + return getModuleFileName() +} + +func getModuleFileName() (string, error) { + var n uint32 + b := make([]uint16, syscall.MAX_PATH) + size := uint32(len(b)) + + r0, _, e1 := getModuleFileNameProc.Call(0, uintptr(unsafe.Pointer(&b[0])), uintptr(size)) + n = uint32(r0) + if n == 0 { + return "", e1 + } + return string(utf16.Decode(b[0:n])), nil +} diff --git a/vendor/github.com/pressly/chi/CHANGELOG.md b/vendor/github.com/pressly/chi/CHANGELOG.md new file mode 100644 index 0000000..754cd2f --- /dev/null +++ b/vendor/github.com/pressly/chi/CHANGELOG.md @@ -0,0 +1,40 @@ +# Changelog + +## v2.0.0 (2017-01-06) + +- After many months of v2 being in an RC state with many companies and users running it in +production, the inclusion of some improvements to the middlewares, we are very pleased to +announce v2.0.0 of chi. + + +## v2.0.0-rc1 (2016-07-26) + +- Huge update! chi v2 is a large refactor targetting Go 1.7+. As of Go 1.7, the popular +community `"net/context"` package has been included in the standard library as `"context"` and +utilized by `"net/http"` and `http.Request` to managing deadlines, cancelation signals and other +request-scoped values. 
We're very excited about the new context addition and are proud to
+introduce chi v2, a minimal and powerful routing package for building large HTTP services,
+with zero external dependencies. Chi focuses on idiomatic design and encourages the use of
+stdlib HTTP handlers and middlewares.
+- chi v2 deprecates its `chi.Handler` interface and requires `http.Handler` or `http.HandlerFunc`
+- chi v2 stores URL routing parameters and patterns in the standard request context: `r.Context()`
+- chi v2 lower-level routing context is accessible by `chi.RouteContext(r.Context()) *chi.Context`,
+  which provides direct access to URL routing parameters, the routing path and the matching
+  routing patterns.
+- Users upgrading from chi v1 to v2, need to:
+  1. Update the old chi.Handler signature, `func(ctx context.Context, w http.ResponseWriter, r *http.Request)` to
+  the standard http.Handler: `func(w http.ResponseWriter, r *http.Request)`
+  2. Use `chi.URLParam(r *http.Request, paramKey string) string`
+  or `URLParamFromCtx(ctx context.Context, paramKey string) string` to access a url parameter value
+
+
+## v1.0.0 (2016-07-01)
+
+- Released chi v1 stable https://github.com/pressly/chi/tree/v1.0.0 for Go 1.6 and older.
+
+
+## v0.9.0 (2016-03-31)
+
+- Reuse context objects via sync.Pool for zero-allocation routing [#33](https://github.com/pressly/chi/pull/33)
+- BREAKING NOTE: due to subtle API changes, previously `chi.URLParams(ctx)["id"]` used to access url parameters
+  has changed to: `chi.URLParam(ctx, "id")`
diff --git a/vendor/github.com/pressly/chi/CONTRIBUTING.md b/vendor/github.com/pressly/chi/CONTRIBUTING.md
new file mode 100644
index 0000000..5ea99fa
--- /dev/null
+++ b/vendor/github.com/pressly/chi/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+# Contributing
+
+## Prerequisites
+
+1. [Install Go][go-install].
+2.
Download the sources and switch the working directory: + + ```bash + go get -u -d github.com/pressly/chi + cd $GOPATH/src/github.com/pressly/chi + ``` + +## Submitting a Pull Request + +A typical workflow is: + +1. [Fork the repository.][fork] [This tip maybe also helpful.][go-fork-tip] +2. [Create a topic branch.][branch] +3. Add tests for your change. +4. Run `go test`. If your tests pass, return to the step 3. +5. Implement the change and ensure the steps from the previous step pass. +6. Run `goimports -w .`, to ensure the new code conforms to Go formatting guideline. +7. [Add, commit and push your changes.][git-help] +8. [Submit a pull request.][pull-req] + +[go-install]: https://golang.org/doc/install +[go-fork-tip]: http://blog.campoy.cat/2014/03/github-and-go-forking-pull-requests-and.html +[fork]: https://help.github.com/articles/fork-a-repo +[branch]: http://learn.github.com/p/branching.html +[git-help]: https://guides.github.com +[pull-req]: https://help.github.com/articles/using-pull-requests diff --git a/vendor/github.com/pressly/chi/LICENSE b/vendor/github.com/pressly/chi/LICENSE new file mode 100644 index 0000000..7b5f914 --- /dev/null +++ b/vendor/github.com/pressly/chi/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2015-present Peter Kieltyka (https://github.com/pkieltyka) + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/pressly/chi/README.md b/vendor/github.com/pressly/chi/README.md new file mode 100644 index 0000000..19cb9b6 --- /dev/null +++ b/vendor/github.com/pressly/chi/README.md @@ -0,0 +1,408 @@ + +=== + +[![GoDoc Widget]][GoDoc] [![Travis Widget]][Travis] + +`chi` is a lightweight, idiomatic and composable router for building Go 1.7+ HTTP services. It's +especially good at helping you write large REST API services that are kept maintainable as your +project grows and changes. `chi` is built on the new `context` package introduced in Go 1.7 to +handle signaling, cancelation and request-scoped values across a handler chain. + +The focus of the project has been to seek out an elegant and comfortable design for writing +REST API servers, written during the development of the Pressly API service that powers our +public API service, which in turn powers all of our client-side applications. + +The key considerations of chi's design are: project structure, maintainability, standard http +handlers (stdlib-only), developer productivity, and deconstructing a large system into many small +parts. The core router `github.com/pressly/chi` is quite small (less than 1000 LOC), but we've also +included some useful/optional subpackages: `middleware`, `render` and `docgen`. We hope you enjoy it too! 
+ +## Install + +`go get -u github.com/pressly/chi` + + +## Features + +* **Lightweight** - cloc'd in <1000 LOC for the chi router +* **Fast** - yes, see [benchmarks](#benchmarks) +* **100% compatible with net/http** - use any http or middleware pkg in the ecosystem that is also compat with `net/http` +* **Designed for modular/composable APIs** - middlewares, inline middlewares, route groups and subrouter mounting +* **Context control** - built on new `context` package, providing value chaining, cancelations and timeouts +* **Robust** - tested / used in production at Pressly.com, and many others +* **Doc generation** - `docgen` auto-generates routing documentation from your source to JSON or Markdown +* **No external dependencies** - plain ol' Go 1.7+ stdlib + net/http + + +## Examples + +* [rest](https://github.com/pressly/chi/blob/master/_examples/rest/main.go) - REST APIs made easy, productive and maintainable +* [logging](https://github.com/pressly/chi/blob/master/_examples/logging/main.go) - Easy structured logging for any backend +* [limits](https://github.com/pressly/chi/blob/master/_examples/limits/main.go) - Timeouts and Throttling +* [todos-resource](https://github.com/pressly/chi/blob/master/_examples/todos-resource/main.go) - Struct routers/handlers, an example of another code layout style +* [versions](https://github.com/pressly/chi/blob/master/_examples/versions/main.go) - Demo of `chi/render` subpkg +* [fileserver](https://github.com/pressly/chi/blob/master/_examples/fileserver/main.go) - Easily serve static files +* [graceful](https://github.com/pressly/chi/blob/master/_examples/graceful/main.go) - Graceful context signaling and server shutdown + + +**As easy as:** + +```go +package main + +import ( + "net/http" + "github.com/pressly/chi" +) + +func main() { + r := chi.NewRouter() + r.Get("/", func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("welcome")) + }) + http.ListenAndServe(":3000", r) +} +``` + +**REST Preview:** + +Here is a 
little preview of how routing looks like with chi. Also take a look at the generated routing docs +in JSON ([routes.json](https://github.com/pressly/chi/blob/master/_examples/rest/routes.json)) and in +Markdown ([routes.md](https://github.com/pressly/chi/blob/master/_examples/rest/routes.md)). + +I highly recommend reading the source of the [examples](#examples) listed above, they will show you all the features +of chi and serve as a good form of documentation. + +```go +import ( + //... + "context" + "github.com/pressly/chi" + "github.com/pressly/chi/middleware" +) + +func main() { + r := chi.NewRouter() + + // A good base middleware stack + r.Use(middleware.RequestID) + r.Use(middleware.RealIP) + r.Use(middleware.Logger) + r.Use(middleware.Recoverer) + + // When a client closes their connection midway through a request, the + // http.CloseNotifier will cancel the request context (ctx). + r.Use(middleware.CloseNotify) + + // Set a timeout value on the request context (ctx), that will signal + // through ctx.Done() that the request has timed out and further + // processing should be stopped. 
+ r.Use(middleware.Timeout(60 * time.Second)) + + r.Get("/", func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("hi")) + }) + + // RESTy routes for "articles" resource + r.Route("/articles", func(r chi.Router) { + r.With(paginate).Get("/", listArticles) // GET /articles + r.Post("/", createArticle) // POST /articles + r.Get("/search", searchArticles) // GET /articles/search + + r.Route("/:articleID", func(r chi.Router) { + r.Use(ArticleCtx) + r.Get("/", getArticle) // GET /articles/123 + r.Put("/", updateArticle) // PUT /articles/123 + r.Delete("/", deleteArticle) // DELETE /articles/123 + }) + }) + + // Mount the admin sub-router + r.Mount("/admin", adminRouter()) + + http.ListenAndServe(":3333", r) +} + +func ArticleCtx(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + articleID := chi.URLParam(r, "articleID") + article, err := dbGetArticle(articleID) + if err != nil { + http.Error(w, http.StatusText(404), 404) + return + } + ctx := context.WithValue(r.Context(), "article", article) + next.ServeHTTP(w, r.WithContext(ctx)) + }) +} + +func getArticle(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + article, ok := ctx.Value("article").(*Article) + if !ok { + http.Error(w, http.StatusText(422), 422) + return + } + w.Write([]byte(fmt.Sprintf("title:%s", article.Title))) +} + +// A completely separate router for administrator routes +func adminRouter() http.Handler { + r := chi.NewRouter() + r.Use(AdminOnly) + r.Get("/", adminIndex) + r.Get("/accounts", adminListAccounts) + return r +} + +func AdminOnly(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + perm, ok := ctx.Value("acl.permission").(YourPermissionType) + if !ok || !perm.IsAdmin() { + http.Error(w, http.StatusText(403), 403) + return + } + next.ServeHTTP(w, r) + }) +} +``` + + +## Router design + +chi's router is based on a kind of [Patricia 
Radix trie](https://en.wikipedia.org/wiki/Radix_tree).
+The router is fully compatible with `net/http`.
+
+Built on top of the tree is the `Router` interface:
+
+```go
+// Router consisting of the core routing methods used by chi's Mux,
+// using only the standard net/http.
+type Router interface {
+	http.Handler
+	Routes
+
+	// Use appends one or more middlewares onto the Router stack.
+	Use(middlewares ...func(http.Handler) http.Handler)
+
+	// With adds inline middlewares for an endpoint handler.
+	With(middlewares ...func(http.Handler) http.Handler) Router
+
+	// Group adds a new inline-Router along the current routing
+	// path, with a fresh middleware stack for the inline-Router.
+	Group(fn func(r Router)) Router
+
+	// Route mounts a sub-Router along a `pattern` string.
+	Route(pattern string, fn func(r Router)) Router
+
+	// Mount attaches another http.Handler along ./pattern/*
+	Mount(pattern string, h http.Handler)
+
+	// Handle and HandleFunc adds routes for `pattern` that matches
+	// all HTTP methods.
+	Handle(pattern string, h http.Handler)
+	HandleFunc(pattern string, h http.HandlerFunc)
+
+	// HTTP-method routing along `pattern`
+	Connect(pattern string, h http.HandlerFunc)
+	Delete(pattern string, h http.HandlerFunc)
+	Get(pattern string, h http.HandlerFunc)
+	Head(pattern string, h http.HandlerFunc)
+	Options(pattern string, h http.HandlerFunc)
+	Patch(pattern string, h http.HandlerFunc)
+	Post(pattern string, h http.HandlerFunc)
+	Put(pattern string, h http.HandlerFunc)
+	Trace(pattern string, h http.HandlerFunc)
+
+	// NotFound defines a handler to respond whenever a route could
+	// not be found.
+	NotFound(h http.HandlerFunc)
+}
+
+// Routes interface adds two methods for router traversal, which is also
+// used by the `docgen` subpackage to generate documentation for Routers.
+type Routes interface {
+	// Routes returns the routing tree in an easily traversable structure.
+ Routes() []Route + + // Middlewares returns the list of middlewares in use by the router. + Middlewares() Middlewares +} +``` + +Each routing method accepts a URL `pattern` and chain of `handlers`. The URL pattern +supports named params (ie. `/users/:userID`) and wildcards (ie. `/admin/*`). + + +### Middleware handlers + +chi's middlewares are just stdlib net/http middleware handlers. There is nothing special +about them, which means the router and all the tooling is designed to be compatible and +friendly with any middleware in the community. This offers much better extensibility and reuse +of packages and is at the heart of chi's purpose. + +Here is an example of a standard net/http middleware handler using the new request context +available in Go 1.7+. This middleware sets a hypothetical user identifier on the request +context and calls the next handler in the chain. + +```go +// HTTP middleware setting a value on the request context +func MyMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := context.WithValue(r.Context(), "user", "123") + next.ServeHTTP(w, r.WithContext(ctx)) + }) +} +``` + + +### Request handlers + +chi uses standard net/http request handlers. This little snippet is an example of a http.Handler +func that reads a user identifier from the request context - hypothetically, identifying +the user sending an authenticated request, validated+set by a previous middleware handler. + +```go +// HTTP handler accessing data from the request context. +func MyRequestHandler(w http.ResponseWriter, r *http.Request) { + user := r.Context().Value("user").(string) + w.Write([]byte(fmt.Sprintf("hi %s", user))) +} +``` + + +### URL parameters + +chi's router parses and stores URL parameters right onto the request context. Here is +an example of how to access URL params in your net/http handlers. And of course, middlewares +are able to access the same information. 
+ +```go +// HTTP handler accessing the url routing parameters. +func MyRequestHandler(w http.ResponseWriter, r *http.Request) { + userID := chi.URLParam(r, "userID") // from a route like /users/:userID + + ctx := r.Context() + key := ctx.Value("key").(string) + + w.Write([]byte(fmt.Sprintf("hi %v, %v", userID, key))) +} +``` + + +## Middlewares + +chi comes equipped with an optional `middleware` package, providing a suite of standard +`net/http` middlewares. Please note, any middleware in the ecosystem that is also compatible +with `net/http` can be used with chi's mux. + +---------------------------------------------------------------------------------------------------------- +| Middleware | Description | +|:---------------------|:--------------------------------------------------------------------------------- +| RequestID | Injects a request ID into the context of each request. | +| RealIP | Sets a http.Request's RemoteAddr to either X-Forwarded-For or X-Real-IP. | +| Logger | Logs the start and end of each request with the elapsed processing time. | +| Recoverer | Gracefully absorb panics and prints the stack trace. | +| NoCache | Sets response headers to prevent clients from caching. | +| Timeout | Signals to the request context when the timeout deadline is reached. | +| Throttle | Puts a ceiling on the number of concurrent requests. | +| Compress | Gzip compression for clients that accept compressed responses. | +| Profiler | Easily attach net/http/pprof to your routers. | +| StripSlashes | Strip slashes on routing paths. | +| RedirectSlashes | Redirect slashes on routing paths. | +| WithValue | Short-hand middleware to set a key/value on the request context. | +| Heartbeat | Monitoring endpoint to check the servers pulse. 
| +---------------------------------------------------------------------------------------------------------- + +Other cool net/http middlewares: + +* [jwtauth](https://github.com/goware/jwtauth) - JWT authenticator +* [cors](https://github.com/goware/cors) - CORS middleware +* [httpcoala](https://github.com/goware/httpcoala) - Request coalescer + +please [submit a PR](./CONTRIBUTING.md) if you'd like to include a link to a chi middleware + + +## context? + +`context` is a tiny pkg that provides simple interface to signal context across call stacks +and goroutines. It was originally written by [Sameer Ajmani](https://github.com/Sajmani) +and is available in stdlib since go1.7. + +Learn more at https://blog.golang.org/context + +and.. +* Docs: https://golang.org/pkg/context +* Source: https://github.com/golang/go/tree/master/src/context + + +## Benchmarks + +The benchmark suite: https://github.com/pkieltyka/go-http-routing-benchmark + +Comparison with other routers (as of Jan 7/17): https://gist.github.com/pkieltyka/d0814d5396c996cb3ff8076399583d1f + +```shell +BenchmarkChi_Param 5000000 398 ns/op 304 B/op 2 allocs/op +BenchmarkChi_Param5 3000000 556 ns/op 304 B/op 2 allocs/op +BenchmarkChi_Param20 1000000 1184 ns/op 304 B/op 2 allocs/op +BenchmarkChi_ParamWrite 3000000 443 ns/op 304 B/op 2 allocs/op +BenchmarkChi_GithubStatic 3000000 427 ns/op 304 B/op 2 allocs/op +BenchmarkChi_GithubParam 3000000 565 ns/op 304 B/op 2 allocs/op +BenchmarkChi_GithubAll 10000 122143 ns/op 61716 B/op 406 allocs/op +BenchmarkChi_GPlusStatic 5000000 383 ns/op 304 B/op 2 allocs/op +BenchmarkChi_GPlusParam 3000000 431 ns/op 304 B/op 2 allocs/op +BenchmarkChi_GPlus2Params 3000000 500 ns/op 304 B/op 2 allocs/op +BenchmarkChi_GPlusAll 200000 6410 ns/op 3952 B/op 26 allocs/op +BenchmarkChi_ParseStatic 5000000 384 ns/op 304 B/op 2 allocs/op +BenchmarkChi_ParseParam 3000000 415 ns/op 304 B/op 2 allocs/op +BenchmarkChi_Parse2Params 3000000 450 ns/op 304 B/op 2 allocs/op +BenchmarkChi_ParseAll 
100000 12124 ns/op 7904 B/op 52 allocs/op +BenchmarkChi_StaticAll 20000 78501 ns/op 47731 B/op 314 allocs/op +``` + +NOTE: the allocs in the benchmark above are from the calls to http.Request's +`WithContext(context.Context)` method that clones the http.Request, sets the `Context()` +on the duplicated (alloc'd) request and returns it the new request object. This is just +how setting context on a request in Go 1.7+ works. + + +## Credits + +* Carl Jackson for https://github.com/zenazn/goji + * Parts of chi's thinking comes from goji, and chi's middleware package + sources from goji. +* Armon Dadgar for https://github.com/armon/go-radix +* Contributions: [@VojtechVitek](https://github.com/VojtechVitek) + +We'll be more than happy to see [your contributions](./CONTRIBUTING.md)! + + +## Beyond REST + +chi is just a http router that lets you decompose request handling into many smaller layers. +Many companies including Pressly.com (of course) use chi to write REST services for their public +APIs. But, REST is just a convention for managing state via HTTP, and there's a lot of other pieces +required to write a complete client-server system or network of microservices. + +Looking ahead beyond REST, I also recommend some newer works in the field coming from +[gRPC](https://github.com/grpc/grpc-go), [NATS](https://nats.io), [go-kit](https://github.com/go-kit/kit) +and even [graphql](https://github.com/graphql-go/graphql). They're all pretty cool with their +own unique approaches and benefits. Specifically, I'd look at gRPC since it makes client-server +communication feel like a single program on a single computer, no need to hand-write a client library +and the request/response payloads are typed contracts. NATS is pretty amazing too as a super +fast and lightweight pub-sub transport that can speak protobufs, with nice service discovery - +an excellent combination with gRPC. 
+ + +## License + +Copyright (c) 2015-present [Peter Kieltyka](https://github.com/pkieltyka) + +Licensed under [MIT License](./LICENSE) + +[GoDoc]: https://godoc.org/github.com/pressly/chi +[GoDoc Widget]: https://godoc.org/github.com/pressly/chi?status.svg +[Travis]: https://travis-ci.org/pressly/chi +[Travis Widget]: https://travis-ci.org/pressly/chi.svg?branch=master diff --git a/vendor/github.com/pressly/chi/chain.go b/vendor/github.com/pressly/chi/chain.go new file mode 100644 index 0000000..30e5247 --- /dev/null +++ b/vendor/github.com/pressly/chi/chain.go @@ -0,0 +1,47 @@ +package chi + +import "net/http" + +// Chain returns a Middlewares type from a slice of middleware handlers. +func Chain(middlewares ...func(http.Handler) http.Handler) Middlewares { + return Middlewares(middlewares) +} + +// Handler builds and returns a http.Handler from the chain of middlewares, +// with `h http.Handler` as the final handler. +func (mws Middlewares) Handler(h http.Handler) http.Handler { + return &ChainHandler{mws, h, chain(mws, h)} +} + +// HandlerFunc builds and returns a http.Handler from the chain of middlewares, +// with `h http.Handler` as the final handler. +func (mws Middlewares) HandlerFunc(h http.HandlerFunc) http.Handler { + return &ChainHandler{mws, h, chain(mws, h)} +} + +type ChainHandler struct { + Middlewares Middlewares + Endpoint http.Handler + chain http.Handler +} + +func (c *ChainHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + c.chain.ServeHTTP(w, r) +} + +// chain builds a http.Handler composed of an inline middleware stack and endpoint +// handler in the order they are passed. 
+func chain(middlewares []func(http.Handler) http.Handler, endpoint http.Handler) http.Handler { + // Return ahead of time if there aren't any middlewares for the chain + if len(middlewares) == 0 { + return endpoint + } + + // Wrap the end handler with the middleware chain + h := middlewares[len(middlewares)-1](endpoint) + for i := len(middlewares) - 2; i >= 0; i-- { + h = middlewares[i](h) + } + + return h +} diff --git a/vendor/github.com/pressly/chi/chi.go b/vendor/github.com/pressly/chi/chi.go new file mode 100644 index 0000000..7ef709e --- /dev/null +++ b/vendor/github.com/pressly/chi/chi.go @@ -0,0 +1,98 @@ +// +// Package chi is a small, idiomatic and composable router for building HTTP services. +// +// chi requires Go 1.7 or newer. +// +// Example: +// package main +// +// import ( +// "net/http" +// +// "github.com/pressly/chi" +// "github.com/pressly/chi/middleware" +// ) +// +// func main() { +// r := chi.NewRouter() +// r.Use(middleware.Logger) +// r.Use(middleware.Recoverer) +// +// r.Get("/", func(w http.ResponseWriter, r *http.Request) { +// w.Write([]byte("root.")) +// }) +// +// http.ListenAndServe(":3333", r) +// } +// +// See github.com/pressly/chi/_examples/ for more in-depth examples. +// +package chi + +import "net/http" + +// NewRouter returns a new Mux object that implements the Router interface. +func NewRouter() *Mux { + return NewMux() +} + +// Router consisting of the core routing methods used by chi's Mux, +// using only the standard net/http. +type Router interface { + http.Handler + Routes + + // Use appends one of more middlewares onto the Router stack. + Use(middlewares ...func(http.Handler) http.Handler) + + // With adds inline middlewares for an endpoint handler. + With(middlewares ...func(http.Handler) http.Handler) Router + + // Group adds a new inline-Router along the current routing + // path, with a fresh middleware stack for the inline-Router. 
+ Group(fn func(r Router)) Router + + // Route mounts a sub-Router along a `pattern`` string. + Route(pattern string, fn func(r Router)) Router + + // Mount attaches another http.Handler along ./pattern/* + Mount(pattern string, h http.Handler) + + // Handle and HandleFunc adds routes for `pattern` that matches + // all HTTP methods. + Handle(pattern string, h http.Handler) + HandleFunc(pattern string, h http.HandlerFunc) + + // HTTP-method routing along `pattern` + Connect(pattern string, h http.HandlerFunc) + Delete(pattern string, h http.HandlerFunc) + Get(pattern string, h http.HandlerFunc) + Head(pattern string, h http.HandlerFunc) + Options(pattern string, h http.HandlerFunc) + Patch(pattern string, h http.HandlerFunc) + Post(pattern string, h http.HandlerFunc) + Put(pattern string, h http.HandlerFunc) + Trace(pattern string, h http.HandlerFunc) + + // NotFound defines a handler to respond whenever a route could + // not be found. + NotFound(h http.HandlerFunc) + + // MethodNotAllowed defines a handler to respond whenever a method is + // not allowed. + MethodNotAllowed(h http.HandlerFunc) +} + +// Routes interface adds two methods for router traversal, which is also +// used by the `docgen` subpackage to generation documentation for Routers. +type Routes interface { + // Routes returns the routing tree in an easily traversable structure. + Routes() []Route + + // Middlewares returns the list of middlewares in use by the router. + Middlewares() Middlewares +} + +// Middlewares type is a slice of standard middleware handlers with methods +// to compose middleware chains and http.Handler's. 
+type Middlewares []func(http.Handler) http.Handler diff --git a/vendor/github.com/pressly/chi/context.go b/vendor/github.com/pressly/chi/context.go new file mode 100644 index 0000000..189e7a8 --- /dev/null +++ b/vendor/github.com/pressly/chi/context.go @@ -0,0 +1,138 @@ +package chi + +import ( + "context" + "net" + "net/http" +) + +var ( + RouteCtxKey = &contextKey{"RouteContext"} +) + +// Context is the default routing context set on the root node of a +// request context to track URL parameters and an optional routing path. +type Context struct { + // URL routing parameter key and values. + URLParams params + + // Routing path override used by subrouters. + RoutePath string + + // Routing pattern matching the path. + RoutePattern string + + // Routing patterns throughout the lifecycle of the request, + // across all connected routers. + RoutePatterns []string +} + +// NewRouteContext returns a new routing Context object. +func NewRouteContext() *Context { + return &Context{} +} + +// reset a routing context to its initial state. +func (x *Context) reset() { + x.URLParams = x.URLParams[:0] + x.RoutePath = "" + x.RoutePattern = "" + x.RoutePatterns = x.RoutePatterns[:0] +} + +// RouteContext returns chi's routing Context object from a +// http.Request Context. +func RouteContext(ctx context.Context) *Context { + return ctx.Value(RouteCtxKey).(*Context) +} + +// URLParam returns the url parameter from a http.Request object. +func URLParam(r *http.Request, key string) string { + if rctx := RouteContext(r.Context()); rctx != nil { + return rctx.URLParams.Get(key) + } + return "" +} + +// URLParamFromCtx returns the url parameter from a http.Request Context. 
+func URLParamFromCtx(ctx context.Context, key string) string { + if rctx := RouteContext(ctx); rctx != nil { + return rctx.URLParams.Get(key) + } + return "" +} + +type param struct { + Key, Value string +} + +type params []param + +func (ps *params) Add(key string, value string) { + *ps = append(*ps, param{key, value}) +} + +func (ps params) Get(key string) string { + for _, p := range ps { + if p.Key == key { + return p.Value + } + } + return "" +} + +func (ps *params) Set(key string, value string) { + idx := -1 + for i, p := range *ps { + if p.Key == key { + idx = i + break + } + } + if idx < 0 { + (*ps).Add(key, value) + } else { + (*ps)[idx] = param{key, value} + } +} + +func (ps *params) Del(key string) string { + for i, p := range *ps { + if p.Key == key { + *ps = append((*ps)[:i], (*ps)[i+1:]...) + return p.Value + } + } + return "" +} + +// ServerBaseContext wraps an http.Handler to set the request context to the +// `baseCtx`. +func ServerBaseContext(h http.Handler, baseCtx context.Context) http.Handler { + fn := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + baseCtx := baseCtx + + // Copy over default net/http server context keys + if v, ok := ctx.Value(http.ServerContextKey).(*http.Server); ok { + baseCtx = context.WithValue(baseCtx, http.ServerContextKey, v) + } + if v, ok := ctx.Value(http.LocalAddrContextKey).(net.Addr); ok { + baseCtx = context.WithValue(baseCtx, http.LocalAddrContextKey, v) + } + + h.ServeHTTP(w, r.WithContext(baseCtx)) + }) + return fn +} + +// contextKey is a value for use with context.WithValue. It's used as +// a pointer so it fits in an interface{} without allocation. This technique +// for defining context keys was copied from Go 1.7's new use of context in net/http. 
+type contextKey struct { + name string +} + +func (k *contextKey) String() string { + return "chi context value " + k.name +} diff --git a/vendor/github.com/pressly/chi/middleware/closenotify17.go b/vendor/github.com/pressly/chi/middleware/closenotify17.go new file mode 100644 index 0000000..95802b1 --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/closenotify17.go @@ -0,0 +1,42 @@ +// +build go1.7,!go1.8 + +package middleware + +import ( + "context" + "net/http" +) + +// CloseNotify is a middleware that cancels ctx when the underlying +// connection has gone away. It can be used to cancel long operations +// on the server when the client disconnects before the response is ready. +// +// Note: this behaviour is standard in Go 1.8+, so the middleware does nothing +// on 1.8+ and exists just for backwards compatibility. +func CloseNotify(next http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + cn, ok := w.(http.CloseNotifier) + if !ok { + panic("chi/middleware: CloseNotify expects http.ResponseWriter to implement http.CloseNotifier interface") + } + closeNotifyCh := cn.CloseNotify() + + ctx, cancel := context.WithCancel(r.Context()) + defer cancel() + + go func() { + select { + case <-ctx.Done(): + return + case <-closeNotifyCh: + cancel() + return + } + }() + + r = r.WithContext(ctx) + next.ServeHTTP(w, r) + } + + return http.HandlerFunc(fn) +} diff --git a/vendor/github.com/pressly/chi/middleware/closenotify18.go b/vendor/github.com/pressly/chi/middleware/closenotify18.go new file mode 100644 index 0000000..ec54bca --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/closenotify18.go @@ -0,0 +1,17 @@ +// +build go1.8 + +package middleware + +import ( + "net/http" +) + +// CloseNotify is a middleware that cancels ctx when the underlying +// connection has gone away. It can be used to cancel long operations +// on the server when the client disconnects before the response is ready. 
+// +// Note: this behaviour is standard in Go 1.8+, so the middleware does nothing +// on 1.8+ and exists just for backwards compatibility. +func CloseNotify(next http.Handler) http.Handler { + return next +} diff --git a/vendor/github.com/pressly/chi/middleware/compress.go b/vendor/github.com/pressly/chi/middleware/compress.go new file mode 100644 index 0000000..17e2f3e --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/compress.go @@ -0,0 +1,212 @@ +package middleware + +import ( + "bufio" + "compress/flate" + "compress/gzip" + "errors" + "io" + "net" + "net/http" + "strings" +) + +type encoding int + +const ( + encodingNone encoding = iota + encodingGzip + encodingDeflate +) + +var defaultContentTypes = map[string]struct{}{ + "text/html": struct{}{}, + "text/css": struct{}{}, + "text/plain": struct{}{}, + "text/javascript": struct{}{}, + "application/javascript": struct{}{}, + "application/x-javascript": struct{}{}, + "application/json": struct{}{}, + "application/atom+xml": struct{}{}, + "application/rss+xml ": struct{}{}, +} + +// DefaultCompress is a middleware that compresses response +// body of predefined content types to a data format based +// on Accept-Encoding request header. It uses a default +// compression level. +func DefaultCompress(next http.Handler) http.Handler { + return Compress(flate.DefaultCompression)(next) +} + +// Compress is a middleware that compresses response +// body of a given content types to a data format based +// on Accept-Encoding request header. It uses a given +// compression level. 
+func Compress(level int, types ...string) func(next http.Handler) http.Handler { + contentTypes := defaultContentTypes + if len(types) > 0 { + contentTypes = make(map[string]struct{}, len(types)) + for _, t := range types { + contentTypes[t] = struct{}{} + } + } + + return func(next http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + mcw := &maybeCompressResponseWriter{ + ResponseWriter: w, + w: w, + contentTypes: contentTypes, + encoding: selectEncoding(r.Header), + level: level, + } + defer mcw.Close() + + next.ServeHTTP(mcw, r) + } + + return http.HandlerFunc(fn) + } +} + +func selectEncoding(h http.Header) encoding { + enc := h.Get("Accept-Encoding") + + switch { + // TODO: + // case "br": // Brotli, experimental. Firefox 2016, to-be-in Chromium. + // case "lzma": // Opera. + // case "sdch": // Chrome, Android. Gzip output + dictionary header. + + case strings.Contains(enc, "gzip"): + // TODO: Exception for old MSIE browsers that can't handle non-HTML? + // https://zoompf.com/blog/2012/02/lose-the-wait-http-compression + return encodingGzip + + case strings.Contains(enc, "deflate"): + // HTTP 1.1 "deflate" (RFC 2616) stands for DEFLATE data (RFC 1951) + // wrapped with zlib (RFC 1950). The zlib wrapper uses Adler-32 + // checksum compared to CRC-32 used in "gzip" and thus is faster. + // + // But.. some old browsers (MSIE, Safari 5.1) incorrectly expect + // raw DEFLATE data only, without the mentioned zlib wrapper. + // Because of this major confusion, most modern browsers try it + // both ways, first looking for zlib headers. + // Quote by Mark Adler: http://stackoverflow.com/a/9186091/385548 + // + // The list of browsers having problems is quite big, see: + // http://zoompf.com/blog/2012/02/lose-the-wait-http-compression + // https://web.archive.org/web/20120321182910/http://www.vervestudios.co/projects/compression-tests/results + // + // That's why we prefer gzip over deflate. 
It's just more reliable + // and not significantly slower than gzip. + return encodingDeflate + + // NOTE: Not implemented, intentionally: + // case "compress": // LZW. Deprecated. + // case "bzip2": // Too slow on-the-fly. + // case "zopfli": // Too slow on-the-fly. + // case "xz": // Too slow on-the-fly. + } + + return encodingNone +} + +type maybeCompressResponseWriter struct { + http.ResponseWriter + w io.Writer + encoding encoding + contentTypes map[string]struct{} + level int + wroteHeader bool +} + +func (w *maybeCompressResponseWriter) WriteHeader(code int) { + if w.wroteHeader { + return + } + w.wroteHeader = true + defer w.ResponseWriter.WriteHeader(code) + + // Already compressed data? + if w.ResponseWriter.Header().Get("Content-Encoding") != "" { + return + } + // The content-length after compression is unknown + w.ResponseWriter.Header().Del("Content-Length") + + // Parse the first part of the Content-Type response header. + contentType := "" + parts := strings.Split(w.ResponseWriter.Header().Get("Content-Type"), ";") + if len(parts) > 0 { + contentType = parts[0] + } + + // Is the content type compressable? + if _, ok := w.contentTypes[contentType]; !ok { + return + } + + // Select the compress writer. 
+ switch w.encoding { + case encodingGzip: + gw, err := gzip.NewWriterLevel(w.ResponseWriter, w.level) + if err != nil { + w.w = w.ResponseWriter + return + } + w.w = gw + w.ResponseWriter.Header().Set("Content-Encoding", "gzip") + + case encodingDeflate: + dw, err := flate.NewWriter(w.ResponseWriter, w.level) + if err != nil { + w.w = w.ResponseWriter + return + } + w.w = dw + w.ResponseWriter.Header().Set("Content-Encoding", "deflate") + } +} + +func (w *maybeCompressResponseWriter) Write(p []byte) (int, error) { + if !w.wroteHeader { + w.WriteHeader(http.StatusOK) + } + + return w.w.Write(p) +} + +func (w *maybeCompressResponseWriter) Flush() { + if f, ok := w.w.(http.Flusher); ok { + f.Flush() + } +} + +func (w *maybeCompressResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + if hj, ok := w.w.(http.Hijacker); ok { + return hj.Hijack() + } + return nil, nil, errors.New("chi/middleware: http.Hijacker is unavailable on the writer") +} + +func (w *maybeCompressResponseWriter) CloseNotify() <-chan bool { + if cn, ok := w.w.(http.CloseNotifier); ok { + return cn.CloseNotify() + } + + // If the underlying writer does not implement http.CloseNotifier, return + // a channel that never receives a value. The semantics here is that the + // client never disconnnects before the request is processed by the + // http.Handler, which is close enough to the default behavior (when + // CloseNotify() is not even called). 
+ return make(chan bool, 1) +} + +func (w *maybeCompressResponseWriter) Close() error { + if c, ok := w.w.(io.WriteCloser); ok { + return c.Close() + } + return errors.New("chi/middleware: io.WriteCloser is unavailable on the writer") +} diff --git a/vendor/github.com/pressly/chi/middleware/compress18.go b/vendor/github.com/pressly/chi/middleware/compress18.go new file mode 100644 index 0000000..ad12b14 --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/compress18.go @@ -0,0 +1,15 @@ +// +build go1.8 + +package middleware + +import ( + "errors" + "net/http" +) + +func (w *maybeCompressResponseWriter) Push(target string, opts *http.PushOptions) error { + if ps, ok := w.w.(http.Pusher); ok { + return ps.Push(target, opts) + } + return errors.New("chi/middleware: http.Pusher is unavailable on the writer") +} diff --git a/vendor/github.com/pressly/chi/middleware/heartbeat.go b/vendor/github.com/pressly/chi/middleware/heartbeat.go new file mode 100644 index 0000000..fe822fb --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/heartbeat.go @@ -0,0 +1,26 @@ +package middleware + +import ( + "net/http" + "strings" +) + +// Heartbeat endpoint middleware useful to setting up a path like +// `/ping` that load balancers or uptime testing external services +// can make a request before hitting any routes. It's also convenient +// to place this above ACL middlewares as well. 
+func Heartbeat(endpoint string) func(http.Handler) http.Handler { + f := func(h http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + if r.Method == "GET" && strings.EqualFold(r.URL.Path, endpoint) { + w.Header().Set("Content-Type", "text/plain") + w.WriteHeader(http.StatusOK) + w.Write([]byte(".")) + return + } + h.ServeHTTP(w, r) + } + return http.HandlerFunc(fn) + } + return f +} diff --git a/vendor/github.com/pressly/chi/middleware/logger.go b/vendor/github.com/pressly/chi/middleware/logger.go new file mode 100644 index 0000000..7d68add --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/logger.go @@ -0,0 +1,137 @@ +package middleware + +import ( + "bytes" + "context" + "log" + "net/http" + "os" + "time" +) + +var ( + LogEntryCtxKey = &contextKey{"LogEntry"} + + DefaultLogger = RequestLogger(&DefaultLogFormatter{Logger: log.New(os.Stdout, "", log.LstdFlags)}) +) + +// Logger is a middleware that logs the start and end of each request, along +// with some useful data about what was requested, what the response status was, +// and how long it took to return. When standard output is a TTY, Logger will +// print in color, otherwise it will print in black and white. Logger prints a +// request ID if one is provided. +// +// Alternatively, look at https://github.com/pressly/lg and the `lg.RequestLogger` +// middleware pkg. 
+func Logger(next http.Handler) http.Handler { + return DefaultLogger(next) +} + +func RequestLogger(f LogFormatter) func(next http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + entry := f.NewLogEntry(r) + ww := NewWrapResponseWriter(w, r.ProtoMajor) + + t1 := time.Now() + defer func() { + t2 := time.Now() + entry.Write(ww.Status(), ww.BytesWritten(), t2.Sub(t1)) + }() + + next.ServeHTTP(ww, WithLogEntry(r, entry)) + } + return http.HandlerFunc(fn) + } +} + +type LogFormatter interface { + NewLogEntry(r *http.Request) LogEntry +} + +type LogEntry interface { + Write(status, bytes int, elapsed time.Duration) + Panic(v interface{}, stack []byte) +} + +func GetLogEntry(r *http.Request) LogEntry { + entry, _ := r.Context().Value(LogEntryCtxKey).(LogEntry) + return entry +} + +func WithLogEntry(r *http.Request, entry LogEntry) *http.Request { + r = r.WithContext(context.WithValue(r.Context(), LogEntryCtxKey, entry)) + return r +} + +type DefaultLogFormatter struct { + Logger *log.Logger +} + +func (l *DefaultLogFormatter) NewLogEntry(r *http.Request) LogEntry { + entry := &defaultLogEntry{ + DefaultLogFormatter: l, + request: r, + buf: &bytes.Buffer{}, + } + + reqID := GetReqID(r.Context()) + if reqID != "" { + cW(entry.buf, nYellow, "[%s] ", reqID) + } + cW(entry.buf, nCyan, "\"") + cW(entry.buf, bMagenta, "%s ", r.Method) + + scheme := "http" + if r.TLS != nil { + scheme = "https" + } + cW(entry.buf, nCyan, "%s://%s%s %s\" ", scheme, r.Host, r.RequestURI, r.Proto) + + entry.buf.WriteString("from ") + entry.buf.WriteString(r.RemoteAddr) + entry.buf.WriteString(" - ") + + return entry +} + +type defaultLogEntry struct { + *DefaultLogFormatter + request *http.Request + buf *bytes.Buffer +} + +func (l *defaultLogEntry) Write(status, bytes int, elapsed time.Duration) { + switch { + case status < 200: + cW(l.buf, bBlue, "%03d", status) + case status < 300: + cW(l.buf, bGreen, "%03d", status) 
+ case status < 400: + cW(l.buf, bCyan, "%03d", status) + case status < 500: + cW(l.buf, bYellow, "%03d", status) + default: + cW(l.buf, bRed, "%03d", status) + } + + cW(l.buf, bBlue, " %dB", bytes) + + l.buf.WriteString(" in ") + if elapsed < 500*time.Millisecond { + cW(l.buf, nGreen, "%s", elapsed) + } else if elapsed < 5*time.Second { + cW(l.buf, nYellow, "%s", elapsed) + } else { + cW(l.buf, nRed, "%s", elapsed) + } + + l.Logger.Print(l.buf.String()) +} + +func (l *defaultLogEntry) Panic(v interface{}, stack []byte) { + panicEntry := l.NewLogEntry(l.request).(*defaultLogEntry) + cW(panicEntry.buf, bRed, "panic: %+v", v) + l.Logger.Print(panicEntry.buf.String()) + l.Logger.Print(string(stack)) +} diff --git a/vendor/github.com/pressly/chi/middleware/middleware.go b/vendor/github.com/pressly/chi/middleware/middleware.go new file mode 100644 index 0000000..be6a44f --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/middleware.go @@ -0,0 +1,12 @@ +package middleware + +// contextKey is a value for use with context.WithValue. It's used as +// a pointer so it fits in an interface{} without allocation. This technique +// for defining context keys was copied from Go 1.7's new use of context in net/http. 
+type contextKey struct { + name string +} + +func (k *contextKey) String() string { + return "chi/middleware context value " + k.name +} diff --git a/vendor/github.com/pressly/chi/middleware/nocache.go b/vendor/github.com/pressly/chi/middleware/nocache.go new file mode 100644 index 0000000..7e8747f --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/nocache.go @@ -0,0 +1,58 @@ +package middleware + +// Ported from Goji's middleware, source: +// https://github.com/zenazn/goji/tree/master/web/middleware + +import ( + "net/http" + "time" +) + +// Unix epoch time +var epoch = time.Unix(0, 0).Format(time.RFC1123) + +// Taken from https://github.com/mytrile/nocache +var noCacheHeaders = map[string]string{ + "Expires": epoch, + "Cache-Control": "no-cache, private, max-age=0", + "Pragma": "no-cache", + "X-Accel-Expires": "0", +} + +var etagHeaders = []string{ + "ETag", + "If-Modified-Since", + "If-Match", + "If-None-Match", + "If-Range", + "If-Unmodified-Since", +} + +// NoCache is a simple piece of middleware that sets a number of HTTP headers to prevent +// a router (or subrouter) from being cached by an upstream proxy and/or client. 
+// +// As per http://wiki.nginx.org/HttpProxyModule - NoCache sets: +// Expires: Thu, 01 Jan 1970 00:00:00 UTC +// Cache-Control: no-cache, private, max-age=0 +// X-Accel-Expires: 0 +// Pragma: no-cache (for HTTP/1.0 proxies/clients) +func NoCache(h http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + + // Delete any ETag headers that may have been set + for _, v := range etagHeaders { + if r.Header.Get(v) != "" { + r.Header.Del(v) + } + } + + // Set our NoCache headers + for k, v := range noCacheHeaders { + w.Header().Set(k, v) + } + + h.ServeHTTP(w, r) + } + + return http.HandlerFunc(fn) +} diff --git a/vendor/github.com/pressly/chi/middleware/profiler.go b/vendor/github.com/pressly/chi/middleware/profiler.go new file mode 100644 index 0000000..f2b843c --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/profiler.go @@ -0,0 +1,58 @@ +package middleware + +import ( + "expvar" + "fmt" + "net/http" + "net/http/pprof" + + "github.com/pressly/chi" +) + +// Profiler is a convenient subrouter used for mounting net/http/pprof. ie. 
+// +// func MyService() http.Handler { +// r := chi.NewRouter() +// // ..middlewares +// r.Mount("/debug", middleware.Profiler()) +// // ..routes +// return r +// } +func Profiler() http.Handler { + r := chi.NewRouter() + r.Use(NoCache) + + r.Get("/", func(w http.ResponseWriter, r *http.Request) { + http.Redirect(w, r, r.RequestURI+"/pprof/", 301) + }) + r.HandleFunc("/pprof", func(w http.ResponseWriter, r *http.Request) { + http.Redirect(w, r, r.RequestURI+"/", 301) + }) + + r.HandleFunc("/pprof/", pprof.Index) + r.HandleFunc("/pprof/cmdline", pprof.Cmdline) + r.HandleFunc("/pprof/profile", pprof.Profile) + r.HandleFunc("/pprof/symbol", pprof.Symbol) + r.Handle("/pprof/block", pprof.Handler("block")) + r.Handle("/pprof/heap", pprof.Handler("heap")) + r.Handle("/pprof/goroutine", pprof.Handler("goroutine")) + r.Handle("/pprof/threadcreate", pprof.Handler("threadcreate")) + r.HandleFunc("/vars", expVars) + + return r +} + +// Replicated from expvar.go as not public. +func expVars(w http.ResponseWriter, r *http.Request) { + first := true + w.Header().Set("Content-Type", "application/json; charset=utf-8") + fmt.Fprintf(w, "{\n") + expvar.Do(func(kv expvar.KeyValue) { + if !first { + fmt.Fprintf(w, ",\n") + } + first = false + fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) + }) + fmt.Fprintf(w, "\n}\n") +} diff --git a/vendor/github.com/pressly/chi/middleware/realip.go b/vendor/github.com/pressly/chi/middleware/realip.go new file mode 100644 index 0000000..e9addbe --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/realip.go @@ -0,0 +1,54 @@ +package middleware + +// Ported from Goji's middleware, source: +// https://github.com/zenazn/goji/tree/master/web/middleware + +import ( + "net/http" + "strings" +) + +var xForwardedFor = http.CanonicalHeaderKey("X-Forwarded-For") +var xRealIP = http.CanonicalHeaderKey("X-Real-IP") + +// RealIP is a middleware that sets a http.Request's RemoteAddr to the results +// of parsing either the X-Forwarded-For header or the 
X-Real-IP header (in that +// order). +// +// This middleware should be inserted fairly early in the middleware stack to +// ensure that subsequent layers (e.g., request loggers) which examine the +// RemoteAddr will see the intended value. +// +// You should only use this middleware if you can trust the headers passed to +// you (in particular, the two headers this middleware uses), for example +// because you have placed a reverse proxy like HAProxy or nginx in front of +// Goji. If your reverse proxies are configured to pass along arbitrary header +// values from the client, or if you use this middleware without a reverse +// proxy, malicious clients will be able to make you very sad (or, depending on +// how you're using RemoteAddr, vulnerable to an attack of some sort). +func RealIP(h http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + if rip := realIP(r); rip != "" { + r.RemoteAddr = rip + } + h.ServeHTTP(w, r) + } + + return http.HandlerFunc(fn) +} + +func realIP(r *http.Request) string { + var ip string + + if xff := r.Header.Get(xForwardedFor); xff != "" { + i := strings.Index(xff, ", ") + if i == -1 { + i = len(xff) + } + ip = xff[:i] + } else if xrip := r.Header.Get(xRealIP); xrip != "" { + ip = xrip + } + + return ip +} diff --git a/vendor/github.com/pressly/chi/middleware/recoverer.go b/vendor/github.com/pressly/chi/middleware/recoverer.go new file mode 100644 index 0000000..dc9b64c --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/recoverer.go @@ -0,0 +1,36 @@ +package middleware + +// The original work was derived from Goji's middleware, source: +// https://github.com/zenazn/goji/tree/master/web/middleware + +import ( + "net/http" + "runtime/debug" +) + +// Recoverer is a middleware that recovers from panics, logs the panic (and a +// backtrace), and returns a HTTP 500 (Internal Server Error) status if +// possible. Recoverer prints a request ID if one is provided. 
+//
+// Alternatively, look at https://github.com/pressly/lg middleware pkgs.
+func Recoverer(next http.Handler) http.Handler {
+	fn := func(w http.ResponseWriter, r *http.Request) {
+		defer func() {
+			if rvr := recover(); rvr != nil {
+
+				logEntry := GetLogEntry(r)
+				if logEntry != nil {
+					logEntry.Panic(rvr, debug.Stack())
+				} else {
+					debug.PrintStack()
+				}
+
+				http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
+			}
+		}()
+
+		next.ServeHTTP(w, r)
+	}
+
+	return http.HandlerFunc(fn)
+}
diff --git a/vendor/github.com/pressly/chi/middleware/request_id.go b/vendor/github.com/pressly/chi/middleware/request_id.go
new file mode 100644
index 0000000..4574bde
--- /dev/null
+++ b/vendor/github.com/pressly/chi/middleware/request_id.go
@@ -0,0 +1,88 @@
+package middleware
+
+// Ported from Goji's middleware, source:
+// https://github.com/zenazn/goji/tree/master/web/middleware
+
+import (
+	"context"
+	"crypto/rand"
+	"encoding/base64"
+	"fmt"
+	"net/http"
+	"os"
+	"strings"
+	"sync/atomic"
+)
+
+// Key to use when setting the request ID.
+type ctxKeyRequestID int
+
+// RequestIDKey is the key that holds the unique request ID in a request context.
+const RequestIDKey ctxKeyRequestID = 0
+
+var prefix string
+var reqid uint64
+
+// A quick note on the statistics here: we're trying to calculate the chance that
+// two randomly generated base62 prefixes will collide.
We use the formula from +// http://en.wikipedia.org/wiki/Birthday_problem +// +// P[m, n] \approx 1 - e^{-m^2/2n} +// +// We ballpark an upper bound for $m$ by imagining (for whatever reason) a server +// that restarts every second over 10 years, for $m = 86400 * 365 * 10 = 315360000$ +// +// For a $k$ character base-62 identifier, we have $n(k) = 62^k$ +// +// Plugging this in, we find $P[m, n(10)] \approx 5.75%$, which is good enough for +// our purposes, and is surely more than anyone would ever need in practice -- a +// process that is rebooted a handful of times a day for a hundred years has less +// than a millionth of a percent chance of generating two colliding IDs. + +func init() { + hostname, err := os.Hostname() + if hostname == "" || err != nil { + hostname = "localhost" + } + var buf [12]byte + var b64 string + for len(b64) < 10 { + rand.Read(buf[:]) + b64 = base64.StdEncoding.EncodeToString(buf[:]) + b64 = strings.NewReplacer("+", "", "/", "").Replace(b64) + } + + prefix = fmt.Sprintf("%s/%s", hostname, b64[0:10]) +} + +// RequestID is a middleware that injects a request ID into the context of each +// request. A request ID is a string of the form "host.example.com/random-0001", +// where "random" is a base62 random string that uniquely identifies this go +// process, and where the last number is an atomically incremented request +// counter. +func RequestID(next http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + myid := atomic.AddUint64(&reqid, 1) + ctx := r.Context() + ctx = context.WithValue(ctx, RequestIDKey, fmt.Sprintf("%s-%06d", prefix, myid)) + next.ServeHTTP(w, r.WithContext(ctx)) + } + return http.HandlerFunc(fn) +} + +// GetReqID returns a request ID from the given context if one is present. +// Returns the empty string if a request ID cannot be found. 
+func GetReqID(ctx context.Context) string { + if ctx == nil { + return "" + } + if reqID, ok := ctx.Value(RequestIDKey).(string); ok { + return reqID + } + return "" +} + +// NextRequestID generates the next request ID in the sequence. +func NextRequestID() uint64 { + return atomic.AddUint64(&reqid, 1) +} diff --git a/vendor/github.com/pressly/chi/middleware/strip.go b/vendor/github.com/pressly/chi/middleware/strip.go new file mode 100644 index 0000000..cff4d7a --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/strip.go @@ -0,0 +1,48 @@ +package middleware + +import ( + "net/http" + + "github.com/pressly/chi" +) + +// StripSlashes is a middleware that will match request paths with a trailing +// slash, strip it from the path and continue routing through the mux, if a route +// matches, then it will serve the handler. +func StripSlashes(next http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + var path string + rctx := chi.RouteContext(r.Context()) + if rctx.RoutePath != "" { + path = rctx.RoutePath + } else { + path = r.URL.Path + } + if len(path) > 1 && path[len(path)-1] == '/' { + rctx.RoutePath = path[:len(path)-1] + } + next.ServeHTTP(w, r) + } + return http.HandlerFunc(fn) +} + +// RedirectSlashes is a middleware that will match request paths with a trailing +// slash and redirect to the same path, less the trailing slash. 
+func RedirectSlashes(next http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + var path string + rctx := chi.RouteContext(r.Context()) + if rctx.RoutePath != "" { + path = rctx.RoutePath + } else { + path = r.URL.Path + } + if len(path) > 1 && path[len(path)-1] == '/' { + path = path[:len(path)-1] + http.Redirect(w, r, path, 301) + return + } + next.ServeHTTP(w, r) + } + return http.HandlerFunc(fn) +} diff --git a/vendor/github.com/pressly/chi/middleware/terminal.go b/vendor/github.com/pressly/chi/middleware/terminal.go new file mode 100644 index 0000000..79930a2 --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/terminal.go @@ -0,0 +1,63 @@ +package middleware + +// Ported from Goji's middleware, source: +// https://github.com/zenazn/goji/tree/master/web/middleware + +import ( + "fmt" + "io" + "os" +) + +var ( + // Normal colors + nBlack = []byte{'\033', '[', '3', '0', 'm'} + nRed = []byte{'\033', '[', '3', '1', 'm'} + nGreen = []byte{'\033', '[', '3', '2', 'm'} + nYellow = []byte{'\033', '[', '3', '3', 'm'} + nBlue = []byte{'\033', '[', '3', '4', 'm'} + nMagenta = []byte{'\033', '[', '3', '5', 'm'} + nCyan = []byte{'\033', '[', '3', '6', 'm'} + nWhite = []byte{'\033', '[', '3', '7', 'm'} + // Bright colors + bBlack = []byte{'\033', '[', '3', '0', ';', '1', 'm'} + bRed = []byte{'\033', '[', '3', '1', ';', '1', 'm'} + bGreen = []byte{'\033', '[', '3', '2', ';', '1', 'm'} + bYellow = []byte{'\033', '[', '3', '3', ';', '1', 'm'} + bBlue = []byte{'\033', '[', '3', '4', ';', '1', 'm'} + bMagenta = []byte{'\033', '[', '3', '5', ';', '1', 'm'} + bCyan = []byte{'\033', '[', '3', '6', ';', '1', 'm'} + bWhite = []byte{'\033', '[', '3', '7', ';', '1', 'm'} + + reset = []byte{'\033', '[', '0', 'm'} +) + +var isTTY bool + +func init() { + // This is sort of cheating: if stdout is a character device, we assume + // that means it's a TTY. 
Unfortunately, there are many non-TTY + // character devices, but fortunately stdout is rarely set to any of + // them. + // + // We could solve this properly by pulling in a dependency on + // code.google.com/p/go.crypto/ssh/terminal, for instance, but as a + // heuristic for whether to print in color or in black-and-white, I'd + // really rather not. + fi, err := os.Stdout.Stat() + if err == nil { + m := os.ModeDevice | os.ModeCharDevice + isTTY = fi.Mode()&m == m + } +} + +// colorWrite +func cW(w io.Writer, color []byte, s string, args ...interface{}) { + if isTTY { + w.Write(color) + } + fmt.Fprintf(w, s, args...) + if isTTY { + w.Write(reset) + } +} diff --git a/vendor/github.com/pressly/chi/middleware/throttler.go b/vendor/github.com/pressly/chi/middleware/throttler.go new file mode 100644 index 0000000..d935e2c --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/throttler.go @@ -0,0 +1,101 @@ +package middleware + +import ( + "net/http" + "time" +) + +const ( + errCapacityExceeded = "Server capacity exceeded." + errTimedOut = "Timed out while waiting for a pending request to complete." + errContextCanceled = "Context was canceled." +) + +var ( + defaultBacklogTimeout = time.Second * 60 +) + +// Throttle is a middleware that limits number of currently processed requests +// at a time. +func Throttle(limit int) func(http.Handler) http.Handler { + return ThrottleBacklog(limit, 0, defaultBacklogTimeout) +} + +// ThrottleBacklog is a middleware that limits number of currently processed +// requests at a time and provides a backlog for holding a finite number of +// pending requests. 
+func ThrottleBacklog(limit int, backlogLimit int, backlogTimeout time.Duration) func(http.Handler) http.Handler { + if limit < 1 { + panic("chi/middleware: Throttle expects limit > 0") + } + + if backlogLimit < 0 { + panic("chi/middleware: Throttle expects backlogLimit to be positive") + } + + t := throttler{ + tokens: make(chan token, limit), + backlogTokens: make(chan token, limit+backlogLimit), + backlogTimeout: backlogTimeout, + } + + // Filling tokens. + for i := 0; i < limit+backlogLimit; i++ { + if i < limit { + t.tokens <- token{} + } + t.backlogTokens <- token{} + } + + fn := func(h http.Handler) http.Handler { + t.h = h + return &t + } + + return fn +} + +// token represents a request that is being processed. +type token struct{} + +// throttler limits number of currently processed requests at a time. +type throttler struct { + h http.Handler + tokens chan token + backlogTokens chan token + backlogTimeout time.Duration +} + +// ServeHTTP is the primary throttler request handler +func (t *throttler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + select { + case <-ctx.Done(): + http.Error(w, errContextCanceled, http.StatusServiceUnavailable) + return + case btok := <-t.backlogTokens: + timer := time.NewTimer(t.backlogTimeout) + + defer func() { + t.backlogTokens <- btok + }() + + select { + case <-timer.C: + http.Error(w, errTimedOut, http.StatusServiceUnavailable) + return + case <-ctx.Done(): + http.Error(w, errContextCanceled, http.StatusServiceUnavailable) + return + case tok := <-t.tokens: + defer func() { + t.tokens <- tok + }() + t.h.ServeHTTP(w, r) + } + return + default: + http.Error(w, errCapacityExceeded, http.StatusServiceUnavailable) + return + } +} diff --git a/vendor/github.com/pressly/chi/middleware/timeout.go b/vendor/github.com/pressly/chi/middleware/timeout.go new file mode 100644 index 0000000..5cabf1f --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/timeout.go @@ -0,0 +1,48 @@ +package middleware 
+ +import ( + "context" + "net/http" + "time" +) + +// Timeout is a middleware that cancels ctx after a given timeout and return +// a 504 Gateway Timeout error to the client. +// +// It's required that you select the ctx.Done() channel to check for the signal +// if the context has reached its deadline and return, otherwise the timeout +// signal will be just ignored. +// +// ie. a route/handler may look like: +// +// r.Get("/long", func(ctx context.Context, w http.ResponseWriter, r *http.Request) { +// processTime := time.Duration(rand.Intn(4)+1) * time.Second +// +// select { +// case <-ctx.Done(): +// return +// +// case <-time.After(processTime): +// // The above channel simulates some hard work. +// } +// +// w.Write([]byte("done")) +// }) +// +func Timeout(timeout time.Duration) func(next http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + ctx, cancel := context.WithTimeout(r.Context(), timeout) + defer func() { + cancel() + if ctx.Err() == context.DeadlineExceeded { + w.WriteHeader(http.StatusGatewayTimeout) + } + }() + + r = r.WithContext(ctx) + next.ServeHTTP(w, r) + } + return http.HandlerFunc(fn) + } +} diff --git a/vendor/github.com/pressly/chi/middleware/value.go b/vendor/github.com/pressly/chi/middleware/value.go new file mode 100644 index 0000000..fbbd039 --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/value.go @@ -0,0 +1,17 @@ +package middleware + +import ( + "context" + "net/http" +) + +// WithValue is a middleware that sets a given key/value in a context chain. 
+func WithValue(key interface{}, val interface{}) func(next http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + r = r.WithContext(context.WithValue(r.Context(), key, val)) + next.ServeHTTP(w, r) + } + return http.HandlerFunc(fn) + } +} diff --git a/vendor/github.com/pressly/chi/middleware/wrap_writer.go b/vendor/github.com/pressly/chi/middleware/wrap_writer.go new file mode 100644 index 0000000..9991736 --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/wrap_writer.go @@ -0,0 +1,144 @@ +package middleware + +// The original work was derived from Goji's middleware, source: +// https://github.com/zenazn/goji/tree/master/web/middleware + +import ( + "bufio" + "io" + "net" + "net/http" +) + +// WrapResponseWriter is a proxy around an http.ResponseWriter that allows you to hook +// into various parts of the response process. +type WrapResponseWriter interface { + http.ResponseWriter + // Status returns the HTTP status of the request, or 0 if one has not + // yet been sent. + Status() int + // BytesWritten returns the total number of bytes sent to the client. + BytesWritten() int + // Tee causes the response body to be written to the given io.Writer in + // addition to proxying the writes through. Only one io.Writer can be + // tee'd to at once: setting a second one will overwrite the first. + // Writes will be sent to the proxy before being written to this + // io.Writer. It is illegal for the tee'd writer to be modified + // concurrently with writes. + Tee(io.Writer) + // Unwrap returns the original proxied target. + Unwrap() http.ResponseWriter +} + +// basicWriter wraps a http.ResponseWriter that implements the minimal +// http.ResponseWriter interface. 
+type basicWriter struct { + http.ResponseWriter + wroteHeader bool + code int + bytes int + tee io.Writer +} + +func (b *basicWriter) WriteHeader(code int) { + if !b.wroteHeader { + b.code = code + b.wroteHeader = true + b.ResponseWriter.WriteHeader(code) + } +} +func (b *basicWriter) Write(buf []byte) (int, error) { + b.WriteHeader(http.StatusOK) + n, err := b.ResponseWriter.Write(buf) + if b.tee != nil { + _, err2 := b.tee.Write(buf[:n]) + // Prefer errors generated by the proxied writer. + if err == nil { + err = err2 + } + } + b.bytes += n + return n, err +} +func (b *basicWriter) maybeWriteHeader() { + if !b.wroteHeader { + b.WriteHeader(http.StatusOK) + } +} +func (b *basicWriter) Status() int { + return b.code +} +func (b *basicWriter) BytesWritten() int { + return b.bytes +} +func (b *basicWriter) Tee(w io.Writer) { + b.tee = w +} +func (b *basicWriter) Unwrap() http.ResponseWriter { + return b.ResponseWriter +} + +type flushWriter struct { + basicWriter +} + +func (f *flushWriter) Flush() { + fl := f.basicWriter.ResponseWriter.(http.Flusher) + fl.Flush() +} + +var _ http.Flusher = &flushWriter{} + +// httpFancyWriter is a HTTP writer that additionally satisfies http.CloseNotifier, +// http.Flusher, http.Hijacker, and io.ReaderFrom. It exists for the common case +// of wrapping the http.ResponseWriter that package http gives you, in order to +// make the proxied object support the full method set of the proxied object. 
+type httpFancyWriter struct { + basicWriter +} + +func (f *httpFancyWriter) CloseNotify() <-chan bool { + cn := f.basicWriter.ResponseWriter.(http.CloseNotifier) + return cn.CloseNotify() +} +func (f *httpFancyWriter) Flush() { + fl := f.basicWriter.ResponseWriter.(http.Flusher) + fl.Flush() +} +func (f *httpFancyWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + hj := f.basicWriter.ResponseWriter.(http.Hijacker) + return hj.Hijack() +} +func (f *httpFancyWriter) ReadFrom(r io.Reader) (int64, error) { + if f.basicWriter.tee != nil { + return io.Copy(&f.basicWriter, r) + } + rf := f.basicWriter.ResponseWriter.(io.ReaderFrom) + f.basicWriter.maybeWriteHeader() + return rf.ReadFrom(r) +} + +var _ http.CloseNotifier = &httpFancyWriter{} +var _ http.Flusher = &httpFancyWriter{} +var _ http.Hijacker = &httpFancyWriter{} +var _ io.ReaderFrom = &httpFancyWriter{} + +// http2FancyWriter is a HTTP2 writer that additionally satisfies http.CloseNotifier, +// http.Flusher, and io.ReaderFrom. It exists for the common case +// of wrapping the http.ResponseWriter that package http gives you, in order to +// make the proxied object support the full method set of the proxied object. 
+type http2FancyWriter struct { + basicWriter +} + +func (f *http2FancyWriter) CloseNotify() <-chan bool { + cn := f.basicWriter.ResponseWriter.(http.CloseNotifier) + return cn.CloseNotify() +} +func (f *http2FancyWriter) Flush() { + fl := f.basicWriter.ResponseWriter.(http.Flusher) + fl.Flush() +} + +var _ http.CloseNotifier = &http2FancyWriter{} +var _ http.Flusher = &http2FancyWriter{} diff --git a/vendor/github.com/pressly/chi/middleware/wrap_writer17.go b/vendor/github.com/pressly/chi/middleware/wrap_writer17.go new file mode 100644 index 0000000..c60df60 --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/wrap_writer17.go @@ -0,0 +1,34 @@ +// +build go1.7,!go1.8 + +package middleware + +import ( + "io" + "net/http" +) + +// NewWrapResponseWriter wraps an http.ResponseWriter, returning a proxy that allows you to +// hook into various parts of the response process. +func NewWrapResponseWriter(w http.ResponseWriter, protoMajor int) WrapResponseWriter { + _, cn := w.(http.CloseNotifier) + _, fl := w.(http.Flusher) + + bw := basicWriter{ResponseWriter: w} + + if protoMajor == 2 { + if cn && fl { + return &http2FancyWriter{bw} + } + } else { + _, hj := w.(http.Hijacker) + _, rf := w.(io.ReaderFrom) + if cn && fl && hj && rf { + return &httpFancyWriter{bw} + } + } + if fl { + return &flushWriter{bw} + } + + return &bw +} diff --git a/vendor/github.com/pressly/chi/middleware/wrap_writer18.go b/vendor/github.com/pressly/chi/middleware/wrap_writer18.go new file mode 100644 index 0000000..9233d8b --- /dev/null +++ b/vendor/github.com/pressly/chi/middleware/wrap_writer18.go @@ -0,0 +1,41 @@ +// +build go1.8 + +package middleware + +import ( + "io" + "net/http" +) + +// NewWrapResponseWriter wraps an http.ResponseWriter, returning a proxy that allows you to +// hook into various parts of the response process. 
+func NewWrapResponseWriter(w http.ResponseWriter, protoMajor int) WrapResponseWriter { + _, cn := w.(http.CloseNotifier) + _, fl := w.(http.Flusher) + + bw := basicWriter{ResponseWriter: w} + + if protoMajor == 2 { + _, ps := w.(http.Pusher) + if cn && fl && ps { + return &http2FancyWriter{bw} + } + } else { + _, hj := w.(http.Hijacker) + _, rf := w.(io.ReaderFrom) + if cn && fl && hj && rf { + return &httpFancyWriter{bw} + } + } + if fl { + return &flushWriter{bw} + } + + return &bw +} + +func (f *http2FancyWriter) Push(target string, opts *http.PushOptions) error { + return f.basicWriter.ResponseWriter.(http.Pusher).Push(target, opts) +} + +var _ http.Pusher = &http2FancyWriter{} diff --git a/vendor/github.com/pressly/chi/mux.go b/vendor/github.com/pressly/chi/mux.go new file mode 100644 index 0000000..b16dab1 --- /dev/null +++ b/vendor/github.com/pressly/chi/mux.go @@ -0,0 +1,408 @@ +package chi + +import ( + "context" + "fmt" + "net/http" + "strings" + "sync" +) + +var _ Router = &Mux{} + +// Mux is a simple HTTP route multiplexer that parses a request path, +// records any URL params, and executes an end handler. It implements +// the http.Handler interface and is friendly with the standard library. +// +// Mux is designed to be fast, minimal and offer a powerful API for building +// modular and composable HTTP services with a large set of handlers. It's +// particularly useful for writing large REST API services that break a handler +// into many smaller parts composed of middlewares and end handlers. +type Mux struct { + // The radix trie router + tree *node + + // The middleware stack + middlewares []func(http.Handler) http.Handler + + // Controls the behaviour of middleware chain generation when a mux + // is registered as an inline group inside another mux. 
+ inline bool + + // The computed mux handler made of the chained middleware stack and + // the tree router + handler http.Handler + + // Routing context pool + pool sync.Pool + + // Custom route not found handler + notFoundHandler http.HandlerFunc + + // Custom method not allowed handler + methodNotAllowedHandler http.HandlerFunc +} + +// NewMux returns a newly initialized Mux object that implements the Router +// interface. +func NewMux() *Mux { + mux := &Mux{tree: &node{}} + mux.pool.New = func() interface{} { + return NewRouteContext() + } + return mux +} + +// ServeHTTP is the single method of the http.Handler interface that makes +// Mux interoperable with the standard library. It uses a sync.Pool to get and +// reuse routing contexts for each request. +func (mx *Mux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + // Ensure the mux has some routes defined on the mux + if mx.handler == nil { + panic("chi: attempting to route to a mux with no handlers.") + } + + // Check if a routing context already exists from a parent router. + rctx, _ := r.Context().Value(RouteCtxKey).(*Context) + if rctx != nil { + mx.handler.ServeHTTP(w, r) + return + } + + // Fetch a RouteContext object from the sync pool, and call the computed + // mx.handler that is comprised of mx.middlewares + mx.routeHTTP. + // Once the request is finished, reset the routing context and put it back + // into the pool for reuse from another request. + rctx = mx.pool.Get().(*Context) + rctx.reset() + r = r.WithContext(context.WithValue(r.Context(), RouteCtxKey, rctx)) + mx.handler.ServeHTTP(w, r) + mx.pool.Put(rctx) +} + +// Use appends a middleware handler to the Mux middleware stack. +// +// The middleware stack for any Mux will execute before searching for a matching +// route to a specific handler, which provides opportunity to respond early, +// change the course of the request execution, or set request-scoped values for +// the next http.Handler. 
+func (mx *Mux) Use(middlewares ...func(http.Handler) http.Handler) { + if mx.handler != nil { + panic("chi: all middlewares must be defined before routes on a mux") + } + mx.middlewares = append(mx.middlewares, middlewares...) +} + +// Handle adds the route `pattern` that matches any http method to +// execute the `handler` http.Handler. +func (mx *Mux) Handle(pattern string, handler http.Handler) { + mx.handle(mALL, pattern, handler) +} + +// HandleFunc adds the route `pattern` that matches any http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) HandleFunc(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mALL, pattern, handlerFn) +} + +// Connect adds the route `pattern` that matches a CONNECT http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Connect(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mCONNECT, pattern, handlerFn) +} + +// Delete adds the route `pattern` that matches a DELETE http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Delete(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mDELETE, pattern, handlerFn) +} + +// Get adds the route `pattern` that matches a GET http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Get(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mGET, pattern, handlerFn) +} + +// Head adds the route `pattern` that matches a HEAD http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Head(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mHEAD, pattern, handlerFn) +} + +// Options adds the route `pattern` that matches a OPTIONS http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Options(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mOPTIONS, pattern, handlerFn) +} + +// Patch adds the route `pattern` that matches a PATCH http method to +// execute the `handlerFn` http.HandlerFunc. 
+func (mx *Mux) Patch(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mPATCH, pattern, handlerFn) +} + +// Post adds the route `pattern` that matches a POST http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Post(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mPOST, pattern, handlerFn) +} + +// Put adds the route `pattern` that matches a PUT http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Put(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mPUT, pattern, handlerFn) +} + +// Trace adds the route `pattern` that matches a TRACE http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Trace(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mTRACE, pattern, handlerFn) +} + +// NotFound sets a custom http.HandlerFunc for routing paths that could +// not be found. The default 404 handler is `http.NotFound`. +func (mx *Mux) NotFound(handlerFn http.HandlerFunc) { + mx.notFoundHandler = handlerFn + + mx.updateSubRoutes(func(subMux *Mux) { + if subMux.notFoundHandler == nil { + subMux.NotFound(handlerFn) + } + }) +} + +// MethodNotAllowed sets a custom http.HandlerFunc for routing paths where the +// method is unresolved. The default handler returns a 405 with an empty body. +func (mx *Mux) MethodNotAllowed(handlerFn http.HandlerFunc) { + mx.methodNotAllowedHandler = handlerFn + + mx.updateSubRoutes(func(subMux *Mux) { + if subMux.methodNotAllowedHandler == nil { + subMux.MethodNotAllowed(handlerFn) + } + }) +} + +// With adds inline middlewares for an endpoint handler. +func (mx *Mux) With(middlewares ...func(http.Handler) http.Handler) Router { + // Similarly as in handle(), we must build the mux handler once further + // middleware registration isn't allowed for this stack, like now. 
+ if !mx.inline && mx.handler == nil { + mx.buildRouteHandler() + } + + // Copy middlewares from parent inline muxs + var mws Middlewares + if mx.inline { + mws = make(Middlewares, len(mx.middlewares)) + copy(mws, mx.middlewares) + } + mws = append(mws, middlewares...) + + im := &Mux{inline: true, tree: mx.tree, middlewares: mws} + return im +} + +// Group creates a new inline-Mux with a fresh middleware stack. It's useful +// for a group of handlers along the same routing path that use an additional +// set of middlewares. See _examples/. +func (mx *Mux) Group(fn func(r Router)) Router { + im := mx.With().(*Mux) + if fn != nil { + fn(im) + } + return im +} + +// Route creates a new Mux with a fresh middleware stack and mounts it +// along the `pattern` as a subrouter. Effectively, this is a short-hand +// call to Mount. See _examples/. +func (mx *Mux) Route(pattern string, fn func(r Router)) Router { + subRouter := NewRouter() + if fn != nil { + fn(subRouter) + } + mx.Mount(pattern, subRouter) + return subRouter +} + +// Mount attaches another http.Handler or chi Router as a subrouter along a routing +// path. It's very useful to split up a large API as many independent routers and +// compose them as a single service using Mount. See _examples/. +// +// Note that Mount() simply sets a wildcard along the `pattern` that will continue +// routing at the `handler`, which in most cases is another chi.Router. As a result, +// if you define two Mount() routes on the exact same pattern the mount will panic. +func (mx *Mux) Mount(pattern string, handler http.Handler) { + // Provide runtime safety for ensuring a pattern isn't mounted on an existing + // routing pattern. + if mx.tree.findPattern(pattern+"*") != nil || mx.tree.findPattern(pattern+"/*") != nil { + panic(fmt.Sprintf("chi: attempting to Mount() a handler on an existing path, '%s'", pattern)) + } + + // Assign sub-Router's with the parent not found & method not allowed handler if not specified. 
+ subr, ok := handler.(*Mux) + if ok && subr.notFoundHandler == nil && mx.notFoundHandler != nil { + subr.NotFound(mx.notFoundHandler) + } + if ok && subr.methodNotAllowedHandler == nil && mx.methodNotAllowedHandler != nil { + subr.MethodNotAllowed(mx.methodNotAllowedHandler) + } + + // Wrap the sub-router in a handlerFunc to scope the request path for routing. + subHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + rctx := RouteContext(r.Context()) + rctx.RoutePath = "/" + rctx.URLParams.Del("*") + handler.ServeHTTP(w, r) + }) + + if pattern == "" || pattern[len(pattern)-1] != '/' { + mx.handle(mALL|mSTUB, pattern, subHandler) + mx.handle(mALL|mSTUB, pattern+"/", mx.NotFoundHandler()) + pattern += "/" + } + + method := mALL + subroutes, _ := handler.(Routes) + if subroutes != nil { + method |= mSTUB + } + n := mx.handle(method, pattern+"*", subHandler) + + if subroutes != nil { + n.subroutes = subroutes + } +} + +func (mx *Mux) Middlewares() Middlewares { + return mx.middlewares +} + +func (mx *Mux) Routes() []Route { + return mx.tree.routes() +} + +// FileServer conveniently sets up a http.FileServer handler to serve +// static files from a http.FileSystem. +func (mx *Mux) FileServer(path string, root http.FileSystem) { + if strings.ContainsAny(path, ":*") { + panic("chi: FileServer does not permit URL parameters.") + } + + fs := http.StripPrefix(path, http.FileServer(root)) + + if path != "/" && path[len(path)-1] != '/' { + mx.Get(path, http.RedirectHandler(path+"/", 301).ServeHTTP) + path += "/" + } + path += "*" + + mx.Get(path, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fs.ServeHTTP(w, r) + })) +} + +// NotFoundHandler returns the default Mux 404 responder whenever a route +// cannot be found. 
+func (mx *Mux) NotFoundHandler() http.HandlerFunc { + if mx.notFoundHandler != nil { + return mx.notFoundHandler + } + return http.NotFound +} + +// MethodNotAllowedHandler returns the default Mux 405 responder whenever +// a method cannot be resolved for a route. +func (mx *Mux) MethodNotAllowedHandler() http.HandlerFunc { + if mx.methodNotAllowedHandler != nil { + return mx.methodNotAllowedHandler + } + return methodNotAllowedHandler +} + +// buildRouteHandler builds the single mux handler that is a chain of the middleware +// stack, as defined by calls to Use(), and the tree router (Mux) itself. After this +// point, no other middlewares can be registered on this Mux's stack. But you can still +// compose additional middlewares via Group()'s or using a chained middleware handler. +func (mx *Mux) buildRouteHandler() { + mx.handler = chain(mx.middlewares, http.HandlerFunc(mx.routeHTTP)) +} + +// handle registers a http.Handler in the routing tree for a particular http method +// and routing pattern. +func (mx *Mux) handle(method methodTyp, pattern string, handler http.Handler) *node { + if len(pattern) == 0 || pattern[0] != '/' { + panic(fmt.Sprintf("chi: routing pattern must begin with '/' in '%s'", pattern)) + } + + // Build the final routing handler for this Mux. + if !mx.inline && mx.handler == nil { + mx.buildRouteHandler() + } + + // Build endpoint handler with inline middlewares for the route + var h http.Handler + if mx.inline { + mx.handler = http.HandlerFunc(mx.routeHTTP) + h = Chain(mx.middlewares...).Handler(handler) + } else { + h = handler + } + + // Add the endpoint to the tree and return the node + return mx.tree.InsertRoute(method, pattern, h) +} + +// routeHTTP routes a http.Request through the Mux routing tree to serve +// the matching handler for a particular http method. 
+func (mx *Mux) routeHTTP(w http.ResponseWriter, r *http.Request) { + // Grab the route context object + rctx := r.Context().Value(RouteCtxKey).(*Context) + + // The request routing path + routePath := rctx.RoutePath + if routePath == "" { + routePath = r.URL.Path + } + + // Check if method is supported by chi + method, ok := methodMap[r.Method] + if !ok { + mx.MethodNotAllowedHandler().ServeHTTP(w, r) + return + } + + // Find the route + hs := mx.tree.FindRoute(rctx, routePath) + if hs == nil { + mx.NotFoundHandler().ServeHTTP(w, r) + return + } + + h, ok := hs[method] + if !ok { + mx.MethodNotAllowedHandler().ServeHTTP(w, r) + return + } + + // Serve it up + h.ServeHTTP(w, r) +} + +// Recursively update data on child routers. +func (mx *Mux) updateSubRoutes(fn func(subMux *Mux)) { + for _, r := range mx.tree.routes() { + subMux, ok := r.SubRoutes.(*Mux) + if !ok { + continue + } + fn(subMux) + } +} + +// methodNotAllowedHandler is a helper function to respond with a 405, +// method not allowed. +func methodNotAllowedHandler(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(405) + w.Write(nil) +} diff --git a/vendor/github.com/pressly/chi/tree.go b/vendor/github.com/pressly/chi/tree.go new file mode 100644 index 0000000..7e5e4fc --- /dev/null +++ b/vendor/github.com/pressly/chi/tree.go @@ -0,0 +1,535 @@ +package chi + +// Radix tree implementation below is a based on the original work by +// Armon Dadgar in https://github.com/armon/go-radix/blob/master/radix.go +// (MIT licensed). It's been heavily modified for use as a HTTP routing tree. 
+ +import ( + "net/http" + "sort" + "strings" +) + +type methodTyp int + +const ( + mCONNECT methodTyp = 1 << iota + mDELETE + mGET + mHEAD + mOPTIONS + mPATCH + mPOST + mPUT + mTRACE + mSTUB + + mALL methodTyp = mCONNECT | mDELETE | mGET | mHEAD | mOPTIONS | + mPATCH | mPOST | mPUT | mTRACE +) + +var methodMap = map[string]methodTyp{ + "CONNECT": mCONNECT, + "DELETE": mDELETE, + "GET": mGET, + "HEAD": mHEAD, + "OPTIONS": mOPTIONS, + "PATCH": mPATCH, + "POST": mPOST, + "PUT": mPUT, + "TRACE": mTRACE, +} + +type nodeTyp uint8 + +const ( + ntStatic nodeTyp = iota // /home + ntRegexp // /:id([0-9]+) or #id^[0-9]+$ + ntParam // /:user + ntCatchAll // /api/v1/* +) + +type node struct { + // node type + typ nodeTyp + + // first byte of the prefix + label byte + + // prefix is the common prefix we ignore + prefix string + + // pattern is the computed path of prefixes + pattern string + + // HTTP handler on the leaf node + handlers methodHandlers + + // chi subroutes on the leaf node + subroutes Routes + + // Child nodes should be stored in-order for iteration, + // in groups of the node type. 
+ children [ntCatchAll + 1]nodes +} + +func (n *node) FindRoute(rctx *Context, path string) methodHandlers { + // Reset the context routing pattern + rctx.RoutePattern = "" + + // Find the routing handlers for the path + rn := n.findRoute(rctx, path) + if rn == nil { + return nil + } + + // Record the routing pattern in the request lifecycle + if rn.pattern != "" { + rctx.RoutePattern = rn.pattern + rctx.RoutePatterns = append(rctx.RoutePatterns, rctx.RoutePattern) + } + + return rn.handlers +} + +func (n *node) InsertRoute(method methodTyp, pattern string, handler http.Handler) *node { + var parent *node + search := pattern + + for { + // Handle key exhaustion + if len(search) == 0 { + // Insert or update the node's leaf handler + n.setHandler(method, handler) + n.pattern = pattern + return n + } + + // Look for the edge + parent = n + n = n.getEdge(search[0]) + + // No edge, create one + if n == nil { + cn := &node{label: search[0], prefix: search, pattern: pattern} + cn.setHandler(method, handler) + parent.addChild(pattern, cn) + return cn + } + + if n.typ > ntStatic { + // We found a wildcard node, meaning search path starts with + // a wild prefix. Trim off the wildcard search path and continue. + p := strings.Index(search, "/") + if p < 0 { + p = len(search) + } + search = search[p:] + continue + } + + // Static nodes fall below here. + // Determine longest prefix of the search key on match. + commonPrefix := n.longestPrefix(search, n.prefix) + if commonPrefix == len(n.prefix) { + // the common prefix is as long as the current node's prefix we're attempting to insert. + // keep the search going. 
+ search = search[commonPrefix:] + continue + } + + // Split the node + child := &node{ + typ: ntStatic, + prefix: search[:commonPrefix], + } + parent.replaceChild(search[0], child) + + // Restore the existing node + n.label = n.prefix[commonPrefix] + n.prefix = n.prefix[commonPrefix:] + child.addChild(pattern, n) + + // If the new key is a subset, add to to this node + search = search[commonPrefix:] + if len(search) == 0 { + child.setHandler(method, handler) + child.pattern = pattern + return child + } + + // Create a new edge for the node + subchild := &node{ + typ: ntStatic, + label: search[0], + prefix: search, + pattern: pattern, + } + subchild.setHandler(method, handler) + child.addChild(pattern, subchild) + return subchild + } +} + +func (n *node) findPattern(pattern string) *node { + nn := n + for _, nds := range nn.children { + if len(nds) == 0 { + continue + } + + n = nn.getEdge(pattern[0]) + if n == nil { + continue + } + + idx := n.longestPrefix(pattern, n.prefix) + xpattern := pattern[idx:] + + if len(xpattern) == 0 { + return n + } else if xpattern[0] == '/' && idx < len(n.prefix) { + continue + } + + return n.findPattern(xpattern) + } + return nil +} + +func (n *node) isLeaf() bool { + return n.handlers != nil +} + +func (n *node) addChild(pattern string, child *node) { + search := child.prefix + + // Find any wildcard segments + p := strings.IndexAny(search, ":*") + + // Determine new node type + ntyp := child.typ + if p >= 0 { + switch search[p] { + case ':': + ntyp = ntParam + case '*': + ntyp = ntCatchAll + } + } + + if p == 0 { + // Path starts with a wildcard + + handlers := child.handlers + child.typ = ntyp + + if ntyp == ntCatchAll { + p = -1 + } else { + p = strings.IndexByte(search, '/') + } + if p < 0 { + p = len(search) + } + child.prefix = search[:p] + + if p != len(search) { + // add edge for the remaining part, split the end. 
+ child.handlers = nil + + search = search[p:] + + child.addChild(pattern, &node{ + typ: ntStatic, + label: search[0], // this will always start with / + prefix: search, + pattern: pattern, + handlers: handlers, + }) + } + + } else if p > 0 { + // Path has some wildcard + + // starts with a static segment + handlers := child.handlers + child.typ = ntStatic + child.prefix = search[:p] + child.handlers = nil + + // add the wild edge node + search = search[p:] + + child.addChild(pattern, &node{ + typ: ntyp, + label: search[0], + prefix: search, + pattern: pattern, + handlers: handlers, + }) + + } else { + // Path is all static + child.typ = ntyp + + } + + n.children[child.typ] = append(n.children[child.typ], child) + n.children[child.typ].Sort() +} + +func (n *node) replaceChild(label byte, child *node) { + for i := 0; i < len(n.children[child.typ]); i++ { + if n.children[child.typ][i].label == label { + n.children[child.typ][i] = child + n.children[child.typ][i].label = label + return + } + } + + panic("chi: replacing missing child") +} + +func (n *node) getEdge(label byte) *node { + for _, nds := range n.children { + num := len(nds) + for i := 0; i < num; i++ { + if nds[i].label == label { + return nds[i] + } + } + } + return nil +} + +func (n *node) findEdge(ntyp nodeTyp, label byte) *node { + nds := n.children[ntyp] + num := len(nds) + idx := 0 + + switch ntyp { + case ntStatic: + i, j := 0, num-1 + for i <= j { + idx = i + (j-i)/2 + if label > nds[idx].label { + i = idx + 1 + } else if label < nds[idx].label { + j = idx - 1 + } else { + i = num // breaks cond + } + } + if nds[idx].label != label { + return nil + } + return nds[idx] + + default: // wild nodes + // TODO: right now we match them all.. but regexp should + // run through regexp matcher + return nds[idx] + } +} + +// Recursive edge traversal by checking all nodeTyp groups along the way. +// It's like searching through a multi-dimensional radix trie. 
+func (n *node) findRoute(rctx *Context, path string) *node { + nn := n + search := path + + for t, nds := range nn.children { + ntyp := nodeTyp(t) + if len(nds) == 0 { + continue + } + + // search subset of edges of the index for a matching node + var label byte + if search != "" { + label = search[0] + } + + xn := nn.findEdge(ntyp, label) // next node + if xn == nil { + continue + } + + // Prepare next search path by trimming prefix from requested path + xsearch := search + if xn.typ > ntStatic { + p := -1 + if xn.typ < ntCatchAll { + p = strings.IndexByte(xsearch, '/') + } + if p < 0 { + p = len(xsearch) + } + + if xn.typ == ntCatchAll { + rctx.URLParams.Add("*", xsearch) + } else { + rctx.URLParams.Add(xn.prefix[1:], xsearch[:p]) + } + + xsearch = xsearch[p:] + } else if strings.HasPrefix(xsearch, xn.prefix) { + xsearch = xsearch[len(xn.prefix):] + } else { + continue // no match + } + + // did we find it yet? + if len(xsearch) == 0 { + if xn.isLeaf() { + return xn + } + } + + // recursively find the next node.. 
+ fin := xn.findRoute(rctx, xsearch) + if fin != nil { + // found a node, return it + return fin + } + + // Did not found final handler, let's remove the param here if it was set + if xn.typ > ntStatic { + if xn.typ == ntCatchAll { + rctx.URLParams.Del("*") + } else { + rctx.URLParams.Del(xn.prefix[1:]) + } + } + } + + return nil +} + +// longestPrefix finds the length of the shared prefix +// of two strings +func (n *node) longestPrefix(k1, k2 string) int { + max := len(k1) + if l := len(k2); l < max { + max = l + } + var i int + for i = 0; i < max; i++ { + if k1[i] != k2[i] { + break + } + } + return i +} + +func (n *node) setHandler(method methodTyp, handler http.Handler) { + if n.handlers == nil { + n.handlers = make(methodHandlers, 0) + } + if method&mSTUB == mSTUB { + n.handlers[mSTUB] = handler + } else { + n.handlers[mSTUB] = nil + } + if method&mALL == mALL { + n.handlers[mALL] = handler + for _, m := range methodMap { + n.handlers[m] = handler + } + } else { + n.handlers[method] = handler + } +} + +func (n *node) isEmpty() bool { + for _, nds := range n.children { + if len(nds) > 0 { + return false + } + } + return true +} + +func (n *node) routes() []Route { + rts := []Route{} + + n.walkRoutes(n.prefix, n, func(pattern string, handlers methodHandlers, subroutes Routes) bool { + if handlers[mSTUB] != nil && subroutes == nil { + return false + } + + if subroutes != nil && len(pattern) > 2 { + pattern = pattern[:len(pattern)-2] + } + + var hs = make(map[string]http.Handler, 0) + if handlers[mALL] != nil { + hs["*"] = handlers[mALL] + } + for mt, h := range handlers { + if h == nil { + continue + } + m := methodTypString(mt) + if m == "" { + continue + } + hs[m] = h + } + + rt := Route{pattern, hs, subroutes} + rts = append(rts, rt) + return false + }) + + return rts +} + +func (n *node) walkRoutes(pattern string, nd *node, fn walkFn) bool { + pattern = nd.pattern + + // Visit the leaf values if any + if (nd.handlers != nil || nd.subroutes != nil) && 
fn(pattern, nd.handlers, nd.subroutes) { + return true + } + + // Recurse on the children + for _, nds := range nd.children { + for _, nd := range nds { + if n.walkRoutes(pattern, nd, fn) { + return true + } + } + } + return false +} + +func methodTypString(method methodTyp) string { + for s, t := range methodMap { + if method == t { + return s + } + } + return "" +} + +type walkFn func(pattern string, handlers methodHandlers, subroutes Routes) bool + +// methodHandlers is a mapping of http method constants to handlers +// for a given route. +type methodHandlers map[methodTyp]http.Handler + +type nodes []*node + +// Sort the list of nodes by label +func (ns nodes) Len() int { return len(ns) } +func (ns nodes) Less(i, j int) bool { return ns[i].label < ns[j].label } +func (ns nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] } +func (ns nodes) Sort() { sort.Sort(ns) } + +type Route struct { + Pattern string + Handlers map[string]http.Handler + SubRoutes Routes +} diff --git a/vendor/github.com/tidwall/btree/LICENSE b/vendor/github.com/tidwall/btree/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/vendor/github.com/tidwall/btree/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/tidwall/btree/README.md b/vendor/github.com/tidwall/btree/README.md new file mode 100644 index 0000000..deb1e88 --- /dev/null +++ b/vendor/github.com/tidwall/btree/README.md @@ -0,0 +1,107 @@ +BTree implementation for Go +=========================== + +![Travis CI Build Status](https://api.travis-ci.org/tidwall/btree.svg?branch=master) +[![GoDoc](https://godoc.org/github.com/tidwall/btree?status.svg)](https://godoc.org/github.com/tidwall/btree) + +This package provides an in-memory B-Tree implementation for Go, useful as +an ordered, mutable data structure. + +This is a fork of the wonderful [google/btree](https://github.com/google/btree) package. It's has all the same great features and adds a few more. + +- Descend* functions for iterating backwards. +- Iteration performance boost. +- User defined context. + +User defined context +-------------------- +This is a great new feature that allows for entering the same item into multiple B-trees, and each B-tree have a different ordering formula. 
+ +For example: + +```go +package main + +import ( + "fmt" + + "github.com/tidwall/btree" +) + +type Item struct { + Key, Val string +} + +func (i1 *Item) Less(item btree.Item, ctx interface{}) bool { + i2 := item.(*Item) + switch tag := ctx.(type) { + case string: + if tag == "vals" { + if i1.Val < i2.Val { + return true + } else if i1.Val > i2.Val { + return false + } + // Both vals are equal so we should fall though + // and let the key comparison take over. + } + } + return i1.Key < i2.Key +} + +func main() { + + // Create a tree for keys and a tree for values. + // The "keys" tree will be sorted on the Keys field. + // The "values" tree will be sorted on the Values field. + keys := btree.New(16, "keys") + vals := btree.New(16, "vals") + + // Create some items. + users := []*Item{ + &Item{Key: "user:1", Val: "Jane"}, + &Item{Key: "user:2", Val: "Andy"}, + &Item{Key: "user:3", Val: "Steve"}, + &Item{Key: "user:4", Val: "Andrea"}, + &Item{Key: "user:5", Val: "Janet"}, + &Item{Key: "user:6", Val: "Andy"}, + } + + // Insert each user into both trees + for _, user := range users { + keys.ReplaceOrInsert(user) + vals.ReplaceOrInsert(user) + } + + // Iterate over each user in the key tree + keys.Ascend(func(item btree.Item) bool { + kvi := item.(*Item) + fmt.Printf("%s %s\n", kvi.Key, kvi.Val) + return true + }) + + fmt.Printf("\n") + // Iterate over each user in the val tree + vals.Ascend(func(item btree.Item) bool { + kvi := item.(*Item) + fmt.Printf("%s %s\n", kvi.Key, kvi.Val) + return true + }) +} + +// Should see the results +/* +user:1 Jane +user:2 Andy +user:3 Steve +user:4 Andrea +user:5 Janet +user:6 Andy + +user:4 Andrea +user:2 Andy +user:6 Andy +user:1 Jane +user:3 Steve +*/ +``` diff --git a/vendor/github.com/tidwall/btree/btree.go b/vendor/github.com/tidwall/btree/btree.go new file mode 100644 index 0000000..26f0d23 --- /dev/null +++ b/vendor/github.com/tidwall/btree/btree.go @@ -0,0 +1,968 @@ +// Copyright 2014 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package btree implements in-memory B-Trees of arbitrary degree. +// +// btree implements an in-memory B-Tree for use as an ordered data structure. +// It is not meant for persistent storage solutions. +// +// It has a flatter structure than an equivalent red-black or other binary tree, +// which in some cases yields better memory usage and/or performance. +// See some discussion on the matter here: +// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html +// Note, though, that this project is in no way related to the C++ B-Tree +// implementation written about there. +// +// Within this tree, each node contains a slice of items and a (possibly nil) +// slice of children. For basic numeric values or raw structs, this can cause +// efficiency differences when compared to equivalent C++ template code that +// stores values in arrays within the node: +// * Due to the overhead of storing values as interfaces (each +// value needs to be stored as the value itself, then 2 words for the +// interface pointing to that value and its type), resulting in higher +// memory use. +// * Since interfaces can point to values anywhere in memory, values are +// most likely not stored in contiguous blocks, resulting in a higher +// number of cache misses. 
+// These issues don't tend to matter, though, when working with strings or other +// heap-allocated structures, since C++-equivalent structures also must store +// pointers and also distribute their values across the heap. +// +// This implementation is designed to be a drop-in replacement to gollrb.LLRB +// trees, (http://github.com/petar/gollrb), an excellent and probably the most +// widely used ordered tree implementation in the Go ecosystem currently. +// Its functions, therefore, exactly mirror those of +// llrb.LLRB where possible. Unlike gollrb, though, we currently don't +// support storing multiple equivalent values. +package btree + +import ( + "fmt" + "io" + "strings" + "sync" +) + +// Item represents a single object in the tree. +type Item interface { + // Less tests whether the current item is less than the given argument. + // + // This must provide a strict weak ordering. + // If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only + // hold one of either a or b in the tree). + // + // There is a user-defined ctx argument that is equal to the ctx value which + // is set at time of the btree contruction. + Less(than Item, ctx interface{}) bool +} + +const ( + DefaultFreeListSize = 32 +) + +var ( + nilItems = make(items, 16) + nilChildren = make(children, 16) +) + +// FreeList represents a free list of btree nodes. By default each +// BTree has its own FreeList, but multiple BTrees can share the same +// FreeList. +// Two Btrees using the same freelist are safe for concurrent write access. +type FreeList struct { + mu sync.Mutex + freelist []*node +} + +// NewFreeList creates a new free list. +// size is the maximum size of the returned free list. 
+func NewFreeList(size int) *FreeList { + return &FreeList{freelist: make([]*node, 0, size)} +} + +func (f *FreeList) newNode() (n *node) { + f.mu.Lock() + index := len(f.freelist) - 1 + if index < 0 { + f.mu.Unlock() + return new(node) + } + n = f.freelist[index] + f.freelist[index] = nil + f.freelist = f.freelist[:index] + f.mu.Unlock() + return +} + +func (f *FreeList) freeNode(n *node) { + f.mu.Lock() + if len(f.freelist) < cap(f.freelist) { + f.freelist = append(f.freelist, n) + } + f.mu.Unlock() +} + +// ItemIterator allows callers of Ascend* to iterate in-order over portions of +// the tree. When this function returns false, iteration will stop and the +// associated Ascend* function will immediately return. +type ItemIterator func(i Item) bool + +// New creates a new B-Tree with the given degree. +// +// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items +// and 2-4 children). +func New(degree int, ctx interface{}) *BTree { + return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize), ctx) +} + +// NewWithFreeList creates a new B-Tree that uses the given node free list. +func NewWithFreeList(degree int, f *FreeList, ctx interface{}) *BTree { + if degree <= 1 { + panic("bad degree") + } + return &BTree{ + degree: degree, + cow: ©OnWriteContext{freelist: f}, + ctx: ctx, + } +} + +// items stores items in a node. +type items []Item + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *items) insertAt(index int, item Item) { + *s = append(*s, nil) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = item +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. +func (s *items) removeAt(index int) Item { + item := (*s)[index] + copy((*s)[index:], (*s)[index+1:]) + (*s)[len(*s)-1] = nil + *s = (*s)[:len(*s)-1] + return item +} + +// pop removes and returns the last element in the list. 
+func (s *items) pop() (out Item) { + index := len(*s) - 1 + out = (*s)[index] + (*s)[index] = nil + *s = (*s)[:index] + return +} + +// truncate truncates this instance at index so that it contains only the +// first index items. index must be less than or equal to length. +func (s *items) truncate(index int) { + var toClear items + *s, toClear = (*s)[:index], (*s)[index:] + for len(toClear) > 0 { + toClear = toClear[copy(toClear, nilItems):] + } +} + +// find returns the index where the given item should be inserted into this +// list. 'found' is true if the item already exists in the list at the given +// index. +func (s items) find(item Item, ctx interface{}) (index int, found bool) { + i, j := 0, len(s) + for i < j { + h := i + (j-i)/2 + if !item.Less(s[h], ctx) { + i = h + 1 + } else { + j = h + } + } + if i > 0 && !s[i-1].Less(item, ctx) { + return i - 1, true + } + return i, false +} + +// children stores child nodes in a node. +type children []*node + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *children) insertAt(index int, n *node) { + *s = append(*s, nil) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = n +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. +func (s *children) removeAt(index int) *node { + n := (*s)[index] + copy((*s)[index:], (*s)[index+1:]) + (*s)[len(*s)-1] = nil + *s = (*s)[:len(*s)-1] + return n +} + +// pop removes and returns the last element in the list. +func (s *children) pop() (out *node) { + index := len(*s) - 1 + out = (*s)[index] + (*s)[index] = nil + *s = (*s)[:index] + return +} + +// truncate truncates this instance at index so that it contains only the +// first index children. index must be less than or equal to length. 
+func (s *children) truncate(index int) { + var toClear children + *s, toClear = (*s)[:index], (*s)[index:] + for len(toClear) > 0 { + toClear = toClear[copy(toClear, nilChildren):] + } +} + +// node is an internal node in a tree. +// +// It must at all times maintain the invariant that either +// * len(children) == 0, len(items) unconstrained +// * len(children) == len(items) + 1 +type node struct { + items items + children children + cow *copyOnWriteContext +} + +func (n *node) mutableFor(cow *copyOnWriteContext) *node { + if n.cow == cow { + return n + } + out := cow.newNode() + if cap(out.items) >= len(n.items) { + out.items = out.items[:len(n.items)] + } else { + out.items = make(items, len(n.items), cap(n.items)) + } + copy(out.items, n.items) + // Copy children + if cap(out.children) >= len(n.children) { + out.children = out.children[:len(n.children)] + } else { + out.children = make(children, len(n.children), cap(n.children)) + } + copy(out.children, n.children) + return out +} + +func (n *node) mutableChild(i int) *node { + c := n.children[i].mutableFor(n.cow) + n.children[i] = c + return c +} + +// split splits the given node at the given index. The current node shrinks, +// and this function returns the item that existed at that index and a new node +// containing all items/children after it. +func (n *node) split(i int) (Item, *node) { + item := n.items[i] + next := n.cow.newNode() + next.items = append(next.items, n.items[i+1:]...) + n.items.truncate(i) + if len(n.children) > 0 { + next.children = append(next.children, n.children[i+1:]...) + n.children.truncate(i + 1) + } + return item, next +} + +// maybeSplitChild checks if a child should be split, and if so splits it. +// Returns whether or not a split occurred. 
+func (n *node) maybeSplitChild(i, maxItems int) bool { + if len(n.children[i].items) < maxItems { + return false + } + first := n.mutableChild(i) + item, second := first.split(maxItems / 2) + n.items.insertAt(i, item) + n.children.insertAt(i+1, second) + return true +} + +// insert inserts an item into the subtree rooted at this node, making sure +// no nodes in the subtree exceed maxItems items. Should an equivalent item be +// be found/replaced by insert, it will be returned. +func (n *node) insert(item Item, maxItems int, ctx interface{}) Item { + i, found := n.items.find(item, ctx) + if found { + out := n.items[i] + n.items[i] = item + return out + } + if len(n.children) == 0 { + n.items.insertAt(i, item) + return nil + } + if n.maybeSplitChild(i, maxItems) { + inTree := n.items[i] + switch { + case item.Less(inTree, ctx): + // no change, we want first split node + case inTree.Less(item, ctx): + i++ // we want second split node + default: + out := n.items[i] + n.items[i] = item + return out + } + } + return n.mutableChild(i).insert(item, maxItems, ctx) +} + +// get finds the given key in the subtree and returns it. +func (n *node) get(key Item, ctx interface{}) Item { + i, found := n.items.find(key, ctx) + if found { + return n.items[i] + } else if len(n.children) > 0 { + return n.children[i].get(key, ctx) + } + return nil +} + +// min returns the first item in the subtree. +func min(n *node) Item { + if n == nil { + return nil + } + for len(n.children) > 0 { + n = n.children[0] + } + if len(n.items) == 0 { + return nil + } + return n.items[0] +} + +// max returns the last item in the subtree. +func max(n *node) Item { + if n == nil { + return nil + } + for len(n.children) > 0 { + n = n.children[len(n.children)-1] + } + if len(n.items) == 0 { + return nil + } + return n.items[len(n.items)-1] +} + +// toRemove details what item to remove in a node.remove call. 
+type toRemove int + +const ( + removeItem toRemove = iota // removes the given item + removeMin // removes smallest item in the subtree + removeMax // removes largest item in the subtree +) + +// remove removes an item from the subtree rooted at this node. +func (n *node) remove(item Item, minItems int, typ toRemove, ctx interface{}) Item { + var i int + var found bool + switch typ { + case removeMax: + if len(n.children) == 0 { + return n.items.pop() + } + i = len(n.items) + case removeMin: + if len(n.children) == 0 { + return n.items.removeAt(0) + } + i = 0 + case removeItem: + i, found = n.items.find(item, ctx) + if len(n.children) == 0 { + if found { + return n.items.removeAt(i) + } + return nil + } + default: + panic("invalid type") + } + // If we get to here, we have children. + if len(n.children[i].items) <= minItems { + return n.growChildAndRemove(i, item, minItems, typ, ctx) + } + child := n.mutableChild(i) + // Either we had enough items to begin with, or we've done some + // merging/stealing, because we've got enough now and we're ready to return + // stuff. + if found { + // The item exists at index 'i', and the child we've selected can give us a + // predecessor, since if we've gotten here it's got > minItems items in it. + out := n.items[i] + // We use our special-case 'remove' call with typ=maxItem to pull the + // predecessor of item i (the rightmost leaf of our immediate left child) + // and set it into where we pulled the item from. + n.items[i] = child.remove(nil, minItems, removeMax, ctx) + return out + } + // Final recursive call. Once we're here, we know that the item isn't in this + // node and that the child is big enough to remove from. + return child.remove(item, minItems, typ, ctx) +} + +// growChildAndRemove grows child 'i' to make sure it's possible to remove an +// item from it while keeping it at minItems, then calls remove to actually +// remove it. 
+// +// Most documentation says we have to do two sets of special casing: +// 1) item is in this node +// 2) item is in child +// In both cases, we need to handle the two subcases: +// A) node has enough values that it can spare one +// B) node doesn't have enough values +// For the latter, we have to check: +// a) left sibling has node to spare +// b) right sibling has node to spare +// c) we must merge +// To simplify our code here, we handle cases #1 and #2 the same: +// If a node doesn't have enough items, we make sure it does (using a,b,c). +// We then simply redo our remove call, and the second time (regardless of +// whether we're in case 1 or 2), we'll have enough items and can guarantee +// that we hit case A. +func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove, ctx interface{}) Item { + if i > 0 && len(n.children[i-1].items) > minItems { + // Steal from left child + child := n.mutableChild(i) + stealFrom := n.mutableChild(i - 1) + stolenItem := stealFrom.items.pop() + child.items.insertAt(0, n.items[i-1]) + n.items[i-1] = stolenItem + if len(stealFrom.children) > 0 { + child.children.insertAt(0, stealFrom.children.pop()) + } + } else if i < len(n.items) && len(n.children[i+1].items) > minItems { + // steal from right child + child := n.mutableChild(i) + stealFrom := n.mutableChild(i + 1) + stolenItem := stealFrom.items.removeAt(0) + child.items = append(child.items, n.items[i]) + n.items[i] = stolenItem + if len(stealFrom.children) > 0 { + child.children = append(child.children, stealFrom.children.removeAt(0)) + } + } else { + if i >= len(n.items) { + i-- + } + child := n.mutableChild(i) + // merge with right child + mergeItem := n.items.removeAt(i) + mergeChild := n.children.removeAt(i + 1) + child.items = append(child.items, mergeItem) + child.items = append(child.items, mergeChild.items...) + child.children = append(child.children, mergeChild.children...) 
+ n.cow.freeNode(mergeChild) + } + return n.remove(item, minItems, typ, ctx) +} + +type direction int + +const ( + descend = direction(-1) + ascend = direction(+1) +) + +// iterate provides a simple method for iterating over elements in the tree. +// +// When ascending, the 'start' should be less than 'stop' and when descending, +// the 'start' should be greater than 'stop'. Setting 'includeStart' to true +// will force the iterator to include the first item when it equals 'start', +// thus creating a "greaterOrEqual" or "lessThanEqual" rather than just a +// "greaterThan" or "lessThan" queries. +func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator, ctx interface{}) (bool, bool) { + var ok bool + switch dir { + case ascend: + for i := 0; i < len(n.items); i++ { + if start != nil && n.items[i].Less(start, ctx) { + continue + } + if len(n.children) > 0 { + if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter, ctx); !ok { + return hit, false + } + } + if !includeStart && !hit && start != nil && !start.Less(n.items[i], ctx) { + hit = true + continue + } + hit = true + if stop != nil && !n.items[i].Less(stop, ctx) { + return hit, false + } + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter, ctx); !ok { + return hit, false + } + } + case descend: + for i := len(n.items) - 1; i >= 0; i-- { + if start != nil && !n.items[i].Less(start, ctx) { + if !includeStart || hit || start.Less(n.items[i], ctx) { + continue + } + } + if len(n.children) > 0 { + if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter, ctx); !ok { + return hit, false + } + } + if stop != nil && !stop.Less(n.items[i], ctx) { + return hit, false // continue + } + hit = true + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = 
n.children[0].iterate(dir, start, stop, includeStart, hit, iter, ctx); !ok { + return hit, false + } + } + } + return hit, true +} + +// Used for testing/debugging purposes. +func (n *node) print(w io.Writer, level int) { + fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items) + for _, c := range n.children { + c.print(w, level+1) + } +} + +// BTree is an implementation of a B-Tree. +// +// BTree stores Item instances in an ordered structure, allowing easy insertion, +// removal, and iteration. +// +// Write operations are not safe for concurrent mutation by multiple +// goroutines, but Read operations are. +type BTree struct { + degree int + length int + root *node + ctx interface{} + cow *copyOnWriteContext +} + +// copyOnWriteContext pointers determine node ownership... a tree with a write +// context equivalent to a node's write context is allowed to modify that node. +// A tree whose write context does not match a node's is not allowed to modify +// it, and must create a new, writable copy (IE: it's a Clone). +// +// When doing any write operation, we maintain the invariant that the current +// node's context is equal to the context of the tree that requested the write. +// We do this by, before we descend into any node, creating a copy with the +// correct context if the contexts don't match. +// +// Since the node we're currently visiting on any write has the requesting +// tree's context, that node is modifiable in place. Children of that node may +// not share context, but before we descend into them, we'll make a mutable +// copy. +type copyOnWriteContext struct { + freelist *FreeList +} + +// Clone clones the btree, lazily. Clone should not be called concurrently, +// but the original tree (t) and the new tree (t2) can be used concurrently +// once the Clone call completes. +// +// The internal tree structure of b is marked read-only and shared between t and +// t2. 
Writes to both t and t2 use copy-on-write logic, creating new nodes +// whenever one of b's original nodes would have been modified. Read operations +// should have no performance degredation. Write operations for both t and t2 +// will initially experience minor slow-downs caused by additional allocs and +// copies due to the aforementioned copy-on-write logic, but should converge to +// the original performance characteristics of the original tree. +func (t *BTree) Clone() (t2 *BTree) { + // Create two entirely new copy-on-write contexts. + // This operation effectively creates three trees: + // the original, shared nodes (old b.cow) + // the new b.cow nodes + // the new out.cow nodes + cow1, cow2 := *t.cow, *t.cow + out := *t + t.cow = &cow1 + out.cow = &cow2 + return &out +} + +// maxItems returns the max number of items to allow per node. +func (t *BTree) maxItems() int { + return t.degree*2 - 1 +} + +// minItems returns the min number of items to allow per node (ignored for the +// root node). +func (t *BTree) minItems() int { + return t.degree - 1 +} + +func (c *copyOnWriteContext) newNode() (n *node) { + n = c.freelist.newNode() + n.cow = c + return +} + +func (c *copyOnWriteContext) freeNode(n *node) { + if n.cow == c { + // clear to allow GC + n.items.truncate(0) + n.children.truncate(0) + n.cow = nil + c.freelist.freeNode(n) + } +} + +// ReplaceOrInsert adds the given item to the tree. If an item in the tree +// already equals the given one, it is removed from the tree and returned. +// Otherwise, nil is returned. +// +// nil cannot be added to the tree (will panic). 
+func (t *BTree) ReplaceOrInsert(item Item) Item { + if item == nil { + panic("nil item being added to BTree") + } + if t.root == nil { + t.root = t.cow.newNode() + t.root.items = append(t.root.items, item) + t.length++ + return nil + } else { + t.root = t.root.mutableFor(t.cow) + if len(t.root.items) >= t.maxItems() { + item2, second := t.root.split(t.maxItems() / 2) + oldroot := t.root + t.root = t.cow.newNode() + t.root.items = append(t.root.items, item2) + t.root.children = append(t.root.children, oldroot, second) + } + } + out := t.root.insert(item, t.maxItems(), t.ctx) + if out == nil { + t.length++ + } + return out +} + +// Delete removes an item equal to the passed in item from the tree, returning +// it. If no such item exists, returns nil. +func (t *BTree) Delete(item Item) Item { + return t.deleteItem(item, removeItem, t.ctx) +} + +// DeleteMin removes the smallest item in the tree and returns it. +// If no such item exists, returns nil. +func (t *BTree) DeleteMin() Item { + return t.deleteItem(nil, removeMin, t.ctx) +} + +// DeleteMax removes the largest item in the tree and returns it. +// If no such item exists, returns nil. +func (t *BTree) DeleteMax() Item { + return t.deleteItem(nil, removeMax, t.ctx) +} + +func (t *BTree) deleteItem(item Item, typ toRemove, ctx interface{}) Item { + if t.root == nil || len(t.root.items) == 0 { + return nil + } + t.root = t.root.mutableFor(t.cow) + out := t.root.remove(item, t.minItems(), typ, ctx) + if len(t.root.items) == 0 && len(t.root.children) > 0 { + oldroot := t.root + t.root = t.root.children[0] + t.cow.freeNode(oldroot) + } + if out != nil { + t.length-- + } + return out +} + +// AscendRange calls the iterator for every value in the tree within the range +// [greaterOrEqual, lessThan), until iterator returns false. 
+func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator, t.ctx) +} + +// AscendLessThan calls the iterator for every value in the tree within the range +// [first, pivot), until iterator returns false. +func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, nil, pivot, false, false, iterator, t.ctx) +} + +// AscendGreaterOrEqual calls the iterator for every value in the tree within +// the range [pivot, last], until iterator returns false. +func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, pivot, nil, true, false, iterator, t.ctx) +} + +// Ascend calls the iterator for every value in the tree within the range +// [first, last], until iterator returns false. +func (t *BTree) Ascend(iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, nil, nil, false, false, iterator, t.ctx) +} + +// DescendRange calls the iterator for every value in the tree within the range +// [lessOrEqual, greaterThan), until iterator returns false. +func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator, t.ctx) +} + +// DescendLessOrEqual calls the iterator for every value in the tree within the range +// [pivot, first], until iterator returns false. +func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, pivot, nil, true, false, iterator, t.ctx) +} + +// DescendGreaterThan calls the iterator for every value in the tree within +// the range (pivot, last], until iterator returns false. 
+func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, nil, pivot, false, false, iterator, t.ctx) +} + +// Descend calls the iterator for every value in the tree within the range +// [last, first], until iterator returns false. +func (t *BTree) Descend(iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, nil, nil, false, false, iterator, t.ctx) +} + +// Get looks for the key item in the tree, returning it. It returns nil if +// unable to find that item. +func (t *BTree) Get(key Item) Item { + if t.root == nil { + return nil + } + return t.root.get(key, t.ctx) +} + +// Min returns the smallest item in the tree, or nil if the tree is empty. +func (t *BTree) Min() Item { + return min(t.root) +} + +// Max returns the largest item in the tree, or nil if the tree is empty. +func (t *BTree) Max() Item { + return max(t.root) +} + +// Has returns true if the given key is in the tree. +func (t *BTree) Has(key Item) bool { + return t.Get(key) != nil +} + +// Len returns the number of items currently in the tree. +func (t *BTree) Len() int { + return t.length +} + +// Int implements the Item interface for integers. +type Int int + +// Less returns true if int(a) < int(b). +func (a Int) Less(b Item, ctx interface{}) bool { + return a < b.(Int) +} + +type stackItem struct { + n *node // current node + i int // index of the next child/item. +} + +// Cursor represents an iterator that can traverse over all items in the tree +// in sorted order. +// +// Changing data while traversing a cursor may result in unexpected items to +// be returned. You must reposition your cursor after mutating data. +type Cursor struct { + t *BTree + stack []stackItem +} + +// Cursor returns a new cursor used to traverse over items in the tree. +func (t *BTree) Cursor() *Cursor { + return &Cursor{t: t} +} + +// First moves the cursor to the first item in the tree and returns that item. 
+func (c *Cursor) First() Item { + c.stack = c.stack[:0] + n := c.t.root + if n == nil { + return nil + } + c.stack = append(c.stack, stackItem{n: n}) + for len(n.children) > 0 { + n = n.children[0] + c.stack = append(c.stack, stackItem{n: n}) + } + if len(n.items) == 0 { + return nil + } + return n.items[0] +} + +// Next moves the cursor to the next item and returns that item. +func (c *Cursor) Next() Item { + if len(c.stack) == 0 { + return nil + } + si := len(c.stack) - 1 + c.stack[si].i++ + n := c.stack[si].n + i := c.stack[si].i + if i == len(n.children)+len(n.items) { + c.stack = c.stack[:len(c.stack)-1] + return c.Next() + } + if len(n.children) == 0 { + if i >= len(n.items) { + c.stack = c.stack[:len(c.stack)-1] + return c.Next() + } + return n.items[i] + } else if i%2 == 1 { + return n.items[i/2] + } + c.stack = append(c.stack, stackItem{n: n.children[i/2], i: -1}) + return c.Next() + +} + +// Last moves the cursor to the last item in the tree and returns that item. +func (c *Cursor) Last() Item { + c.stack = c.stack[:0] + n := c.t.root + if n == nil { + return nil + } + c.stack = append(c.stack, stackItem{n: n, i: len(n.children) + len(n.items) - 1}) + for len(n.children) > 0 { + n = n.children[len(n.children)-1] + c.stack = append(c.stack, stackItem{n: n, i: len(n.children) + len(n.items) - 1}) + } + if len(n.items) == 0 { + return nil + } + return n.items[len(n.items)-1] +} + +// Prev moves the cursor to the previous item and returns that item. 
+func (c *Cursor) Prev() Item { + if len(c.stack) == 0 { + return nil + } + si := len(c.stack) - 1 + c.stack[si].i-- + n := c.stack[si].n + i := c.stack[si].i + if i == -1 { + c.stack = c.stack[:len(c.stack)-1] + return c.Prev() + } + if len(n.children) == 0 { + return n.items[i] + } else if i%2 == 1 { + return n.items[i/2] + } + child := n.children[i/2] + c.stack = append(c.stack, stackItem{n: child, + i: len(child.children) + len(child.items)}) + return c.Prev() +} + +// Seek moves the cursor to provided item and returns that item. +// If the item does not exist then the next item is returned. +func (c *Cursor) Seek(pivot Item) Item { + c.stack = c.stack[:0] + n := c.t.root + for n != nil { + i, found := n.items.find(pivot, c.t.ctx) + c.stack = append(c.stack, stackItem{n: n}) + if found { + if len(n.children) == 0 { + c.stack[len(c.stack)-1].i = i + } else { + c.stack[len(c.stack)-1].i = i*2 + 1 + } + return n.items[i] + } + if len(n.children) == 0 { + if i == len(n.items) { + c.stack[len(c.stack)-1].i = i + 1 + return c.Next() + } + c.stack[len(c.stack)-1].i = i + return n.items[i] + } + c.stack[len(c.stack)-1].i = i * 2 + n = n.children[i] + } + return nil +} diff --git a/vendor/github.com/tidwall/buntdb/LICENSE b/vendor/github.com/tidwall/buntdb/LICENSE new file mode 100644 index 0000000..58f5819 --- /dev/null +++ b/vendor/github.com/tidwall/buntdb/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2016 Josh Baker + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or 
substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/tidwall/buntdb/README.md b/vendor/github.com/tidwall/buntdb/README.md new file mode 100644 index 0000000..e595b57 --- /dev/null +++ b/vendor/github.com/tidwall/buntdb/README.md @@ -0,0 +1,606 @@ + + +==== + +BuntDB is a low-level, in-memory, key/value store in pure Go. +It persists to disk, is ACID compliant, and uses locking for multiple +readers and a single writer. It supports custom indexes and geospatial +data. It's ideal for projects that need a dependable database and favor +speed over data size. + +The desire to create BuntDB stems from the need for a new embeddable +database for [Tile38](https://github.com/tidwall/tile38) and [SummitDB](https://github.com/tidwall/summitdb). 
+ +Features +======== + +- In-memory database for [fast reads and writes](#performance) +- Embeddable with a [simple API](https://godoc.org/github.com/tidwall/buntdb) +- [Spatial indexing](#spatial-indexes) for up to 20 dimensions; Useful for Geospatial data +- Index fields inside [JSON](#json-indexes) documents +- [Collate i18n Indexes](#collate-i18n-indexes) using the optional [collate package](https://github.com/tidwall/collate) +- Create [custom indexes](#custom-indexes) for any data type +- Support for [multi value indexes](#multi-value-index); Similar to a SQL multi column index +- [Built-in types](#built-in-types) that are easy to get up & running; String, Uint, Int, Float +- Flexible [iteration](#iterating) of data; ascending, descending, and ranges +- [Durable append-only file](#append-only-file) format for persistence +- Option to evict old items with an [expiration](#data-expiration) TTL +- Tight codebase, under 2K loc using the `cloc` command +- ACID semantics with locking [transactions](#transactions) that support rollbacks + + +Getting Started +=============== + +## Installing + +To start using BuntDB, install Go and run `go get`: + +```sh +$ go get -u github.com/tidwall/buntdb +``` + +This will retrieve the library. + + +## Opening a database + +The primary object in BuntDB is a `DB`. To open or create your +database, use the `buntdb.Open()` function: + +```go +package main + +import ( + "log" + + "github.com/tidwall/buntdb" +) + +func main() { + // Open the data.db file. It will be created if it doesn't exist. + db, err := buntdb.Open("data.db") + if err != nil { + log.Fatal(err) + } + defer db.Close() + + ... +} +``` + +It's also possible to open a database that does not persist to disk by using `:memory:` as the path of the file. + +```go +buntdb.Open(":memory:") // Open a file that does not persist to disk. +``` + +## Transactions +All reads and writes must be performed from inside a transaction. 
BuntDB can have one write transaction opened at a time, but can have many concurrent read transactions. Each transaction maintains a stable view of the database. In other words, once a transaction has begun, the data for that transaction cannot be changed by other transactions. + +Transactions run in a function that exposes a `Tx` object, which represents the transaction state. While inside a transaction, all database operations should be performed using this object. You should never access the origin `DB` object while inside a transaction. Doing so may have side-effects, such as blocking your application. + +When a transaction fails, it will roll back, and revert all changes that occurred to the database during that transaction. There's a single return value that you can use to close the transaction. For read/write transactions, returning an error this way will force the transaction to roll back. When a read/write transaction succeeds all changes are persisted to disk. + +### Read-only Transactions +A read-only transaction should be used when you don't need to make changes to the data. The advantage of a read-only transaction is that there can be many running concurrently. + +```go +err := db.View(func(tx *buntdb.Tx) error { + ... + return nil +}) +``` + +### Read/write Transactions +A read/write transaction is used when you need to make changes to your data. There can only be one read/write transaction running at a time. So make sure you close it as soon as you are done with it. + +```go +err := db.Update(func(tx *buntdb.Tx) error { + ... 
+	return nil
+})
+```
+
+## Setting and getting key/values
+
+To set a value you must open a read/write transaction:
+
+```go
+err := db.Update(func(tx *buntdb.Tx) error {
+	_, _, err := tx.Set("mykey", "myvalue", nil)
+	return err
+})
+```
+
+
+To get the value:
+
+```go
+err := db.View(func(tx *buntdb.Tx) error {
+	val, err := tx.Get("mykey")
+	if err != nil{
+		return err
+	}
+	fmt.Printf("value is %s\n", val)
+	return nil
+})
+```
+
+Getting non-existent values will cause an `ErrNotFound` error.
+
+### Iterating
+All keys/value pairs are ordered in the database by the key. To iterate over the keys:
+
+```go
+err := db.View(func(tx *buntdb.Tx) error {
+	err := tx.Ascend("", func(key, value string) bool {
+		fmt.Printf("key: %s, value: %s\n", key, value); return true
+	})
+	return err
+})
+```
+
+There is also `AscendGreaterOrEqual`, `AscendLessThan`, `AscendRange`, `Descend`, `DescendLessOrEqual`, `DescendGreaterThan`, and `DescendRange`. Please see the [documentation](https://godoc.org/github.com/tidwall/buntdb) for more information on these functions.
+
+
+## Custom Indexes
+Initially all data is stored in a single [B-tree](https://en.wikipedia.org/wiki/B-tree) with each item having one key and one value. All of these items are ordered by the key. This is great for quickly getting a value from a key or [iterating](#iterating) over the keys. Feel free to peruse the [B-tree implementation](https://github.com/tidwall/btree).
+
+You can also create custom indexes that allow for ordering and [iterating](#iterating) over values. A custom index also uses a B-tree, but it's more flexible because it allows for custom ordering.
+
+For example, let's say you want to create an index for ordering names:
+
+```go
+db.CreateIndex("names", "*", buntdb.IndexString)
+```
+
+This will create an index named `names` which stores and sorts all values. The second parameter is a pattern that is used to filter on keys. A `*` wildcard argument means that we want to accept all keys. `IndexString` is a built-in function that performs case-insensitive ordering on the values.
+
+Now you can add various names:
+
+```go
+db.Update(func(tx *buntdb.Tx) error {
+	tx.Set("user:0:name", "tom", nil)
+	tx.Set("user:1:name", "Randi", nil)
+	tx.Set("user:2:name", "jane", nil)
+	tx.Set("user:4:name", "Janet", nil)
+	tx.Set("user:5:name", "Paula", nil)
+	tx.Set("user:6:name", "peter", nil)
+	tx.Set("user:7:name", "Terri", nil)
+	return nil
+})
+```
+
+Finally you can iterate over the index:
+
+```go
+db.View(func(tx *buntdb.Tx) error {
+	tx.Ascend("names", func(key, val string) bool {
+		fmt.Printf("%s %s\n", key, val)
+		return true
+	})
+	return nil
+})
+```
+The output should be:
+```
+user:2:name jane
+user:4:name Janet
+user:5:name Paula
+user:6:name peter
+user:1:name Randi
+user:7:name Terri
+user:0:name tom
+```
+
+The pattern parameter can be used to filter on keys like this:
+
+```go
+db.CreateIndex("names", "user:*", buntdb.IndexString)
+```
+
+Now only items with keys that have the prefix `user:` will be added to the `names` index.
+
+
+### Built-in types
+Along with `IndexString`, there is also `IndexInt`, `IndexUint`, and `IndexFloat`.
+These are built-in types for indexing. You can choose to use these or create your own.
+
+So to create an index that is numerically ordered on an age key, we could use:
+
+```go
+db.CreateIndex("ages", "user:*:age", buntdb.IndexInt)
+```
+
+And then add values:
+
+```go
+db.Update(func(tx *buntdb.Tx) error {
+	tx.Set("user:0:age", "35", nil)
+	tx.Set("user:1:age", "49", nil)
+	tx.Set("user:2:age", "13", nil)
+	tx.Set("user:4:age", "63", nil)
+	tx.Set("user:5:age", "8", nil)
+	tx.Set("user:6:age", "3", nil)
+	tx.Set("user:7:age", "16", nil)
+	return nil
+})
+```
+
+```go
+db.View(func(tx *buntdb.Tx) error {
+	tx.Ascend("ages", func(key, val string) bool {
+		fmt.Printf("%s %s\n", key, val)
+		return true
+	})
+	return nil
+})
+```
+
+The output should be:
+```
+user:6:age 3
+user:5:age 8
+user:2:age 13
+user:7:age 16
+user:0:age 35
+user:1:age 49
+user:4:age 63
+```
+
+## Spatial Indexes
+BuntDB has support for spatial indexes by storing rectangles in an [R-tree](https://en.wikipedia.org/wiki/R-tree). An R-tree is organized in a similar manner as a [B-tree](https://en.wikipedia.org/wiki/B-tree), and both are balanced trees. But, an R-tree is special because it can operate on data that is in multiple dimensions. This is super handy for Geospatial applications.
+
+To create a spatial index use the `CreateSpatialIndex` function:
+
+```go
+db.CreateSpatialIndex("fleet", "fleet:*:pos", buntdb.IndexRect)
+```
+
+Then `IndexRect` is a built-in function that converts rect strings to a format that the R-tree can use. It's easy to use this function out of the box, but you might find it better to create a custom one that renders from a different format, such as [Well-known text](https://en.wikipedia.org/wiki/Well-known_text) or [GeoJSON](http://geojson.org/).
+ +To add some lon,lat points to the `fleet` index: + +```go +db.Update(func(tx *buntdb.Tx) error { + tx.Set("fleet:0:pos", "[-115.567 33.532]", nil) + tx.Set("fleet:1:pos", "[-116.671 35.735]", nil) + tx.Set("fleet:2:pos", "[-113.902 31.234]", nil) + return nil +}) +``` + +And then you can run the `Intersects` function on the index: + +```go +db.View(func(tx *buntdb.Tx) error { + tx.Intersects("fleet", "[-117 30],[-112 36]", func(key, val string) bool { + ... + return true + }) + return nil +}) +``` + +This will get all three positions. + +### Spatial bracket syntax + +The bracket syntax `[-117 30],[-112 36]` is unique to BuntDB, and it's how the built-in rectangles are processed. But, you are not limited to this syntax. Whatever Rect function you choose to use during `CreateSpatialIndex` will be used to process the parameter, in this case it's `IndexRect`. + +- **2D rectangle:** `[10 15],[20 25]` +*Min XY: "10x15", Max XY: "20x25"* + +- **3D rectangle:** `[10 15 12],[20 25 18]` +*Min XYZ: "10x15x12", Max XYZ: "20x25x18"* + +- **2D point:** `[10 15]` +*XY: "10x15"* + +- **LatLon point:** `[-112.2693 33.5123]` +*LatLon: "33.5123 -112.2693"* + +- **LatLon bounding box:** `[-112.26 33.51],[-112.18 33.67]` +*Min LatLon: "33.51 -112.26", Max LatLon: "33.67 -112.18"* + +**Notice:** The longitude is the Y axis and is on the left, and latitude is the X axis and is on the right. + +You can also represent `Infinity` by using `-inf` and `+inf`. +For example, you might have the following points (`[X Y M]` where XY is a point and M is a timestamp): +``` +[3 9 1] +[3 8 2] +[4 8 3] +[4 7 4] +[5 7 5] +[5 6 6] +``` + +You can then do a search for all points with `M` between 2-4 by calling `Intersects`. + +```go +tx.Intersects("points", "[-inf -inf 2],[+inf +inf 4]", func(key, val string) bool { + println(val) + return true +}) +``` + +Which will return: + +``` +[3 8 2] +[4 8 3] +[4 7 4] +``` + +## JSON Indexes +Indexes can be created on individual fields inside JSON documents. 
BuntDB uses [GJSON](https://github.com/tidwall/gjson) under the hood. + +For example: + +```go +package main + +import ( + "fmt" + + "github.com/tidwall/buntdb" +) + +func main() { + db, _ := buntdb.Open(":memory:") + db.CreateIndex("last_name", "*", buntdb.IndexJSON("name.last")) + db.CreateIndex("age", "*", buntdb.IndexJSON("age")) + db.Update(func(tx *buntdb.Tx) error { + tx.Set("1", `{"name":{"first":"Tom","last":"Johnson"},"age":38}`, nil) + tx.Set("2", `{"name":{"first":"Janet","last":"Prichard"},"age":47}`, nil) + tx.Set("3", `{"name":{"first":"Carol","last":"Anderson"},"age":52}`, nil) + tx.Set("4", `{"name":{"first":"Alan","last":"Cooper"},"age":28}`, nil) + return nil + }) + db.View(func(tx *buntdb.Tx) error { + fmt.Println("Order by last name") + tx.Ascend("last_name", func(key, value string) bool { + fmt.Printf("%s: %s\n", key, value) + return true + }) + fmt.Println("Order by age") + tx.Ascend("age", func(key, value string) bool { + fmt.Printf("%s: %s\n", key, value) + return true + }) + fmt.Println("Order by age range 30-50") + tx.AscendRange("age", `{"age":30}`, `{"age":50}`, func(key, value string) bool { + fmt.Printf("%s: %s\n", key, value) + return true + }) + return nil + }) +} +``` + +Results: + +``` +Order by last name +3: {"name":{"first":"Carol","last":"Anderson"},"age":52} +4: {"name":{"first":"Alan","last":"Cooper"},"age":28} +1: {"name":{"first":"Tom","last":"Johnson"},"age":38} +2: {"name":{"first":"Janet","last":"Prichard"},"age":47} + +Order by age +4: {"name":{"first":"Alan","last":"Cooper"},"age":28} +1: {"name":{"first":"Tom","last":"Johnson"},"age":38} +2: {"name":{"first":"Janet","last":"Prichard"},"age":47} +3: {"name":{"first":"Carol","last":"Anderson"},"age":52} + +Order by age range 30-50 +1: {"name":{"first":"Tom","last":"Johnson"},"age":38} +2: {"name":{"first":"Janet","last":"Prichard"},"age":47} +``` + +## Multi Value Index +With BuntDB it's possible to join multiple values on a single index. 
+This is similar to a [multi column index](http://dev.mysql.com/doc/refman/5.7/en/multiple-column-indexes.html) in a traditional SQL database. + +In this example we are creating a multi value index on "name.last" and "age": + +```go +db, _ := buntdb.Open(":memory:") +db.CreateIndex("last_name_age", "*", buntdb.IndexJSON("name.last"), buntdb.IndexJSON("age")) +db.Update(func(tx *buntdb.Tx) error { + tx.Set("1", `{"name":{"first":"Tom","last":"Johnson"},"age":38}`, nil) + tx.Set("2", `{"name":{"first":"Janet","last":"Prichard"},"age":47}`, nil) + tx.Set("3", `{"name":{"first":"Carol","last":"Anderson"},"age":52}`, nil) + tx.Set("4", `{"name":{"first":"Alan","last":"Cooper"},"age":28}`, nil) + tx.Set("5", `{"name":{"first":"Sam","last":"Anderson"},"age":51}`, nil) + tx.Set("6", `{"name":{"first":"Melinda","last":"Prichard"},"age":44}`, nil) + return nil +}) +db.View(func(tx *buntdb.Tx) error { + tx.Ascend("last_name_age", func(key, value string) bool { + fmt.Printf("%s: %s\n", key, value) + return true + }) + return nil +}) + +// Output: +// 5: {"name":{"first":"Sam","last":"Anderson"},"age":51} +// 3: {"name":{"first":"Carol","last":"Anderson"},"age":52} +// 4: {"name":{"first":"Alan","last":"Cooper"},"age":28} +// 1: {"name":{"first":"Tom","last":"Johnson"},"age":38} +// 6: {"name":{"first":"Melinda","last":"Prichard"},"age":44} +// 2: {"name":{"first":"Janet","last":"Prichard"},"age":47} +``` + +## Descending Ordered Index +Any index can be put in descending order by wrapping it's less function with `buntdb.Desc`. + +```go +db.CreateIndex("last_name_age", "*", + buntdb.IndexJSON("name.last"), + buntdb.Desc(buntdb.IndexJSON("age"))) +``` + +This will create a multi value index where the last name is ascending and the age is descending. + +## Collate i18n Indexes + +Using the external [collate package](https://github.com/tidwall/collate) it's possible to create +indexes that are sorted by the specified language. 
This is similar to the [SQL COLLATE keyword](https://msdn.microsoft.com/en-us/library/ms174596.aspx) found in traditional databases. + +To install: + +``` +go get -u github.com/tidwall/collate +``` + +For example: + +```go +import "github.com/tidwall/collate" + +// To sort case-insensitive in French. +db.CreateIndex("name", "*", collate.IndexString("FRENCH_CI")) + +// To specify that numbers should sort numerically ("2" < "12") +// and use a comma to represent a decimal point. +db.CreateIndex("amount", "*", collate.IndexString("FRENCH_NUM")) +``` + +There's also support for Collation on JSON indexes: + +```go +db.CreateIndex("last_name", "*", collate.IndexJSON("CHINESE_CI", "name.last")) +``` + +Check out the [collate project](https://github.com/tidwall/collate) for more information. + +## Data Expiration +Items can be automatically evicted by using the `SetOptions` object in the `Set` function to set a `TTL`. + +```go +db.Update(func(tx *buntdb.Tx) error { + tx.Set("mykey", "myval", &buntdb.SetOptions{Expires:true, TTL:time.Second}) + return nil +}) +``` + +Now `mykey` will automatically be deleted after one second. You can remove the TTL by setting the value again with the same key/value, but with the options parameter set to nil. + +## Append-only File + +BuntDB uses an AOF (append-only file) which is a log of all database changes that occur from operations like `Set()` and `Delete()`. + +The format of this file looks like: +``` +set key:1 value1 +set key:2 value2 +set key:1 value3 +del key:2 +... +``` + +When the database opens again, it will read back the aof file and process each command in exact order. +This read process happens one time when the database opens. +From there on the file is only appended. + +As you may guess this log file can grow large over time. +There's a background routine that automatically shrinks the log file when it gets too large. 
+There is also a `Shrink()` function which will rewrite the aof file so that it contains only the items in the database.
+The shrink operation does not lock up the database so read and write transactions can continue while shrinking is in process.
+
+### Durability and fsync
+
+By default BuntDB executes an `fsync` once every second on the [aof file](#append-only-file). Which simply means that there's a chance that up to one second of data might be lost. If you need higher durability then there's an optional database config setting `Config.SyncPolicy` which can be set to `Always`.
+
+The `Config.SyncPolicy` has the following options:
+
+- `Never` - fsync is managed by the operating system, less safe
+- `EverySecond` - fsync every second, fast and safer, this is the default
+- `Always` - fsync after every write, very durable, slower
+
+## Config
+
+Here are some configuration options that can be used to change various behaviors of the database.
+
+- **SyncPolicy** adjusts how often the data is synced to disk. This value can be Never, EverySecond, or Always. Default is EverySecond.
+- **AutoShrinkPercentage** is used by the background process to trigger a shrink of the aof file when the size of the file is larger than the percentage of the result of the previous shrunk file. For example, if this value is 100, and the last shrink process resulted in a 100mb file, then the new aof file must be 200mb before a shrink is triggered. Default is 100.
+- **AutoShrinkMinSize** defines the minimum size of the aof file before an automatic shrink can occur. Default is 32MB.
+- **AutoShrinkDisabled** turns off automatic background shrinking. Default is false.
+
+To update the configuration you should call `ReadConfig` followed by `SetConfig`. For example:
+
+```go
+
+var config buntdb.Config
+if err := db.ReadConfig(&config); err != nil{
+	log.Fatal(err)
+}
+if err := db.SetConfig(config); err != nil{
+	log.Fatal(err)
+}
+```
+
+## Performance
+
+How fast is BuntDB?
+ +Here are some example [benchmarks](https://github.com/tidwall/raft-buntdb#raftstore-performance-comparison) when using BuntDB in a Raft Store implementation. + +You can also run the standard Go benchmark tool from the project root directory: + +``` +go test --bench=. +``` + +### BuntDB-Benchmark + +There's a [custom utility](https://github.com/tidwall/buntdb-benchmark) that was created specifically for benchmarking BuntDB. + +*These are the results from running the benchmarks on a MacBook Pro 15" 2.8 GHz Intel Core i7:* + +``` +$ buntdb-benchmark -q +GET: 4609604.74 operations per second +SET: 248500.33 operations per second +ASCEND_100: 2268998.79 operations per second +ASCEND_200: 1178388.14 operations per second +ASCEND_400: 679134.20 operations per second +ASCEND_800: 348445.55 operations per second +DESCEND_100: 2313821.69 operations per second +DESCEND_200: 1292738.38 operations per second +DESCEND_400: 675258.76 operations per second +DESCEND_800: 337481.67 operations per second +SPATIAL_SET: 134824.60 operations per second +SPATIAL_INTERSECTS_100: 939491.47 operations per second +SPATIAL_INTERSECTS_200: 561590.40 operations per second +SPATIAL_INTERSECTS_400: 306951.15 operations per second +SPATIAL_INTERSECTS_800: 159673.91 operations per second +``` + +To install this utility: + +``` +go get github.com/tidwall/buntdb-benchmark +``` + + + +## Contact +Josh Baker [@tidwall](http://twitter.com/tidwall) + +## License + +BuntDB source code is available under the MIT [License](/LICENSE). diff --git a/vendor/github.com/tidwall/buntdb/buntdb.go b/vendor/github.com/tidwall/buntdb/buntdb.go new file mode 100644 index 0000000..3a08935 --- /dev/null +++ b/vendor/github.com/tidwall/buntdb/buntdb.go @@ -0,0 +1,2062 @@ +// Package buntdb implements a low-level in-memory key/value store in pure Go. +// It persists to disk, is ACID compliant, and uses locking for multiple +// readers and a single writer. 
Bunt is ideal for projects that need +// a dependable database, and favor speed over data size. +package buntdb + +import ( + "bufio" + "errors" + "io" + "os" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/tidwall/btree" + "github.com/tidwall/gjson" + "github.com/tidwall/grect" + "github.com/tidwall/match" + "github.com/tidwall/rtree" +) + +var ( + // ErrTxNotWritable is returned when performing a write operation on a + // read-only transaction. + ErrTxNotWritable = errors.New("tx not writable") + + // ErrTxClosed is returned when committing or rolling back a transaction + // that has already been committed or rolled back. + ErrTxClosed = errors.New("tx closed") + + // ErrNotFound is returned when an item or index is not in the database. + ErrNotFound = errors.New("not found") + + // ErrInvalid is returned when the database file is an invalid format. + ErrInvalid = errors.New("invalid database") + + // ErrDatabaseClosed is returned when the database is closed. + ErrDatabaseClosed = errors.New("database closed") + + // ErrIndexExists is returned when an index already exists in the database. + ErrIndexExists = errors.New("index exists") + + // ErrInvalidOperation is returned when an operation cannot be completed. + ErrInvalidOperation = errors.New("invalid operation") + + // ErrInvalidSyncPolicy is returned for an invalid SyncPolicy value. + ErrInvalidSyncPolicy = errors.New("invalid sync policy") + + // ErrShrinkInProcess is returned when a shrink operation is in-process. + ErrShrinkInProcess = errors.New("shrink is in-process") + + // ErrPersistenceActive is returned when post-loading data from an database + // not opened with Open(":memory:"). + ErrPersistenceActive = errors.New("persistence active") + + // ErrTxIterating is returned when Set or Delete are called while iterating. + ErrTxIterating = errors.New("tx is iterating") +) + +// DB represents a collection of key-value pairs that persist on disk. 
+// Transactions are used for all forms of data access to the DB. +type DB struct { + mu sync.RWMutex // the gatekeeper for all fields + file *os.File // the underlying file + buf []byte // a buffer to write to + keys *btree.BTree // a tree of all item ordered by key + exps *btree.BTree // a tree of items ordered by expiration + idxs map[string]*index // the index trees. + exmgr bool // indicates that expires manager is running. + flushes int // a count of the number of disk flushes + closed bool // set when the database has been closed + config Config // the database configuration + persist bool // do we write to disk + shrinking bool // when an aof shrink is in-process. + lastaofsz int // the size of the last shrink aof size +} + +// SyncPolicy represents how often data is synced to disk. +type SyncPolicy int + +const ( + // Never is used to disable syncing data to disk. + // The faster and less safe method. + Never SyncPolicy = 0 + // EverySecond is used to sync data to disk every second. + // It's pretty fast and you can lose 1 second of data if there + // is a disaster. + // This is the recommended setting. + EverySecond = 1 + // Always is used to sync data after every write to disk. + // Slow. Very safe. + Always = 2 +) + +// Config represents database configuration options. These +// options are used to change various behaviors of the database. +type Config struct { + // SyncPolicy adjusts how often the data is synced to disk. + // This value can be Never, EverySecond, or Always. + // The default is EverySecond. + SyncPolicy SyncPolicy + + // AutoShrinkPercentage is used by the background process to trigger + // a shrink of the aof file when the size of the file is larger than the + // percentage of the result of the previous shrunk file. + // For example, if this value is 100, and the last shrink process + // resulted in a 100mb file, then the new aof file must be 200mb before + // a shrink is triggered. 
+ AutoShrinkPercentage int + + // AutoShrinkMinSize defines the minimum size of the aof file before + // an automatic shrink can occur. + AutoShrinkMinSize int + + // AutoShrinkDisabled turns off automatic background shrinking + AutoShrinkDisabled bool + + // OnExpired is used to custom handle the deletion option when a key + // has been expired. + OnExpired func(keys []string) +} + +// exctx is a simple b-tree context for ordering by expiration. +type exctx struct { + db *DB +} + +// Default number of btree degrees +const btreeDegrees = 64 + +// Open opens a database at the provided path. +// If the file does not exist then it will be created automatically. +func Open(path string) (*DB, error) { + db := &DB{} + // initialize trees and indexes + db.keys = btree.New(btreeDegrees, nil) + db.exps = btree.New(btreeDegrees, &exctx{db}) + db.idxs = make(map[string]*index) + // initialize default configuration + db.config = Config{ + SyncPolicy: EverySecond, + AutoShrinkPercentage: 100, + AutoShrinkMinSize: 32 * 1024 * 1024, + } + // turn off persistence for pure in-memory + db.persist = path != ":memory:" + if db.persist { + var err error + // hardcoding 0666 as the default mode. + db.file, err = os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0666) + if err != nil { + return nil, err + } + // load the database from disk + if err := db.load(); err != nil { + // close on error, ignore close error + _ = db.file.Close() + return nil, err + } + } + // start the background manager. + go db.backgroundManager() + return db, nil +} + +// Close releases all database resources. +// All transactions must be closed before closing the database. +func (db *DB) Close() error { + db.mu.Lock() + defer db.mu.Unlock() + if db.closed { + return ErrDatabaseClosed + } + db.closed = true + if db.persist { + db.file.Sync() // do a sync but ignore the error + if err := db.file.Close(); err != nil { + return err + } + } + // Let's release all references to nil. 
This will help both with debugging + // late usage panics and it provides a hint to the garbage collector + db.keys, db.exps, db.idxs, db.file = nil, nil, nil, nil + return nil +} + +// Save writes a snapshot of the database to a writer. This operation blocks all +// writes, but not reads. This can be used for snapshots and backups for pure +// in-memory databases using the ":memory:". Database that persist to disk +// can be snapshotted by simply copying the database file. +func (db *DB) Save(wr io.Writer) error { + var err error + db.mu.RLock() + defer db.mu.RUnlock() + // use a buffered writer and flush every 4MB + var buf []byte + // iterated through every item in the database and write to the buffer + db.keys.Ascend(func(item btree.Item) bool { + dbi := item.(*dbItem) + buf = dbi.writeSetTo(buf) + if len(buf) > 1024*1024*4 { + // flush when buffer is over 4MB + _, err = wr.Write(buf) + if err != nil { + return false + } + buf = buf[:0] + } + return true + }) + if err != nil { + return err + } + // one final flush + if len(buf) > 0 { + _, err = wr.Write(buf) + if err != nil { + return err + } + } + return nil +} + +// Load loads commands from reader. This operation blocks all reads and writes. +// Note that this can only work for fully in-memory databases opened with +// Open(":memory:"). +func (db *DB) Load(rd io.Reader) error { + db.mu.Lock() + defer db.mu.Unlock() + if db.persist { + // cannot load into databases that persist to disk + return ErrPersistenceActive + } + return db.readLoad(rd, time.Now()) +} + +// index represents a b-tree or r-tree index and also acts as the +// b-tree/r-tree context for itself. 
+type index struct { + btr *btree.BTree // contains the items + rtr *rtree.RTree // contains the items + name string // name of the index + pattern string // a required key pattern + less func(a, b string) bool // less comparison function + rect func(item string) (min, max []float64) // rect from string function + db *DB // the origin database + opts IndexOptions // index options +} + +// match matches the pattern to the key +func (idx *index) match(key string) bool { + if idx.pattern == "*" { + return true + } + if idx.opts.CaseInsensitiveKeyMatching { + for i := 0; i < len(key); i++ { + if key[i] >= 'A' && key[i] <= 'Z' { + key = strings.ToLower(key) + break + } + } + } + return match.Match(key, idx.pattern) +} + +// clearCopy creates a copy of the index, but with an empty dataset. +func (idx *index) clearCopy() *index { + // copy the index meta information + nidx := &index{ + name: idx.name, + pattern: idx.pattern, + db: idx.db, + less: idx.less, + rect: idx.rect, + opts: idx.opts, + } + // initialize with empty trees + if nidx.less != nil { + nidx.btr = btree.New(btreeDegrees, nidx) + } + if nidx.rect != nil { + nidx.rtr = rtree.New(nidx) + } + return nidx +} + +// rebuild rebuilds the index +func (idx *index) rebuild() { + // initialize trees + if idx.less != nil { + idx.btr = btree.New(btreeDegrees, idx) + } + if idx.rect != nil { + idx.rtr = rtree.New(idx) + } + // iterate through all keys and fill the index + idx.db.keys.Ascend(func(item btree.Item) bool { + dbi := item.(*dbItem) + if !idx.match(dbi.key) { + // does not match the pattern, conintue + return true + } + if idx.less != nil { + idx.btr.ReplaceOrInsert(dbi) + } + if idx.rect != nil { + idx.rtr.Insert(dbi) + } + return true + }) +} + +// CreateIndex builds a new index and populates it with items. +// The items are ordered in an b-tree and can be retrieved using the +// Ascend* and Descend* methods. +// An error will occur if an index with the same name already exists. 
+// +// When a pattern is provided, the index will be populated with +// keys that match the specified pattern. This is a very simple pattern +// match where '*' matches on any number characters and '?' matches on +// any one character. +// The less function compares if string 'a' is less than string 'b'. +// It allows for indexes to create custom ordering. It's possible +// that the strings may be textual or binary. It's up to the provided +// less function to handle the content format and comparison. +// There are some default less function that can be used such as +// IndexString, IndexBinary, etc. +// +// Deprecated: Use Transactions +func (db *DB) CreateIndex(name, pattern string, + less ...func(a, b string) bool) error { + return db.Update(func(tx *Tx) error { + return tx.CreateIndex(name, pattern, less...) + }) +} + +// ReplaceIndex builds a new index and populates it with items. +// The items are ordered in an b-tree and can be retrieved using the +// Ascend* and Descend* methods. +// If a previous index with the same name exists, that index will be deleted. +// +// Deprecated: Use Transactions +func (db *DB) ReplaceIndex(name, pattern string, + less ...func(a, b string) bool) error { + return db.Update(func(tx *Tx) error { + err := tx.CreateIndex(name, pattern, less...) + if err != nil { + if err == ErrIndexExists { + err := tx.DropIndex(name) + if err != nil { + return err + } + return tx.CreateIndex(name, pattern, less...) + } + return err + } + return nil + }) +} + +// CreateSpatialIndex builds a new index and populates it with items. +// The items are organized in an r-tree and can be retrieved using the +// Intersects method. +// An error will occur if an index with the same name already exists. +// +// The rect function converts a string to a rectangle. The rectangle is +// represented by two arrays, min and max. Both arrays may have a length +// between 1 and 20, and both arrays must match in length. 
A length of 1 is a +// one dimensional rectangle, and a length of 4 is a four dimension rectangle. +// There is support for up to 20 dimensions. +// The values of min must be less than the values of max at the same dimension. +// Thus min[0] must be less-than-or-equal-to max[0]. +// The IndexRect is a default function that can be used for the rect +// parameter. +// +// Deprecated: Use Transactions +func (db *DB) CreateSpatialIndex(name, pattern string, + rect func(item string) (min, max []float64)) error { + return db.Update(func(tx *Tx) error { + return tx.CreateSpatialIndex(name, pattern, rect) + }) +} + +// ReplaceSpatialIndex builds a new index and populates it with items. +// The items are organized in an r-tree and can be retrieved using the +// Intersects method. +// If a previous index with the same name exists, that index will be deleted. +// +// Deprecated: Use Transactions +func (db *DB) ReplaceSpatialIndex(name, pattern string, + rect func(item string) (min, max []float64)) error { + return db.Update(func(tx *Tx) error { + err := tx.CreateSpatialIndex(name, pattern, rect) + if err != nil { + if err == ErrIndexExists { + err := tx.DropIndex(name) + if err != nil { + return err + } + return tx.CreateSpatialIndex(name, pattern, rect) + } + return err + } + return nil + }) +} + +// DropIndex removes an index. +// +// Deprecated: Use Transactions +func (db *DB) DropIndex(name string) error { + return db.Update(func(tx *Tx) error { + return tx.DropIndex(name) + }) +} + +// Indexes returns a list of index names. +// +// Deprecated: Use Transactions +func (db *DB) Indexes() ([]string, error) { + var names []string + var err = db.View(func(tx *Tx) error { + var err error + names, err = tx.Indexes() + return err + }) + return names, err +} + +// ReadConfig returns the database configuration. 
+func (db *DB) ReadConfig(config *Config) error { + db.mu.RLock() + defer db.mu.RUnlock() + if db.closed { + return ErrDatabaseClosed + } + *config = db.config + return nil +} + +// SetConfig updates the database configuration. +func (db *DB) SetConfig(config Config) error { + db.mu.Lock() + defer db.mu.Unlock() + if db.closed { + return ErrDatabaseClosed + } + switch config.SyncPolicy { + default: + return ErrInvalidSyncPolicy + case Never, EverySecond, Always: + } + db.config = config + return nil +} + +// insertIntoDatabase performs inserts an item in to the database and updates +// all indexes. If a previous item with the same key already exists, that item +// will be replaced with the new one, and return the previous item. +func (db *DB) insertIntoDatabase(item *dbItem) *dbItem { + var pdbi *dbItem + prev := db.keys.ReplaceOrInsert(item) + if prev != nil { + // A previous item was removed from the keys tree. Let's + // fully delete this item from all indexes. + pdbi = prev.(*dbItem) + if pdbi.opts != nil && pdbi.opts.ex { + // Remove it from the exipres tree. + db.exps.Delete(pdbi) + } + for _, idx := range db.idxs { + if idx.btr != nil { + // Remove it from the btree index. + idx.btr.Delete(pdbi) + } + if idx.rtr != nil { + // Remove it from the rtree index. + idx.rtr.Remove(pdbi) + } + } + } + if item.opts != nil && item.opts.ex { + // The new item has eviction options. Add it to the + // expires tree + db.exps.ReplaceOrInsert(item) + } + for _, idx := range db.idxs { + if !idx.match(item.key) { + continue + } + if idx.btr != nil { + // Add new item to btree index. + idx.btr.ReplaceOrInsert(item) + } + if idx.rtr != nil { + // Add new item to rtree index. + idx.rtr.Insert(item) + } + } + // we must return the previous item to the caller. + return pdbi +} + +// deleteFromDatabase removes and item from the database and indexes. 
The input +// item must only have the key field specified thus "&dbItem{key: key}" is all +// that is needed to fully remove the item with the matching key. If an item +// with the matching key was found in the database, it will be removed and +// returned to the caller. A nil return value means that the item was not +// found in the database +func (db *DB) deleteFromDatabase(item *dbItem) *dbItem { + var pdbi *dbItem + prev := db.keys.Delete(item) + if prev != nil { + pdbi = prev.(*dbItem) + if pdbi.opts != nil && pdbi.opts.ex { + // Remove it from the exipres tree. + db.exps.Delete(pdbi) + } + for _, idx := range db.idxs { + if idx.btr != nil { + // Remove it from the btree index. + idx.btr.Delete(pdbi) + } + if idx.rtr != nil { + // Remove it from the rtree index. + idx.rtr.Remove(pdbi) + } + } + } + return pdbi +} + +// backgroundManager runs continuously in the background and performs various +// operations such as removing expired items and syncing to disk. +func (db *DB) backgroundManager() { + flushes := 0 + t := time.NewTicker(time.Second) + defer t.Stop() + for range t.C { + var shrink bool + // Open a standard view. This will take a full lock of the + // database thus allowing for access to anything we need. 
+ var onExpired func([]string) + var expired []string + err := db.Update(func(tx *Tx) error { + onExpired = db.config.OnExpired + if db.persist && !db.config.AutoShrinkDisabled { + pos, err := db.file.Seek(0, 1) + if err != nil { + return err + } + aofsz := int(pos) + if aofsz > db.config.AutoShrinkMinSize { + prc := float64(db.config.AutoShrinkPercentage) / 100.0 + shrink = aofsz > db.lastaofsz+int(float64(db.lastaofsz)*prc) + } + } + // produce a list of expired items that need removing + db.exps.AscendLessThan(&dbItem{ + opts: &dbItemOpts{ex: true, exat: time.Now()}, + }, func(item btree.Item) bool { + expired = append(expired, item.(*dbItem).key) + return true + }) + if onExpired == nil { + for _, key := range expired { + if _, err := tx.Delete(key); err != nil { + // it's ok to get a "not found" because the + // 'Delete' method reports "not found" for + // expired items. + if err != ErrNotFound { + return err + } + } + } + } + return nil + }) + if err == ErrDatabaseClosed { + break + } + + // send expired event, if needed + if onExpired != nil && len(expired) > 0 { + onExpired(expired) + } + + // execute a disk sync, if needed + func() { + db.mu.Lock() + defer db.mu.Unlock() + if db.persist && db.config.SyncPolicy == EverySecond && + flushes != db.flushes { + _ = db.file.Sync() + flushes = db.flushes + } + }() + if shrink { + if err = db.Shrink(); err != nil { + if err == ErrDatabaseClosed { + break + } + } + } + } +} + +// Shrink will make the database file smaller by removing redundant +// log entries. This operation does not block the database. +func (db *DB) Shrink() error { + db.mu.Lock() + if db.closed { + db.mu.Unlock() + return ErrDatabaseClosed + } + if !db.persist { + // The database was opened with ":memory:" as the path. + // There is no persistence, and no need to do anything here. + db.mu.Unlock() + return nil + } + if db.shrinking { + // The database is already in the process of shrinking. 
+ db.mu.Unlock() + return ErrShrinkInProcess + } + db.shrinking = true + defer func() { + db.mu.Lock() + db.shrinking = false + db.mu.Unlock() + }() + fname := db.file.Name() + tmpname := fname + ".tmp" + // the endpos is used to return to the end of the file when we are + // finished writing all of the current items. + endpos, err := db.file.Seek(0, 2) + if err != nil { + return err + } + db.mu.Unlock() + time.Sleep(time.Second / 4) // wait just a bit before starting + f, err := os.Create(tmpname) + if err != nil { + return err + } + defer func() { + _ = f.Close() + _ = os.RemoveAll(tmpname) + }() + + // we are going to read items in as chunks as to not hold up the database + // for too long. + var buf []byte + pivot := "" + done := false + for !done { + err := func() error { + db.mu.RLock() + defer db.mu.RUnlock() + if db.closed { + return ErrDatabaseClosed + } + done = true + var n int + db.keys.AscendGreaterOrEqual(&dbItem{key: pivot}, + func(item btree.Item) bool { + dbi := item.(*dbItem) + // 1000 items or 64MB buffer + if n > 1000 || len(buf) > 64*1024*1024 { + pivot = dbi.key + done = false + return false + } + buf = dbi.writeSetTo(buf) + n++ + return true + }, + ) + if len(buf) > 0 { + if _, err := f.Write(buf); err != nil { + return err + } + buf = buf[:0] + } + return nil + }() + if err != nil { + return err + } + } + // We reached this far so all of the items have been written to a new tmp + // There's some more work to do by appending the new line from the aof + // to the tmp file and finally swap the files out. + return func() error { + // We're wrapping this in a function to get the benefit of a defered + // lock/unlock. + db.mu.Lock() + defer db.mu.Unlock() + if db.closed { + return ErrDatabaseClosed + } + // We are going to open a new version of the aof file so that we do + // not change the seek position of the previous. This may cause a + // problem in the future if we choose to use syscall file locking. 
+ aof, err := os.Open(fname) + if err != nil { + return err + } + defer func() { _ = aof.Close() }() + if _, err := aof.Seek(endpos, 0); err != nil { + return err + } + // Just copy all of the new commands that have occurred since we + // started the shrink process. + if _, err := io.Copy(f, aof); err != nil { + return err + } + // Close all files + if err := aof.Close(); err != nil { + return err + } + if err := f.Close(); err != nil { + return err + } + if err := db.file.Close(); err != nil { + return err + } + // Any failures below here is really bad. So just panic. + if err := os.Rename(tmpname, fname); err != nil { + panic(err) + } + db.file, err = os.OpenFile(fname, os.O_CREATE|os.O_RDWR, 0666) + if err != nil { + panic(err) + } + pos, err := db.file.Seek(0, 2) + if err != nil { + return err + } + db.lastaofsz = int(pos) + return nil + }() +} + +var errValidEOF = errors.New("valid eof") + +// readLoad reads from the reader and loads commands into the database. +// modTime is the modified time of the reader, should be no greater than +// the current time.Now(). +func (db *DB) readLoad(rd io.Reader, modTime time.Time) error { + data := make([]byte, 4096) + parts := make([]string, 0, 8) + r := bufio.NewReader(rd) + for { + // read a single command. + // first we should read the number of parts that the of the command + line, err := r.ReadBytes('\n') + if err != nil { + if len(line) > 0 { + // got an eof but also data. this should be an unexpected eof. 
+ return io.ErrUnexpectedEOF + } + if err == io.EOF { + break + } + return err + } + if line[0] != '*' { + return ErrInvalid + } + // convert the string number to and int + var n int + if len(line) == 4 && line[len(line)-2] == '\r' { + if line[1] < '0' || line[1] > '9' { + return ErrInvalid + } + n = int(line[1] - '0') + } else { + if len(line) < 5 || line[len(line)-2] != '\r' { + return ErrInvalid + } + for i := 1; i < len(line)-2; i++ { + if line[i] < '0' || line[i] > '9' { + return ErrInvalid + } + n = n*10 + int(line[i]-'0') + } + } + // read each part of the command. + parts = parts[:0] + for i := 0; i < n; i++ { + // read the number of bytes of the part. + line, err := r.ReadBytes('\n') + if err != nil { + return err + } + if line[0] != '$' { + return ErrInvalid + } + // convert the string number to and int + var n int + if len(line) == 4 && line[len(line)-2] == '\r' { + if line[1] < '0' || line[1] > '9' { + return ErrInvalid + } + n = int(line[1] - '0') + } else { + if len(line) < 5 || line[len(line)-2] != '\r' { + return ErrInvalid + } + for i := 1; i < len(line)-2; i++ { + if line[i] < '0' || line[i] > '9' { + return ErrInvalid + } + n = n*10 + int(line[i]-'0') + } + } + // resize the read buffer + if len(data) < n+2 { + dataln := len(data) + for dataln < n+2 { + dataln *= 2 + } + data = make([]byte, dataln) + } + if _, err = io.ReadFull(r, data[:n+2]); err != nil { + return err + } + if data[n] != '\r' || data[n+1] != '\n' { + return ErrInvalid + } + // copy string + parts = append(parts, string(data[:n])) + } + // finished reading the command + + if len(parts) == 0 { + continue + } + if (parts[0][0] == 's' || parts[0][1] == 'S') && + (parts[0][1] == 'e' || parts[0][1] == 'E') && + (parts[0][2] == 't' || parts[0][2] == 'T') { + // SET + if len(parts) < 3 || len(parts) == 4 || len(parts) > 5 { + return ErrInvalid + } + if len(parts) == 5 { + if strings.ToLower(parts[3]) != "ex" { + return ErrInvalid + } + ex, err := strconv.ParseInt(parts[4], 10, 64) + if 
err != nil { + return err + } + now := time.Now() + dur := (time.Duration(ex) * time.Second) - now.Sub(modTime) + if dur > 0 { + db.insertIntoDatabase(&dbItem{ + key: parts[1], + val: parts[2], + opts: &dbItemOpts{ + ex: true, + exat: now.Add(dur), + }, + }) + } + } else { + db.insertIntoDatabase(&dbItem{key: parts[1], val: parts[2]}) + } + } else if (parts[0][0] == 'd' || parts[0][1] == 'D') && + (parts[0][1] == 'e' || parts[0][1] == 'E') && + (parts[0][2] == 'l' || parts[0][2] == 'L') { + // DEL + if len(parts) != 2 { + return ErrInvalid + } + db.deleteFromDatabase(&dbItem{key: parts[1]}) + } else if (parts[0][0] == 'f' || parts[0][1] == 'F') && + strings.ToLower(parts[0]) == "flushdb" { + db.keys = btree.New(btreeDegrees, nil) + db.exps = btree.New(btreeDegrees, &exctx{db}) + db.idxs = make(map[string]*index) + } else { + return ErrInvalid + } + } + return nil +} + +// load reads entries from the append only database file and fills the database. +// The file format uses the Redis append only file format, which is and a series +// of RESP commands. For more information on RESP please read +// http://redis.io/topics/protocol. The only supported RESP commands are DEL and +// SET. +func (db *DB) load() error { + fi, err := db.file.Stat() + if err != nil { + return err + } + if err := db.readLoad(db.file, fi.ModTime()); err != nil { + return err + } + pos, err := db.file.Seek(0, 2) + if err != nil { + return err + } + db.lastaofsz = int(pos) + return nil +} + +// managed calls a block of code that is fully contained in a transaction. +// This method is intended to be wrapped by Update and View +func (db *DB) managed(writable bool, fn func(tx *Tx) error) (err error) { + var tx *Tx + tx, err = db.Begin(writable) + if err != nil { + return + } + defer func() { + if err != nil { + // The caller returned an error. We must rollback. + _ = tx.Rollback() + return + } + if writable { + // Everything went well. 
Lets Commit() + err = tx.Commit() + } else { + // read-only transaction can only roll back. + err = tx.Rollback() + } + }() + tx.funcd = true + defer func() { + tx.funcd = false + }() + err = fn(tx) + return +} + +// View executes a function within a managed read-only transaction. +// When a non-nil error is returned from the function that error will be return +// to the caller of View(). +// +// Executing a manual commit or rollback from inside the function will result +// in a panic. +func (db *DB) View(fn func(tx *Tx) error) error { + return db.managed(false, fn) +} + +// Update executes a function within a managed read/write transaction. +// The transaction has been committed when no error is returned. +// In the event that an error is returned, the transaction will be rolled back. +// When a non-nil error is returned from the function, the transaction will be +// rolled back and the that error will be return to the caller of Update(). +// +// Executing a manual commit or rollback from inside the function will result +// in a panic. +func (db *DB) Update(fn func(tx *Tx) error) error { + return db.managed(true, fn) +} + +// get return an item or nil if not found. +func (db *DB) get(key string) *dbItem { + item := db.keys.Get(&dbItem{key: key}) + if item != nil { + return item.(*dbItem) + } + return nil +} + +// Tx represents a transaction on the database. This transaction can either be +// read-only or read/write. Read-only transactions can be used for retrieving +// values for keys and iterating through keys and values. Read/write +// transactions can set and delete keys. +// +// All transactions must be committed or rolled-back when done. +type Tx struct { + db *DB // the underlying database. + writable bool // when false mutable operations fail. + funcd bool // when true Commit and Rollback panic. + wc *txWriteContext // context for writable transactions. 
+} + +type txWriteContext struct { + // rollback when deleteAll is called + rbkeys *btree.BTree // a tree of all item ordered by key + rbexps *btree.BTree // a tree of items ordered by expiration + rbidxs map[string]*index // the index trees. + + rollbackItems map[string]*dbItem // details for rolling back tx. + commitItems map[string]*dbItem // details for committing tx. + itercount int // stack of iterators + rollbackIndexes map[string]*index // details for dropped indexes. +} + +// DeleteAll deletes all items from the database. +func (tx *Tx) DeleteAll() error { + if tx.db == nil { + return ErrTxClosed + } else if !tx.writable { + return ErrTxNotWritable + } else if tx.wc.itercount > 0 { + return ErrTxIterating + } + + // check to see if we've already deleted everything + if tx.wc.rbkeys == nil { + // we need to backup the live data in case of a rollback. + tx.wc.rbkeys = tx.db.keys + tx.wc.rbexps = tx.db.exps + tx.wc.rbidxs = tx.db.idxs + } + + // now reset the live database trees + tx.db.keys = btree.New(btreeDegrees, nil) + tx.db.exps = btree.New(btreeDegrees, &exctx{tx.db}) + tx.db.idxs = make(map[string]*index) + + // finally re-create the indexes + for name, idx := range tx.wc.rbidxs { + tx.db.idxs[name] = idx.clearCopy() + } + + // always clear out the commits + tx.wc.commitItems = make(map[string]*dbItem) + + return nil +} + +// Begin opens a new transaction. +// Multiple read-only transactions can be opened at the same time but there can +// only be one read/write transaction at a time. Attempting to open a read/write +// transactions while another one is in progress will result in blocking until +// the current read/write transaction is completed. +// +// All transactions must be closed by calling Commit() or Rollback() when done. 
+func (db *DB) Begin(writable bool) (*Tx, error) { + tx := &Tx{ + db: db, + writable: writable, + } + tx.lock() + if db.closed { + tx.unlock() + return nil, ErrDatabaseClosed + } + if writable { + // writable transactions have a writeContext object that + // contains information about changes to the database. + tx.wc = &txWriteContext{} + tx.wc.rollbackItems = make(map[string]*dbItem) + tx.wc.rollbackIndexes = make(map[string]*index) + if db.persist { + tx.wc.commitItems = make(map[string]*dbItem) + } + } + return tx, nil +} + +// lock locks the database based on the transaction type. +func (tx *Tx) lock() { + if tx.writable { + tx.db.mu.Lock() + } else { + tx.db.mu.RLock() + } +} + +// unlock unlocks the database based on the transaction type. +func (tx *Tx) unlock() { + if tx.writable { + tx.db.mu.Unlock() + } else { + tx.db.mu.RUnlock() + } +} + +// rollbackInner handles the underlying rollback logic. +// Intended to be called from Commit() and Rollback(). +func (tx *Tx) rollbackInner() { + // rollback the deleteAll if needed + if tx.wc.rbkeys != nil { + tx.db.keys = tx.wc.rbkeys + tx.db.idxs = tx.wc.rbidxs + tx.db.exps = tx.wc.rbexps + } + for key, item := range tx.wc.rollbackItems { + tx.db.deleteFromDatabase(&dbItem{key: key}) + if item != nil { + // When an item is not nil, we will need to reinsert that item + // into the database overwriting the current one. + tx.db.insertIntoDatabase(item) + } + } + for name, idx := range tx.wc.rollbackIndexes { + delete(tx.db.idxs, name) + if idx != nil { + // When an index is not nil, we will need to rebuilt that index + // this could be an expensive process if the database has many + // items or the index is complex. + tx.db.idxs[name] = idx + idx.rebuild() + } + } +} + +// Commit writes all changes to disk. +// An error is returned when a write error occurs, or when a Commit() is called +// from a read-only transaction. 
+func (tx *Tx) Commit() error { + if tx.funcd { + panic("managed tx commit not allowed") + } + if tx.db == nil { + return ErrTxClosed + } else if !tx.writable { + return ErrTxNotWritable + } + var err error + if tx.db.persist && (len(tx.wc.commitItems) > 0 || tx.wc.rbkeys != nil) { + tx.db.buf = tx.db.buf[:0] + // write a flushdb if a deleteAll was called. + if tx.wc.rbkeys != nil { + tx.db.buf = append(tx.db.buf, "*1\r\n$7\r\nflushdb\r\n"...) + } + // Each committed record is written to disk + for key, item := range tx.wc.commitItems { + if item == nil { + tx.db.buf = (&dbItem{key: key}).writeDeleteTo(tx.db.buf) + } else { + tx.db.buf = item.writeSetTo(tx.db.buf) + } + } + // Flushing the buffer only once per transaction. + // If this operation fails then the write did failed and we must + // rollback. + if _, err = tx.db.file.Write(tx.db.buf); err != nil { + tx.rollbackInner() + } + if tx.db.config.SyncPolicy == Always { + _ = tx.db.file.Sync() + } + // Increment the number of flushes. The background syncing uses this. + tx.db.flushes++ + } + // Unlock the database and allow for another writable transaction. + tx.unlock() + // Clear the db field to disable this transaction from future use. + tx.db = nil + return err +} + +// Rollback closes the transaction and reverts all mutable operations that +// were performed on the transaction such as Set() and Delete(). +// +// Read-only transactions can only be rolled back, not committed. +func (tx *Tx) Rollback() error { + if tx.funcd { + panic("managed tx rollback not allowed") + } + if tx.db == nil { + return ErrTxClosed + } + // The rollback func does the heavy lifting. + if tx.writable { + tx.rollbackInner() + } + // unlock the database for more transactions. + tx.unlock() + // Clear the db field to disable this transaction from future use. + tx.db = nil + return nil +} + +// dbItemOpts holds various meta information about an item. +type dbItemOpts struct { + ex bool // does this item expire? 
	exat time.Time // when does this item expire?
}

// dbItem is a single key/value entry stored in the database trees along
// with its optional expiration metadata.
type dbItem struct {
	key, val string      // the binary key and value
	opts     *dbItemOpts // optional meta information
}

// appendArray appends a RESP-style array header ("*<count>\r\n") to buf
// and returns the extended buffer.
func appendArray(buf []byte, count int) []byte {
	buf = append(buf, '*')
	buf = append(buf, strconv.FormatInt(int64(count), 10)...)
	buf = append(buf, '\r', '\n')
	return buf
}

// appendBulkString appends a RESP-style bulk string
// ("$<len>\r\n<s>\r\n") to buf and returns the extended buffer.
func appendBulkString(buf []byte, s string) []byte {
	buf = append(buf, '$')
	buf = append(buf, strconv.FormatInt(int64(len(s)), 10)...)
	buf = append(buf, '\r', '\n')
	buf = append(buf, s...)
	buf = append(buf, '\r', '\n')
	return buf
}

// writeSetTo appends the item to buf as a single SET record in the
// append-only log format, and returns the extended buffer. Items with an
// expiration also carry an "ex" argument with the remaining TTL.
func (dbi *dbItem) writeSetTo(buf []byte) []byte {
	if dbi.opts != nil && dbi.opts.ex {
		// remaining time-to-live, truncated to whole seconds.
		ex := dbi.opts.exat.Sub(time.Now()) / time.Second
		buf = appendArray(buf, 5)
		buf = appendBulkString(buf, "set")
		buf = appendBulkString(buf, dbi.key)
		buf = appendBulkString(buf, dbi.val)
		buf = appendBulkString(buf, "ex")
		buf = appendBulkString(buf, strconv.FormatUint(uint64(ex), 10))
	} else {
		buf = appendArray(buf, 3)
		buf = appendBulkString(buf, "set")
		buf = appendBulkString(buf, dbi.key)
		buf = appendBulkString(buf, dbi.val)
	}
	return buf
}

// writeDeleteTo appends the item to buf as a single DEL record in the
// append-only log format, and returns the extended buffer.
// (The original comment here mistakenly said "writeSetTo".)
func (dbi *dbItem) writeDeleteTo(buf []byte) []byte {
	buf = appendArray(buf, 2)
	buf = appendBulkString(buf, "del")
	buf = appendBulkString(buf, dbi.key)
	return buf
}

// expired evaluates whether the item has expired. This will always return
// false when the item does not have `opts.ex` set to true.
func (dbi *dbItem) expired() bool {
	return dbi.opts != nil && dbi.opts.ex && time.Now().After(dbi.opts.exat)
}

// MaxTime from http://stackoverflow.com/questions/25065055#32620397
// This is a long time in the future. It's an imaginary number that is
// used for b-tree ordering.
+var maxTime = time.Unix(1<<63-62135596801, 999999999) + +// expiresAt will return the time when the item will expire. When an item does +// not expire `maxTime` is used. +func (dbi *dbItem) expiresAt() time.Time { + if dbi.opts == nil || !dbi.opts.ex { + return maxTime + } + return dbi.opts.exat +} + +// Less determines if a b-tree item is less than another. This is required +// for ordering, inserting, and deleting items from a b-tree. It's important +// to note that the ctx parameter is used to help with determine which +// formula to use on an item. Each b-tree should use a different ctx when +// sharing the same item. +func (dbi *dbItem) Less(item btree.Item, ctx interface{}) bool { + dbi2 := item.(*dbItem) + switch ctx := ctx.(type) { + case *exctx: + // The expires b-tree formula + if dbi2.expiresAt().After(dbi.expiresAt()) { + return true + } + if dbi.expiresAt().After(dbi2.expiresAt()) { + return false + } + case *index: + if ctx.less != nil { + // Using an index + if ctx.less(dbi.val, dbi2.val) { + return true + } + if ctx.less(dbi2.val, dbi.val) { + return false + } + } + } + // Always fall back to the key comparison. This creates absolute uniqueness. + return dbi.key < dbi2.key +} + +// Rect converts a string to a rectangle. +// An invalid rectangle will cause a panic. +func (dbi *dbItem) Rect(ctx interface{}) (min, max []float64) { + switch ctx := ctx.(type) { + case *index: + return ctx.rect(dbi.val) + } + return nil, nil +} + +// SetOptions represents options that may be included with the Set() command. +type SetOptions struct { + // Expires indicates that the Set() key-value will expire + Expires bool + // TTL is how much time the key-value will exist in the database + // before being evicted. The Expires field must also be set to true. + // TTL stands for Time-To-Live. + TTL time.Duration +} + +// GetLess returns the less function for an index. This is handy for +// doing ad-hoc compares inside a transaction. 
+// Returns ErrNotFound if the index is not found or there is no less +// function bound to the index +func (tx *Tx) GetLess(index string) (func(a, b string) bool, error) { + if tx.db == nil { + return nil, ErrTxClosed + } + idx, ok := tx.db.idxs[index] + if !ok || idx.less == nil { + return nil, ErrNotFound + } + return idx.less, nil +} + +// GetRect returns the rect function for an index. This is handy for +// doing ad-hoc searches inside a transaction. +// Returns ErrNotFound if the index is not found or there is no rect +// function bound to the index +func (tx *Tx) GetRect(index string) (func(s string) (min, max []float64), + error) { + if tx.db == nil { + return nil, ErrTxClosed + } + idx, ok := tx.db.idxs[index] + if !ok || idx.rect == nil { + return nil, ErrNotFound + } + return idx.rect, nil +} + +// Set inserts or replaces an item in the database based on the key. +// The opt params may be used for additional functionality such as forcing +// the item to be evicted at a specified time. When the return value +// for err is nil the operation succeeded. When the return value of +// replaced is true, then the operaton replaced an existing item whose +// value will be returned through the previousValue variable. +// The results of this operation will not be available to other +// transactions until the current transaction has successfully committed. +// +// Only a writable transaction can be used with this operation. +// This operation is not allowed during iterations such as Ascend* & Descend*. +func (tx *Tx) Set(key, value string, opts *SetOptions) (previousValue string, + replaced bool, err error) { + if tx.db == nil { + return "", false, ErrTxClosed + } else if !tx.writable { + return "", false, ErrTxNotWritable + } else if tx.wc.itercount > 0 { + return "", false, ErrTxIterating + } + item := &dbItem{key: key, val: value} + if opts != nil { + if opts.Expires { + // The caller is requesting that this item expires. 
Convert the + // TTL to an absolute time and bind it to the item. + item.opts = &dbItemOpts{ex: true, exat: time.Now().Add(opts.TTL)} + } + } + // Insert the item into the keys tree. + prev := tx.db.insertIntoDatabase(item) + + // insert into the rollback map if there has not been a deleteAll. + if tx.wc.rbkeys == nil { + if prev == nil { + // An item with the same key did not previously exist. Let's + // create a rollback entry with a nil value. A nil value indicates + // that the entry should be deleted on rollback. When the value is + // *not* nil, that means the entry should be reverted. + tx.wc.rollbackItems[key] = nil + } else { + // A previous item already exists in the database. Let's create a + // rollback entry with the item as the value. We need to check the + // map to see if there isn't already an item that matches the + // same key. + if _, ok := tx.wc.rollbackItems[key]; !ok { + tx.wc.rollbackItems[key] = prev + } + if !prev.expired() { + previousValue, replaced = prev.val, true + } + } + } + // For commits we simply assign the item to the map. We use this map to + // write the entry to disk. + if tx.db.persist { + tx.wc.commitItems[key] = item + } + return previousValue, replaced, nil +} + +// Get returns a value for a key. If the item does not exist or if the item +// has expired then ErrNotFound is returned. +func (tx *Tx) Get(key string) (val string, err error) { + if tx.db == nil { + return "", ErrTxClosed + } + item := tx.db.get(key) + if item == nil || item.expired() { + // The item does not exists or has expired. Let's assume that + // the caller is only interested in items that have not expired. + return "", ErrNotFound + } + return item.val, nil +} + +// Delete removes an item from the database based on the item's key. If the item +// does not exist or if the item has expired then ErrNotFound is returned. +// +// Only a writable transaction can be used for this operation. 
+// This operation is not allowed during iterations such as Ascend* & Descend*. +func (tx *Tx) Delete(key string) (val string, err error) { + if tx.db == nil { + return "", ErrTxClosed + } else if !tx.writable { + return "", ErrTxNotWritable + } else if tx.wc.itercount > 0 { + return "", ErrTxIterating + } + item := tx.db.deleteFromDatabase(&dbItem{key: key}) + if item == nil { + return "", ErrNotFound + } + // create a rollback entry if there has not been a deleteAll call. + if tx.wc.rbkeys == nil { + if _, ok := tx.wc.rollbackItems[key]; !ok { + tx.wc.rollbackItems[key] = item + } + } + if tx.db.persist { + tx.wc.commitItems[key] = nil + } + // Even though the item has been deleted, we still want to check + // if it has expired. An expired item should not be returned. + if item.expired() { + // The item exists in the tree, but has expired. Let's assume that + // the caller is only interested in items that have not expired. + return "", ErrNotFound + } + return item.val, nil +} + +// TTL returns the remaining time-to-live for an item. +// A negative duration will be returned for items that do not have an +// expiration. +func (tx *Tx) TTL(key string) (time.Duration, error) { + if tx.db == nil { + return 0, ErrTxClosed + } + item := tx.db.get(key) + if item == nil { + return 0, ErrNotFound + } else if item.opts == nil || !item.opts.ex { + return -1, nil + } + dur := item.opts.exat.Sub(time.Now()) + if dur < 0 { + return 0, ErrNotFound + } + return dur, nil +} + +// scan iterates through a specified index and calls user-defined iterator +// function for each item encountered. +// The desc param indicates that the iterator should descend. +// The gt param indicates that there is a greaterThan limit. +// The lt param indicates that there is a lessThan limit. +// The index param tells the scanner to use the specified index tree. An +// empty string for the index means to scan the keys, not the values. +// The start and stop params are the greaterThan, lessThan limits. 
For +// descending order, these will be lessThan, greaterThan. +// An error will be returned if the tx is closed or the index is not found. +func (tx *Tx) scan(desc, gt, lt bool, index, start, stop string, + iterator func(key, value string) bool) error { + if tx.db == nil { + return ErrTxClosed + } + // wrap a btree specific iterator around the user-defined iterator. + iter := func(item btree.Item) bool { + dbi := item.(*dbItem) + return iterator(dbi.key, dbi.val) + } + var tr *btree.BTree + if index == "" { + // empty index means we will use the keys tree. + tr = tx.db.keys + } else { + idx := tx.db.idxs[index] + if idx == nil { + // index was not found. return error + return ErrNotFound + } + tr = idx.btr + if tr == nil { + return nil + } + } + // create some limit items + var itemA, itemB *dbItem + if gt || lt { + if index == "" { + itemA = &dbItem{key: start} + itemB = &dbItem{key: stop} + } else { + itemA = &dbItem{val: start} + itemB = &dbItem{val: stop} + } + } + // execute the scan on the underlying tree. + if tx.wc != nil { + tx.wc.itercount++ + defer func() { + tx.wc.itercount-- + }() + } + if desc { + if gt { + if lt { + tr.DescendRange(itemA, itemB, iter) + } else { + tr.DescendGreaterThan(itemA, iter) + } + } else if lt { + tr.DescendLessOrEqual(itemA, iter) + } else { + tr.Descend(iter) + } + } else { + if gt { + if lt { + tr.AscendRange(itemA, itemB, iter) + } else { + tr.AscendGreaterOrEqual(itemA, iter) + } + } else if lt { + tr.AscendLessThan(itemA, iter) + } else { + tr.Ascend(iter) + } + } + return nil +} + +// Match returns true if the specified key matches the pattern. This is a very +// simple pattern matcher where '*' matches on any number characters and '?' +// matches on any one character. +func Match(key, pattern string) bool { + return match.Match(key, pattern) +} + +// AscendKeys allows for iterating through keys based on the specified pattern. 
+func (tx *Tx) AscendKeys(pattern string, + iterator func(key, value string) bool) error { + if pattern == "" { + return nil + } + if pattern[0] == '*' { + if pattern == "*" { + return tx.Ascend("", iterator) + } + return tx.Ascend("", func(key, value string) bool { + if match.Match(key, pattern) { + if !iterator(key, value) { + return false + } + } + return true + }) + } + min, max := match.Allowable(pattern) + return tx.AscendGreaterOrEqual("", min, func(key, value string) bool { + if key > max { + return false + } + if match.Match(key, pattern) { + if !iterator(key, value) { + return false + } + } + return true + }) +} + +// DescendKeys allows for iterating through keys based on the specified pattern. +func (tx *Tx) DescendKeys(pattern string, + iterator func(key, value string) bool) error { + if pattern == "" { + return nil + } + if pattern[0] == '*' { + if pattern == "*" { + return tx.Descend("", iterator) + } + return tx.Descend("", func(key, value string) bool { + if match.Match(key, pattern) { + if !iterator(key, value) { + return false + } + } + return true + }) + } + min, max := match.Allowable(pattern) + return tx.DescendLessOrEqual("", max, func(key, value string) bool { + if key < min { + return false + } + if match.Match(key, pattern) { + if !iterator(key, value) { + return false + } + } + return true + }) +} + +// Ascend calls the iterator for every item in the database within the range +// [first, last], until iterator returns false. +// When an index is provided, the results will be ordered by the item values +// as specified by the less() function of the defined index. +// When an index is not provided, the results will be ordered by the item key. +// An invalid index will return an error. 
+func (tx *Tx) Ascend(index string, + iterator func(key, value string) bool) error { + return tx.scan(false, false, false, index, "", "", iterator) +} + +// AscendGreaterOrEqual calls the iterator for every item in the database within +// the range [pivot, last], until iterator returns false. +// When an index is provided, the results will be ordered by the item values +// as specified by the less() function of the defined index. +// When an index is not provided, the results will be ordered by the item key. +// An invalid index will return an error. +func (tx *Tx) AscendGreaterOrEqual(index, pivot string, + iterator func(key, value string) bool) error { + return tx.scan(false, true, false, index, pivot, "", iterator) +} + +// AscendLessThan calls the iterator for every item in the database within the +// range [first, pivot), until iterator returns false. +// When an index is provided, the results will be ordered by the item values +// as specified by the less() function of the defined index. +// When an index is not provided, the results will be ordered by the item key. +// An invalid index will return an error. +func (tx *Tx) AscendLessThan(index, pivot string, + iterator func(key, value string) bool) error { + return tx.scan(false, false, true, index, pivot, "", iterator) +} + +// AscendRange calls the iterator for every item in the database within +// the range [greaterOrEqual, lessThan), until iterator returns false. +// When an index is provided, the results will be ordered by the item values +// as specified by the less() function of the defined index. +// When an index is not provided, the results will be ordered by the item key. +// An invalid index will return an error. 
+func (tx *Tx) AscendRange(index, greaterOrEqual, lessThan string, + iterator func(key, value string) bool) error { + return tx.scan( + false, true, true, index, greaterOrEqual, lessThan, iterator, + ) +} + +// Descend calls the iterator for every item in the database within the range +// [last, first], until iterator returns false. +// When an index is provided, the results will be ordered by the item values +// as specified by the less() function of the defined index. +// When an index is not provided, the results will be ordered by the item key. +// An invalid index will return an error. +func (tx *Tx) Descend(index string, + iterator func(key, value string) bool) error { + return tx.scan(true, false, false, index, "", "", iterator) +} + +// DescendGreaterThan calls the iterator for every item in the database within +// the range [last, pivot), until iterator returns false. +// When an index is provided, the results will be ordered by the item values +// as specified by the less() function of the defined index. +// When an index is not provided, the results will be ordered by the item key. +// An invalid index will return an error. +func (tx *Tx) DescendGreaterThan(index, pivot string, + iterator func(key, value string) bool) error { + return tx.scan(true, true, false, index, pivot, "", iterator) +} + +// DescendLessOrEqual calls the iterator for every item in the database within +// the range [pivot, first], until iterator returns false. +// When an index is provided, the results will be ordered by the item values +// as specified by the less() function of the defined index. +// When an index is not provided, the results will be ordered by the item key. +// An invalid index will return an error. 
+func (tx *Tx) DescendLessOrEqual(index, pivot string, + iterator func(key, value string) bool) error { + return tx.scan(true, false, true, index, pivot, "", iterator) +} + +// DescendRange calls the iterator for every item in the database within +// the range [lessOrEqual, greaterThan), until iterator returns false. +// When an index is provided, the results will be ordered by the item values +// as specified by the less() function of the defined index. +// When an index is not provided, the results will be ordered by the item key. +// An invalid index will return an error. +func (tx *Tx) DescendRange(index, lessOrEqual, greaterThan string, + iterator func(key, value string) bool) error { + return tx.scan( + true, true, true, index, lessOrEqual, greaterThan, iterator, + ) +} + +// rect is used by Intersects +type rect struct { + min, max []float64 +} + +func (r *rect) Rect(ctx interface{}) (min, max []float64) { + return r.min, r.max +} + +// Intersects searches for rectangle items that intersect a target rect. +// The specified index must have been created by AddIndex() and the target +// is represented by the rect string. This string will be processed by the +// same bounds function that was passed to the CreateSpatialIndex() function. +// An invalid index will return an error. +func (tx *Tx) Intersects(index, bounds string, + iterator func(key, value string) bool) error { + if tx.db == nil { + return ErrTxClosed + } + if index == "" { + // cannot search on keys tree. just return nil. + return nil + } + // wrap a rtree specific iterator around the user-defined iterator. + iter := func(item rtree.Item) bool { + dbi := item.(*dbItem) + return iterator(dbi.key, dbi.val) + } + idx := tx.db.idxs[index] + if idx == nil { + // index was not found. return error + return ErrNotFound + } + if idx.rtr == nil { + // not an r-tree index. 
just return nil + return nil + } + // execute the search + var min, max []float64 + if idx.rect != nil { + min, max = idx.rect(bounds) + } + idx.rtr.Search(&rect{min, max}, iter) + return nil +} + +// Len returns the number of items in the database +func (tx *Tx) Len() (int, error) { + if tx.db == nil { + return 0, ErrTxClosed + } + return tx.db.keys.Len(), nil +} + +// IndexOptions provides an index with additional features or +// alternate functionality. +type IndexOptions struct { + // CaseInsensitiveKeyMatching allow for case-insensitive + // matching on keys when setting key/values. + CaseInsensitiveKeyMatching bool +} + +// CreateIndex builds a new index and populates it with items. +// The items are ordered in an b-tree and can be retrieved using the +// Ascend* and Descend* methods. +// An error will occur if an index with the same name already exists. +// +// When a pattern is provided, the index will be populated with +// keys that match the specified pattern. This is a very simple pattern +// match where '*' matches on any number characters and '?' matches on +// any one character. +// The less function compares if string 'a' is less than string 'b'. +// It allows for indexes to create custom ordering. It's possible +// that the strings may be textual or binary. It's up to the provided +// less function to handle the content format and comparison. +// There are some default less function that can be used such as +// IndexString, IndexBinary, etc. +func (tx *Tx) CreateIndex(name, pattern string, + less ...func(a, b string) bool) error { + return tx.createIndex(name, pattern, less, nil, nil) +} + +// CreateIndexOptions is the same as CreateIndex except that it allows +// for additional options. +func (tx *Tx) CreateIndexOptions(name, pattern string, + opts *IndexOptions, + less ...func(a, b string) bool) error { + return tx.createIndex(name, pattern, less, nil, opts) +} + +// CreateSpatialIndex builds a new index and populates it with items. 
+// The items are organized in an r-tree and can be retrieved using the +// Intersects method. +// An error will occur if an index with the same name already exists. +// +// The rect function converts a string to a rectangle. The rectangle is +// represented by two arrays, min and max. Both arrays may have a length +// between 1 and 20, and both arrays must match in length. A length of 1 is a +// one dimensional rectangle, and a length of 4 is a four dimension rectangle. +// There is support for up to 20 dimensions. +// The values of min must be less than the values of max at the same dimension. +// Thus min[0] must be less-than-or-equal-to max[0]. +// The IndexRect is a default function that can be used for the rect +// parameter. +func (tx *Tx) CreateSpatialIndex(name, pattern string, + rect func(item string) (min, max []float64)) error { + return tx.createIndex(name, pattern, nil, rect, nil) +} + +// CreateSpatialIndexOptions is the same as CreateSpatialIndex except that +// it allows for additional options. +func (tx *Tx) CreateSpatialIndexOptions(name, pattern string, + opts *IndexOptions, + rect func(item string) (min, max []float64)) error { + return tx.createIndex(name, pattern, nil, rect, nil) +} + +// createIndex is called by CreateIndex() and CreateSpatialIndex() +func (tx *Tx) createIndex(name string, pattern string, + lessers []func(a, b string) bool, + rect func(item string) (min, max []float64), + opts *IndexOptions, +) error { + if tx.db == nil { + return ErrTxClosed + } else if !tx.writable { + return ErrTxNotWritable + } else if tx.wc.itercount > 0 { + return ErrTxIterating + } + if name == "" { + // cannot create an index without a name. + // an empty name index is designated for the main "keys" tree. + return ErrIndexExists + } + // check if an index with that name already exists. + if _, ok := tx.db.idxs[name]; ok { + // index with name already exists. error. 
		return ErrIndexExists
	}
	// generate a less function
	var less func(a, b string) bool
	switch len(lessers) {
	default:
		// multiple less functions specified.
		// create a compound less function.
		less = func(a, b string) bool {
			for i := 0; i < len(lessers)-1; i++ {
				if lessers[i](a, b) {
					return true
				}
				if lessers[i](b, a) {
					return false
				}
			}
			return lessers[len(lessers)-1](a, b)
		}
	case 0:
		// no less function
	case 1:
		less = lessers[0]
	}
	var sopts IndexOptions
	if opts != nil {
		sopts = *opts
	}
	if sopts.CaseInsensitiveKeyMatching {
		pattern = strings.ToLower(pattern)
	}
	// initialize the new index
	idx := &index{
		name:    name,
		pattern: pattern,
		less:    less,
		rect:    rect,
		db:      tx.db,
		opts:    sopts,
	}
	idx.rebuild()
	// save the index
	tx.db.idxs[name] = idx
	if tx.wc.rbkeys == nil {
		// store the index in the rollback map.
		if _, ok := tx.wc.rollbackIndexes[name]; !ok {
			// we use nil to indicate that the index should be removed upon rollback.
			tx.wc.rollbackIndexes[name] = nil
		}
	}
	return nil
}

// DropIndex removes an index.
func (tx *Tx) DropIndex(name string) error {
	if tx.db == nil {
		return ErrTxClosed
	} else if !tx.writable {
		return ErrTxNotWritable
	} else if tx.wc.itercount > 0 {
		return ErrTxIterating
	}
	if name == "" {
		// cannot drop the default "keys" index
		return ErrInvalidOperation
	}
	idx, ok := tx.db.idxs[name]
	if !ok {
		return ErrNotFound
	}
	// delete from the map.
	// this is all that is needed to delete an index.
	delete(tx.db.idxs, name)
	if tx.wc.rbkeys == nil {
		// store the index in the rollback map.
		if _, ok := tx.wc.rollbackIndexes[name]; !ok {
			// we use a non-nil copy of the index without the data to indicate that the
			// index should be rebuilt upon rollback.
			tx.wc.rollbackIndexes[name] = idx.clearCopy()
		}
	}
	return nil
}

// Indexes returns a list of index names.
+func (tx *Tx) Indexes() ([]string, error) { + if tx.db == nil { + return nil, ErrTxClosed + } + names := make([]string, 0, len(tx.db.idxs)) + for name := range tx.db.idxs { + names = append(names, name) + } + sort.Strings(names) + return names, nil +} + +// Rect is helper function that returns a string representation +// of a rect. IndexRect() is the reverse function and can be used +// to generate a rect from a string. +func Rect(min, max []float64) string { + r := grect.Rect{Min: min, Max: max} + return r.String() +} + +// Point is a helper function that converts a series of float64s +// to a rectangle for a spatial index. +func Point(coords ...float64) string { + return Rect(coords, coords) +} + +// IndexRect is a helper function that converts string to a rect. +// Rect() is the reverse function and can be used to generate a string +// from a rect. +func IndexRect(a string) (min, max []float64) { + r := grect.Get(a) + return r.Min, r.Max +} + +// IndexString is a helper function that return true if 'a' is less than 'b'. +// This is a case-insensitive comparison. Use the IndexBinary() for comparing +// case-sensitive strings. +func IndexString(a, b string) bool { + for i := 0; i < len(a) && i < len(b); i++ { + if a[i] >= 'A' && a[i] <= 'Z' { + if b[i] >= 'A' && b[i] <= 'Z' { + // both are uppercase, do nothing + if a[i] < b[i] { + return true + } else if a[i] > b[i] { + return false + } + } else { + // a is uppercase, convert a to lowercase + if a[i]+32 < b[i] { + return true + } else if a[i]+32 > b[i] { + return false + } + } + } else if b[i] >= 'A' && b[i] <= 'Z' { + // b is uppercase, convert b to lowercase + if a[i] < b[i]+32 { + return true + } else if a[i] > b[i]+32 { + return false + } + } else { + // neither are uppercase + if a[i] < b[i] { + return true + } else if a[i] > b[i] { + return false + } + } + } + return len(a) < len(b) +} + +// IndexBinary is a helper function that returns true if 'a' is less than 'b'. 
+// This compares the raw binary of the string. +func IndexBinary(a, b string) bool { + return a < b +} + +// IndexInt is a helper function that returns true if 'a' is less than 'b'. +func IndexInt(a, b string) bool { + ia, _ := strconv.ParseInt(a, 10, 64) + ib, _ := strconv.ParseInt(b, 10, 64) + return ia < ib +} + +// IndexUint is a helper function that returns true if 'a' is less than 'b'. +// This compares uint64s that are added to the database using the +// Uint() conversion function. +func IndexUint(a, b string) bool { + ia, _ := strconv.ParseUint(a, 10, 64) + ib, _ := strconv.ParseUint(b, 10, 64) + return ia < ib +} + +// IndexFloat is a helper function that returns true if 'a' is less than 'b'. +// This compares float64s that are added to the database using the +// Float() conversion function. +func IndexFloat(a, b string) bool { + ia, _ := strconv.ParseFloat(a, 64) + ib, _ := strconv.ParseFloat(b, 64) + return ia < ib +} + +// IndexJSON provides for the ability to create an index on any JSON field. +// When the field is a string, the comparison will be case-insensitive. +// It returns a helper function used by CreateIndex. +func IndexJSON(path string) func(a, b string) bool { + return func(a, b string) bool { + return gjson.Get(a, path).Less(gjson.Get(b, path), false) + } +} + +// IndexJSONCaseSensitive provides for the ability to create an index on +// any JSON field. +// When the field is a string, the comparison will be case-sensitive. +// It returns a helper function used by CreateIndex. +func IndexJSONCaseSensitive(path string) func(a, b string) bool { + return func(a, b string) bool { + return gjson.Get(a, path).Less(gjson.Get(b, path), true) + } +} + +// Desc is a helper function that changes the order of an index. 
+func Desc(less func(a, b string) bool) func(a, b string) bool { + return func(a, b string) bool { return less(b, a) } +} diff --git a/vendor/github.com/tidwall/buntdb/logo.png b/vendor/github.com/tidwall/buntdb/logo.png new file mode 100644 index 0000000..01c6d75 Binary files /dev/null and b/vendor/github.com/tidwall/buntdb/logo.png differ diff --git a/vendor/github.com/tidwall/gjson/LICENSE b/vendor/github.com/tidwall/gjson/LICENSE new file mode 100644 index 0000000..58f5819 --- /dev/null +++ b/vendor/github.com/tidwall/gjson/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2016 Josh Baker + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/tidwall/gjson/README.md b/vendor/github.com/tidwall/gjson/README.md new file mode 100644 index 0000000..b38f920 --- /dev/null +++ b/vendor/github.com/tidwall/gjson/README.md @@ -0,0 +1,369 @@ + + +get a json value quickly
+
+GJSON is a Go package that provides a [very fast](#performance) and simple way to get a value from a json document. The purpose for this library is to give efficient json indexing for the [BuntDB](https://github.com/tidwall/buntdb) project.
+
+Getting Started
+===============
+
+## Installing
+
+To start using GJSON, install Go and run `go get`:
+
+```sh
+$ go get -u github.com/tidwall/gjson
+```
+
+This will retrieve the library.
+
+## Get a value
+Get searches json for the specified path. A path is in dot syntax, such as "name.last" or "age". This function expects that the json is well-formed, and does not validate. Invalid json will not panic, but it may return back unexpected results. When the value is found it's returned immediately.
+
+```go
+package main
+
+import "github.com/tidwall/gjson"
+
+const json = `{"name":{"first":"Janet","last":"Prichard"},"age":47}`
+
+func main() {
+	value := gjson.Get(json, "name.last")
+	println(value.String())
+}
+```
+
+This will print:
+
+```
+Prichard
+```
+*There's also the [GetMany](#get-multiple-values-at-once) function to get multiple values at once, and [GetBytes](#working-with-bytes) for working with JSON byte slices.*
+
+## Path Syntax
+
+A path is a series of keys separated by a dot.
+A key may contain special wildcard characters '\*' and '?'.
+To access an array value use the index as the key.
+To get the number of elements in an array or to access a child path, use the '#' character.
+The dot and wildcard characters can be escaped with '\'.
+ +```json +{ + "name": {"first": "Tom", "last": "Anderson"}, + "age":37, + "children": ["Sara","Alex","Jack"], + "fav.movie": "Deer Hunter", + "friends": [ + {"first": "Dale", "last": "Murphy", "age": 44}, + {"first": "Roger", "last": "Craig", "age": 68}, + {"first": "Jane", "last": "Murphy", "age": 47} + ] +} +``` +``` +"name.last" >> "Anderson" +"age" >> 37 +"children" >> ["Sara","Alex","Jack"] +"children.#" >> 3 +"children.1" >> "Alex" +"child*.2" >> "Jack" +"c?ildren.0" >> "Sara" +"fav\.movie" >> "Deer Hunter" +"friends.#.first" >> ["Dale","Roger","Jane"] +"friends.1.last" >> "Craig" +``` + +You can also query an array for the first match by using `#[...]`, or find all matches with `#[...]#`. +Queries support the `==`, `!=`, `<`, `<=`, `>`, `>=` comparison operators and the simple pattern matching `%` operator. + +``` +friends.#[last=="Murphy"].first >> "Dale" +friends.#[last=="Murphy"]#.first >> ["Dale","Jane"] +friends.#[age>45]#.last >> ["Craig","Murphy"] +friends.#[first%"D*"].last >> "Murphy" +``` + +## Result Type + +GJSON supports the json types `string`, `number`, `bool`, and `null`. +Arrays and Objects are returned as their raw json types. 
+ +The `Result` type holds one of these: + +``` +bool, for JSON booleans +float64, for JSON numbers +string, for JSON string literals +nil, for JSON null +``` + +To directly access the value: + +```go +result.Type // can be String, Number, True, False, Null, or JSON +result.Str // holds the string +result.Num // holds the float64 number +result.Raw // holds the raw json +result.Index // index of raw value in original json, zero means index unknown +``` + +There are a variety of handy functions that work on a result: + +```go +result.Value() interface{} +result.Int() int64 +result.Uint() uint64 +result.Float() float64 +result.String() string +result.Bool() bool +result.Array() []gjson.Result +result.Map() map[string]gjson.Result +result.Get(path string) Result +result.ForEach(iterator func(key, value Result) bool) +result.Less(token Result, caseSensitive bool) bool +``` + +The `result.Value()` function returns an `interface{}` which requires type assertion and is one of the following Go types: + + + +The `result.Array()` function returns back an array of values. +If the result represents a non-existent value, then an empty array will be returned. +If the result is not a JSON array, the return value will be an array containing one result. 
+ +```go +boolean >> bool +number >> float64 +string >> string +null >> nil +array >> []interface{} +object >> map[string]interface{} +``` + +## Get nested array values + +Suppose you want all the last names from the following json: + +```json +{ + "programmers": [ + { + "firstName": "Janet", + "lastName": "McLaughlin", + }, { + "firstName": "Elliotte", + "lastName": "Hunter", + }, { + "firstName": "Jason", + "lastName": "Harold", + } + ] +}` +``` + +You would use the path "programmers.#.lastName" like such: + +```go +result := gjson.Get(json, "programmers.#.lastName") +for _,name := range result.Array() { + println(name.String()) +} +``` + +You can also query an object inside an array: + +```go +name := gjson.Get(json, `programmers.#[lastName="Hunter"].firstName`) +println(name.String()) // prints "Elliotte" +``` + +## Iterate through an object or array + +The `ForEach` function allows for quickly iterating through an object or array. +The key and value are passed to the iterator function for objects. +Only the value is passed for arrays. +Returning `false` from an iterator will stop iteration. + +```go +result := gjson.Get(json, "programmers") +result.ForEach(func(key, value gjson.Result) bool{ + println(value.String()) + return true // keep iterating +}) +``` + +## Simple Parse and Get + +There's a `Parse(json)` function that will do a simple parse, and `result.Get(path)` that will search a result. + +For example, all of these will return the same result: + +```go +gjson.Parse(json).Get("name").Get("last") +gjson.Get(json, "name").Get("last") +gjson.Get(json, "name.last") +``` + +## Check for the existence of a value + +Sometimes you just want to know if a value exists. 
+ +```go +value := gjson.Get(json, "name.last") +if !value.Exists() { + println("no last name") +} else { + println(value.String()) +} + +// Or as one step +if gjson.Get(json, "name.last").Exists(){ + println("has a last name") +} +``` + +## Unmarshal to a map + +To unmarshal to a `map[string]interface{}`: + +```go +m, ok := gjson.Parse(json).Value().(map[string]interface{}) +if !ok{ + // not a map +} +``` + +## Working with Bytes + +If your JSON is contained in a `[]byte` slice, there's the [GetBytes](https://godoc.org/github.com/tidwall/gjson#GetBytes) function. This is preferred over `Get(string(data), path)`. + +```go +var json []byte = ... +result := gjson.GetBytes(json, path) +``` + +If you are using the `gjson.GetBytes(json, path)` function and you want to avoid converting `result.Raw` to a `[]byte`, then you can use this pattern: + +```go +var json []byte = ... +result := gjson.GetBytes(json, path) +var raw []byte +if result.Index > 0 { + raw = json[result.Index:result.Index+len(result.Raw)] +} else { + raw = []byte(result.Raw) +} +``` + +This is a best-effort no allocation sub slice of the original json. This method utilizes the `result.Index` field, which is the position of the raw data in the original json. It's possible that the value of `result.Index` equals zero, in which case the `result.Raw` is converted to a `[]byte`. + +## Get multiple values at once + +The `GetMany` function can be used to get multiple values at the same time, and is optimized to scan over a JSON payload once. + +```go +results := gjson.GetMany(json, "name.first", "name.last", "age") +``` + +The return value is a `[]Result`, which will always contain exactly the same number of items as the input paths. 
+ +## Performance + +Benchmarks of GJSON alongside [encoding/json](https://golang.org/pkg/encoding/json/), +[ffjson](https://github.com/pquerna/ffjson), +[EasyJSON](https://github.com/mailru/easyjson), +and [jsonparser](https://github.com/buger/jsonparser) + +``` +BenchmarkGJSONGet-8 15000000 333 ns/op 0 B/op 0 allocs/op +BenchmarkGJSONUnmarshalMap-8 900000 4188 ns/op 1920 B/op 26 allocs/op +BenchmarkJSONUnmarshalMap-8 600000 8908 ns/op 3048 B/op 69 allocs/op +BenchmarkJSONUnmarshalStruct-8 600000 9026 ns/op 1832 B/op 69 allocs/op +BenchmarkJSONDecoder-8 300000 14339 ns/op 4224 B/op 184 allocs/op +BenchmarkFFJSONLexer-8 1500000 3156 ns/op 896 B/op 8 allocs/op +BenchmarkEasyJSONLexer-8 3000000 938 ns/op 613 B/op 6 allocs/op +BenchmarkJSONParserGet-8 3000000 442 ns/op 21 B/op 0 allocs/op +``` + +Benchmarks for the `GetMany` function: + +``` +BenchmarkGJSONGetMany4Paths-8 4000000 319 ns/op 112 B/op 0 allocs/op +BenchmarkGJSONGetMany8Paths-8 8000000 218 ns/op 56 B/op 0 allocs/op +BenchmarkGJSONGetMany16Paths-8 16000000 160 ns/op 56 B/op 0 allocs/op +BenchmarkGJSONGetMany32Paths-8 32000000 130 ns/op 64 B/op 0 allocs/op +BenchmarkGJSONGetMany64Paths-8 64000000 117 ns/op 64 B/op 0 allocs/op +BenchmarkGJSONGetMany128Paths-8 128000000 109 ns/op 64 B/op 0 allocs/op +``` + +JSON document used: + +```json +{ + "widget": { + "debug": "on", + "window": { + "title": "Sample Konfabulator Widget", + "name": "main_window", + "width": 500, + "height": 500 + }, + "image": { + "src": "Images/Sun.png", + "hOffset": 250, + "vOffset": 250, + "alignment": "center" + }, + "text": { + "data": "Click Here", + "size": 36, + "style": "bold", + "vOffset": 100, + "alignment": "center", + "onMouseUp": "sun1.opacity = (sun1.opacity / 100) * 90;" + } + } +} +``` + +Each operation was rotated though one of the following search paths: + +``` +widget.window.name +widget.image.hOffset +widget.text.onMouseUp +``` + +For the `GetMany` benchmarks these paths are used: + +``` +widget.window.name 
+widget.image.hOffset +widget.text.onMouseUp +widget.window.title +widget.image.alignment +widget.text.style +widget.window.height +widget.image.src +widget.text.data +widget.text.size +``` + +*These benchmarks were run on a MacBook Pro 15" 2.8 GHz Intel Core i7 using Go 1.7.* + +## Contact +Josh Baker [@tidwall](http://twitter.com/tidwall) + +## License + +GJSON source code is available under the MIT [License](/LICENSE). diff --git a/vendor/github.com/tidwall/gjson/gjson.go b/vendor/github.com/tidwall/gjson/gjson.go new file mode 100644 index 0000000..9b28df2 --- /dev/null +++ b/vendor/github.com/tidwall/gjson/gjson.go @@ -0,0 +1,1946 @@ +// Package gjson provides searching for json strings. +package gjson + +import ( + "reflect" + "strconv" + + // It's totally safe to use this package, but in case your + // project or organization restricts the use of 'unsafe', + // there's the "github.com/tidwall/gjson-safe" package. + "unsafe" + + "github.com/tidwall/match" +) + +// Type is Result type +type Type int + +const ( + // Null is a null json value + Null Type = iota + // False is a json false boolean + False + // Number is json number + Number + // String is a json string + String + // True is a json true boolean + True + // JSON is a raw block of JSON + JSON +) + +// String returns a string representation of the type. +func (t Type) String() string { + switch t { + default: + return "" + case Null: + return "Null" + case False: + return "False" + case Number: + return "Number" + case String: + return "String" + case True: + return "True" + case JSON: + return "JSON" + } +} + +// Result represents a json value that is returned from Get(). +type Result struct { + // Type is the json type + Type Type + // Raw is the raw json + Raw string + // Str is the json string + Str string + // Num is the json number + Num float64 + // Index of raw value in original json, zero means index unknown + Index int +} + +// String returns a string representation of the value. 
+func (t Result) String() string { + switch t.Type { + default: + return "null" + case False: + return "false" + case Number: + return strconv.FormatFloat(t.Num, 'f', -1, 64) + case String: + return t.Str + case JSON: + return t.Raw + case True: + return "true" + } +} + +// Bool returns an boolean representation. +func (t Result) Bool() bool { + switch t.Type { + default: + return false + case True: + return true + case String: + return t.Str != "" && t.Str != "0" + case Number: + return t.Num != 0 + } +} + +// Int returns an integer representation. +func (t Result) Int() int64 { + switch t.Type { + default: + return 0 + case True: + return 1 + case String: + n, _ := strconv.ParseInt(t.Str, 10, 64) + return n + case Number: + return int64(t.Num) + } +} + +// Uint returns an unsigned integer representation. +func (t Result) Uint() uint64 { + switch t.Type { + default: + return 0 + case True: + return 1 + case String: + n, _ := strconv.ParseUint(t.Str, 10, 64) + return n + case Number: + return uint64(t.Num) + } +} + +// Float returns an float64 representation. +func (t Result) Float() float64 { + switch t.Type { + default: + return 0 + case True: + return 1 + case String: + n, _ := strconv.ParseFloat(t.Str, 64) + return n + case Number: + return t.Num + } +} + +// Array returns back an array of values. +// If the result represents a non-existent value, then an empty array will be returned. +// If the result is not a JSON array, the return value will be an array containing one result. +func (t Result) Array() []Result { + if !t.Exists() { + return nil + } + if t.Type != JSON { + return []Result{t} + } + r := t.arrayOrMap('[', false) + return r.a +} + +// ForEach iterates through values. +// If the result represents a non-existent value, then no values will be iterated. +// If the result is an Object, the iterator will pass the key and value of each item. +// If the result is an Array, the iterator will only pass the value of each item. 
+// If the result is not a JSON array or object, the iterator will pass back one value equal to the result. +func (t Result) ForEach(iterator func(key, value Result) bool) { + if !t.Exists() { + return + } + if t.Type != JSON { + iterator(Result{}, t) + return + } + json := t.Raw + var keys bool + var i int + var key, value Result + for ; i < len(json); i++ { + if json[i] == '{' { + i++ + key.Type = String + keys = true + break + } else if json[i] == '[' { + i++ + break + } + if json[i] > ' ' { + return + } + } + var str string + var vesc bool + var ok bool + for ; i < len(json); i++ { + if keys { + if json[i] != '"' { + continue + } + s := i + i, str, vesc, ok = parseString(json, i+1) + if !ok { + return + } + if vesc { + key.Str = unescape(str[1 : len(str)-1]) + } else { + key.Str = str[1 : len(str)-1] + } + key.Raw = str + key.Index = s + } + for ; i < len(json); i++ { + if json[i] <= ' ' || json[i] == ',' || json[i] == ':' { + continue + } + break + } + s := i + i, value, ok = parseAny(json, i, true) + if !ok { + return + } + value.Index = s + if !iterator(key, value) { + return + } + } +} + +// Map returns back an map of values. The result should be a JSON array. +func (t Result) Map() map[string]Result { + if t.Type != JSON { + return map[string]Result{} + } + r := t.arrayOrMap('{', false) + return r.o +} + +// Get searches result for the specified path. +// The result should be a JSON array or object. 
+func (t Result) Get(path string) Result { + return Get(t.Raw, path) +} + +type arrayOrMapResult struct { + a []Result + ai []interface{} + o map[string]Result + oi map[string]interface{} + vc byte +} + +func (t Result) arrayOrMap(vc byte, valueize bool) (r arrayOrMapResult) { + var json = t.Raw + var i int + var value Result + var count int + var key Result + if vc == 0 { + for ; i < len(json); i++ { + if json[i] == '{' || json[i] == '[' { + r.vc = json[i] + i++ + break + } + if json[i] > ' ' { + goto end + } + } + } else { + for ; i < len(json); i++ { + if json[i] == vc { + i++ + break + } + if json[i] > ' ' { + goto end + } + } + r.vc = vc + } + if r.vc == '{' { + if valueize { + r.oi = make(map[string]interface{}) + } else { + r.o = make(map[string]Result) + } + } else { + if valueize { + r.ai = make([]interface{}, 0) + } else { + r.a = make([]Result, 0) + } + } + for ; i < len(json); i++ { + if json[i] <= ' ' { + continue + } + // get next value + if json[i] == ']' || json[i] == '}' { + break + } + switch json[i] { + default: + if (json[i] >= '0' && json[i] <= '9') || json[i] == '-' { + value.Type = Number + value.Raw, value.Num = tonum(json[i:]) + } else { + continue + } + case '{', '[': + value.Type = JSON + value.Raw = squash(json[i:]) + case 'n': + value.Type = Null + value.Raw = tolit(json[i:]) + case 't': + value.Type = True + value.Raw = tolit(json[i:]) + case 'f': + value.Type = False + value.Raw = tolit(json[i:]) + case '"': + value.Type = String + value.Raw, value.Str = tostr(json[i:]) + } + i += len(value.Raw) - 1 + + if r.vc == '{' { + if count%2 == 0 { + key = value + } else { + if valueize { + r.oi[key.Str] = value.Value() + } else { + r.o[key.Str] = value + } + } + count++ + } else { + if valueize { + r.ai = append(r.ai, value.Value()) + } else { + r.a = append(r.a, value) + } + } + } +end: + return +} + +// Parse parses the json and returns a result. 
+func Parse(json string) Result { + var value Result + for i := 0; i < len(json); i++ { + if json[i] == '{' || json[i] == '[' { + value.Type = JSON + value.Raw = json[i:] // just take the entire raw + break + } + if json[i] <= ' ' { + continue + } + switch json[i] { + default: + if (json[i] >= '0' && json[i] <= '9') || json[i] == '-' { + value.Type = Number + value.Raw, value.Num = tonum(json[i:]) + } else { + return Result{} + } + case 'n': + value.Type = Null + value.Raw = tolit(json[i:]) + case 't': + value.Type = True + value.Raw = tolit(json[i:]) + case 'f': + value.Type = False + value.Raw = tolit(json[i:]) + case '"': + value.Type = String + value.Raw, value.Str = tostr(json[i:]) + } + break + } + return value +} + +// ParseBytes parses the json and returns a result. +// If working with bytes, this method preferred over Parse(string(data)) +func ParseBytes(json []byte) Result { + return Parse(string(json)) +} + +func squash(json string) string { + // expects that the lead character is a '[' or '{' + // squash the value, ignoring all nested arrays and objects. 
+ // the first '[' or '{' has already been read + depth := 1 + for i := 1; i < len(json); i++ { + if json[i] >= '"' && json[i] <= '}' { + switch json[i] { + case '"': + i++ + s2 := i + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + // look for an escaped slash + if json[i-1] == '\\' { + n := 0 + for j := i - 2; j > s2-1; j-- { + if json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + break + } + } + case '{', '[': + depth++ + case '}', ']': + depth-- + if depth == 0 { + return json[:i+1] + } + } + } + } + return json +} + +func tonum(json string) (raw string, num float64) { + for i := 1; i < len(json); i++ { + // less than dash might have valid characters + if json[i] <= '-' { + if json[i] <= ' ' || json[i] == ',' { + // break on whitespace and comma + raw = json[:i] + num, _ = strconv.ParseFloat(raw, 64) + return + } + // could be a '+' or '-'. let's assume so. + continue + } + if json[i] < ']' { + // probably a valid number + continue + } + if json[i] == 'e' || json[i] == 'E' { + // allow for exponential numbers + continue + } + // likely a ']' or '}' + raw = json[:i] + num, _ = strconv.ParseFloat(raw, 64) + return + } + raw = json + num, _ = strconv.ParseFloat(raw, 64) + return +} + +func tolit(json string) (raw string) { + for i := 1; i < len(json); i++ { + if json[i] <= 'a' || json[i] >= 'z' { + return json[:i] + } + } + return json +} + +func tostr(json string) (raw string, str string) { + // expects that the lead character is a '"' + for i := 1; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + return json[:i+1], json[1:i] + } + if json[i] == '\\' { + i++ + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + // look for an escaped slash + if json[i-1] == '\\' { + n := 0 + for j := i - 2; j > 0; j-- { + if json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + break + } + } + var ret string + if i+1 < 
len(json) { + ret = json[:i+1] + } else { + ret = json[:i] + } + return ret, unescape(json[1:i]) + } + } + return json, json[1:] +} + +// Exists returns true if value exists. +// +// if gjson.Get(json, "name.last").Exists(){ +// println("value exists") +// } +func (t Result) Exists() bool { + return t.Type != Null || len(t.Raw) != 0 +} + +// Value returns one of these types: +// +// bool, for JSON booleans +// float64, for JSON numbers +// Number, for JSON numbers +// string, for JSON string literals +// nil, for JSON null +// +func (t Result) Value() interface{} { + if t.Type == String { + return t.Str + } + switch t.Type { + default: + return nil + case False: + return false + case Number: + return t.Num + case JSON: + r := t.arrayOrMap(0, true) + if r.vc == '{' { + return r.oi + } else if r.vc == '[' { + return r.ai + } + return nil + case True: + return true + } +} + +func parseString(json string, i int) (int, string, bool, bool) { + var s = i + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + return i + 1, json[s-1 : i+1], false, true + } + if json[i] == '\\' { + i++ + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + // look for an escaped slash + if json[i-1] == '\\' { + n := 0 + for j := i - 2; j > 0; j-- { + if json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + return i + 1, json[s-1 : i+1], true, true + } + } + break + } + } + return i, json[s-1:], false, false +} + +func parseNumber(json string, i int) (int, string) { + var s = i + i++ + for ; i < len(json); i++ { + if json[i] <= ' ' || json[i] == ',' || json[i] == ']' || json[i] == '}' { + return i, json[s:i] + } + } + return i, json[s:] +} + +func parseLiteral(json string, i int) (int, string) { + var s = i + i++ + for ; i < len(json); i++ { + if json[i] < 'a' || json[i] > 'z' { + return i, json[s:i] + } + } + return i, json[s:] +} + +type arrayPathResult struct { + part string + path string + more 
bool + alogok bool + arrch bool + alogkey string + query struct { + on bool + path string + op string + value string + all bool + } +} + +func parseArrayPath(path string) (r arrayPathResult) { + for i := 0; i < len(path); i++ { + if path[i] == '.' { + r.part = path[:i] + r.path = path[i+1:] + r.more = true + return + } + if path[i] == '#' { + r.arrch = true + if i == 0 && len(path) > 1 { + if path[1] == '.' { + r.alogok = true + r.alogkey = path[2:] + r.path = path[:1] + } else if path[1] == '[' { + r.query.on = true + // query + i += 2 + // whitespace + for ; i < len(path); i++ { + if path[i] > ' ' { + break + } + } + s := i + for ; i < len(path); i++ { + if path[i] <= ' ' || + path[i] == '!' || + path[i] == '=' || + path[i] == '<' || + path[i] == '>' || + path[i] == '%' || + path[i] == ']' { + break + } + } + r.query.path = path[s:i] + // whitespace + for ; i < len(path); i++ { + if path[i] > ' ' { + break + } + } + if i < len(path) { + s = i + if path[i] == '!' { + if i < len(path)-1 && path[i+1] == '=' { + i++ + } + } else if path[i] == '<' || path[i] == '>' { + if i < len(path)-1 && path[i+1] == '=' { + i++ + } + } else if path[i] == '=' { + if i < len(path)-1 && path[i+1] == '=' { + s++ + i++ + } + } + i++ + r.query.op = path[s:i] + // whitespace + for ; i < len(path); i++ { + if path[i] > ' ' { + break + } + } + s = i + for ; i < len(path); i++ { + if path[i] == '"' { + i++ + s2 := i + for ; i < len(path); i++ { + if path[i] > '\\' { + continue + } + if path[i] == '"' { + // look for an escaped slash + if path[i-1] == '\\' { + n := 0 + for j := i - 2; j > s2-1; j-- { + if path[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + break + } + } + } else if path[i] == ']' { + if i+1 < len(path) && path[i+1] == '#' { + r.query.all = true + } + break + } + } + if i > len(path) { + i = len(path) + } + v := path[s:i] + for len(v) > 0 && v[len(v)-1] <= ' ' { + v = v[:len(v)-1] + } + r.query.value = v + } + } + } + continue + } + } + r.part = path 
+ r.path = "" + return +} + +type objectPathResult struct { + part string + path string + wild bool + more bool +} + +func parseObjectPath(path string) (r objectPathResult) { + for i := 0; i < len(path); i++ { + if path[i] == '.' { + r.part = path[:i] + r.path = path[i+1:] + r.more = true + return + } + if path[i] == '*' || path[i] == '?' { + r.wild = true + continue + } + if path[i] == '\\' { + // go into escape mode. this is a slower path that + // strips off the escape character from the part. + epart := []byte(path[:i]) + i++ + if i < len(path) { + epart = append(epart, path[i]) + i++ + for ; i < len(path); i++ { + if path[i] == '\\' { + i++ + if i < len(path) { + epart = append(epart, path[i]) + } + continue + } else if path[i] == '.' { + r.part = string(epart) + r.path = path[i+1:] + r.more = true + return + } else if path[i] == '*' || path[i] == '?' { + r.wild = true + } + epart = append(epart, path[i]) + } + } + // append the last part + r.part = string(epart) + return + } + } + r.part = path + return +} + +func parseSquash(json string, i int) (int, string) { + // expects that the lead character is a '[' or '{' + // squash the value, ignoring all nested arrays and objects. 
+ // the first '[' or '{' has already been read + s := i + i++ + depth := 1 + for ; i < len(json); i++ { + if json[i] >= '"' && json[i] <= '}' { + switch json[i] { + case '"': + i++ + s2 := i + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + // look for an escaped slash + if json[i-1] == '\\' { + n := 0 + for j := i - 2; j > s2-1; j-- { + if json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + break + } + } + case '{', '[': + depth++ + case '}', ']': + depth-- + if depth == 0 { + i++ + return i, json[s:i] + } + } + } + } + return i, json[s:] +} + +func parseObject(c *parseContext, i int, path string) (int, bool) { + var pmatch, kesc, vesc, ok, hit bool + var key, val string + rp := parseObjectPath(path) + for i < len(c.json) { + for ; i < len(c.json); i++ { + if c.json[i] == '"' { + // parse_key_string + // this is slightly different from getting s string value + // because we don't need the outer quotes. + i++ + var s = i + for ; i < len(c.json); i++ { + if c.json[i] > '\\' { + continue + } + if c.json[i] == '"' { + i, key, kesc, ok = i+1, c.json[s:i], false, true + goto parse_key_string_done + } + if c.json[i] == '\\' { + i++ + for ; i < len(c.json); i++ { + if c.json[i] > '\\' { + continue + } + if c.json[i] == '"' { + // look for an escaped slash + if c.json[i-1] == '\\' { + n := 0 + for j := i - 2; j > 0; j-- { + if c.json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + i, key, kesc, ok = i+1, c.json[s:i], true, true + goto parse_key_string_done + } + } + break + } + } + i, key, kesc, ok = i, c.json[s:], false, false + parse_key_string_done: + break + } + if c.json[i] == '}' { + return i + 1, false + } + } + if !ok { + return i, false + } + if rp.wild { + if kesc { + pmatch = match.Match(unescape(key), rp.part) + } else { + pmatch = match.Match(key, rp.part) + } + } else { + if kesc { + pmatch = rp.part == unescape(key) + } else { + pmatch = rp.part == key + } + } + hit = 
pmatch && !rp.more
+	for ; i < len(c.json); i++ {
+		switch c.json[i] {
+		default:
+			continue
+		case '"':
+			i++
+			i, val, vesc, ok = parseString(c.json, i)
+			if !ok {
+				return i, false
+			}
+			if hit {
+				if vesc {
+					c.value.Str = unescape(val[1 : len(val)-1])
+				} else {
+					c.value.Str = val[1 : len(val)-1]
+				}
+				c.value.Raw = val
+				c.value.Type = String
+				return i, true
+			}
+		case '{':
+			if pmatch && !hit {
+				i, hit = parseObject(c, i+1, rp.path)
+				if hit {
+					return i, true
+				}
+			} else {
+				i, val = parseSquash(c.json, i)
+				if hit {
+					c.value.Raw = val
+					c.value.Type = JSON
+					return i, true
+				}
+			}
+		case '[':
+			if pmatch && !hit {
+				i, hit = parseArray(c, i+1, rp.path)
+				if hit {
+					return i, true
+				}
+			} else {
+				i, val = parseSquash(c.json, i)
+				if hit {
+					c.value.Raw = val
+					c.value.Type = JSON
+					return i, true
+				}
+			}
+		case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			i, val = parseNumber(c.json, i)
+			if hit {
+				c.value.Raw = val
+				c.value.Type = Number
+				c.value.Num, _ = strconv.ParseFloat(val, 64)
+				return i, true
+			}
+		case 't', 'f', 'n':
+			vc := c.json[i]
+			i, val = parseLiteral(c.json, i)
+			if hit {
+				c.value.Raw = val
+				switch vc {
+				case 't':
+					c.value.Type = True
+				case 'f':
+					c.value.Type = False
+				}
+				return i, true
+			}
+		}
+		break
+	}
+	return i, false
+}
+func queryMatches(rp *arrayPathResult, value Result) bool {
+	rpv := rp.query.value
+	if len(rpv) > 2 && rpv[0] == '"' && rpv[len(rpv)-1] == '"' {
+		rpv = rpv[1 : len(rpv)-1]
+	}
+	switch value.Type {
+	case String:
+		switch rp.query.op {
+		case "=":
+			return value.Str == rpv
+		case "!=":
+			return value.Str != rpv
+		case "<":
+			return value.Str < rpv
+		case "<=":
+			return value.Str <= rpv
+		case ">":
+			return value.Str > rpv
+		case ">=":
+			return value.Str >= rpv
+		case "%":
+			return match.Match(value.Str, rpv)
+		}
+	case Number:
+		rpvn, _ := strconv.ParseFloat(rpv, 64)
+		switch rp.query.op {
+		case "=":
+			return value.Num == rpvn
+		case "!=":
+			return value.Num != rpvn // fix: was `==`, making numeric `!=` queries behave like `=`
+		case "<":
+
return value.Num < rpvn + case "<=": + return value.Num <= rpvn + case ">": + return value.Num > rpvn + case ">=": + return value.Num >= rpvn + } + case True: + switch rp.query.op { + case "=": + return rpv == "true" + case "!=": + return rpv != "true" + case ">": + return rpv == "false" + case ">=": + return true + } + case False: + switch rp.query.op { + case "=": + return rpv == "false" + case "!=": + return rpv != "false" + case "<": + return rpv == "true" + case "<=": + return true + } + } + return false +} +func parseArray(c *parseContext, i int, path string) (int, bool) { + var pmatch, vesc, ok, hit bool + var val string + var h int + var alog []int + var partidx int + var multires []byte + rp := parseArrayPath(path) + if !rp.arrch { + n, err := strconv.ParseUint(rp.part, 10, 64) + if err != nil { + partidx = -1 + } else { + partidx = int(n) + } + } + for i < len(c.json) { + if !rp.arrch { + pmatch = partidx == h + hit = pmatch && !rp.more + } + h++ + if rp.alogok { + alog = append(alog, i) + } + for ; i < len(c.json); i++ { + switch c.json[i] { + default: + continue + case '"': + i++ + i, val, vesc, ok = parseString(c.json, i) + if !ok { + return i, false + } + if hit { + if rp.alogok { + break + } + if vesc { + c.value.Str = unescape(val[1 : len(val)-1]) + } else { + c.value.Str = val[1 : len(val)-1] + } + c.value.Raw = val + c.value.Type = String + return i, true + } + case '{': + if pmatch && !hit { + i, hit = parseObject(c, i+1, rp.path) + if hit { + if rp.alogok { + break + } + return i, true + } + } else { + i, val = parseSquash(c.json, i) + if rp.query.on { + res := Get(val, rp.query.path) + if queryMatches(&rp, res) { + if rp.more { + res = Get(val, rp.path) + } else { + res = Result{Raw: val, Type: JSON} + } + if rp.query.all { + if len(multires) == 0 { + multires = append(multires, '[') + } else { + multires = append(multires, ',') + } + multires = append(multires, res.Raw...) 
+ } else { + c.value = res + return i, true + } + } + } else if hit { + if rp.alogok { + break + } + c.value.Raw = val + c.value.Type = JSON + return i, true + } + } + case '[': + if pmatch && !hit { + i, hit = parseArray(c, i+1, rp.path) + if hit { + if rp.alogok { + break + } + return i, true + } + } else { + i, val = parseSquash(c.json, i) + if hit { + if rp.alogok { + break + } + c.value.Raw = val + c.value.Type = JSON + return i, true + } + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + i, val = parseNumber(c.json, i) + if hit { + if rp.alogok { + break + } + c.value.Raw = val + c.value.Type = Number + c.value.Num, _ = strconv.ParseFloat(val, 64) + return i, true + } + case 't', 'f', 'n': + vc := c.json[i] + i, val = parseLiteral(c.json, i) + if hit { + if rp.alogok { + break + } + c.value.Raw = val + switch vc { + case 't': + c.value.Type = True + case 'f': + c.value.Type = False + } + return i, true + } + case ']': + if rp.arrch && rp.part == "#" { + if rp.alogok { + var jsons = make([]byte, 0, 64) + jsons = append(jsons, '[') + for j, k := 0, 0; j < len(alog); j++ { + res := Get(c.json[alog[j]:], rp.alogkey) + if res.Exists() { + if k > 0 { + jsons = append(jsons, ',') + } + jsons = append(jsons, []byte(res.Raw)...) + k++ + } + } + jsons = append(jsons, ']') + c.value.Type = JSON + c.value.Raw = string(jsons) + return i + 1, true + } else { + if rp.alogok { + break + } + c.value.Raw = val + c.value.Type = Number + c.value.Num = float64(h - 1) + c.calcd = true + return i + 1, true + } + } + if len(multires) > 0 && !c.value.Exists() { + c.value = Result{ + Raw: string(append(multires, ']')), + Type: JSON, + } + } + return i + 1, false + } + break + } + } + return i, false +} + +type parseContext struct { + json string + value Result + calcd bool +} + +// Get searches json for the specified path. +// A path is in dot syntax, such as "name.last" or "age". +// This function expects that the json is well-formed, and does not validate. 
+// Invalid json will not panic, but it may return back unexpected results. +// When the value is found it's returned immediately. +// +// A path is a series of keys searated by a dot. +// A key may contain special wildcard characters '*' and '?'. +// To access an array value use the index as the key. +// To get the number of elements in an array or to access a child path, use the '#' character. +// The dot and wildcard character can be escaped with '\'. +// +// { +// "name": {"first": "Tom", "last": "Anderson"}, +// "age":37, +// "children": ["Sara","Alex","Jack"], +// "friends": [ +// {"first": "James", "last": "Murphy"}, +// {"first": "Roger", "last": "Craig"} +// ] +// } +// "name.last" >> "Anderson" +// "age" >> 37 +// "children" >> ["Sara","Alex","Jack"] +// "children.#" >> 3 +// "children.1" >> "Alex" +// "child*.2" >> "Jack" +// "c?ildren.0" >> "Sara" +// "friends.#.first" >> ["James","Roger"] +// +func Get(json, path string) Result { + var i int + var c = &parseContext{json: json} + for ; i < len(c.json); i++ { + if c.json[i] == '{' { + i++ + parseObject(c, i, path) + break + } + if c.json[i] == '[' { + i++ + parseArray(c, i, path) + break + } + } + if len(c.value.Raw) > 0 && !c.calcd { + jhdr := *(*reflect.StringHeader)(unsafe.Pointer(&json)) + rhdr := *(*reflect.StringHeader)(unsafe.Pointer(&(c.value.Raw))) + c.value.Index = int(rhdr.Data - jhdr.Data) + if c.value.Index < 0 || c.value.Index >= len(json) { + c.value.Index = 0 + } + } + return c.value +} +func fromBytesGet(result Result) Result { + // safely get the string headers + rawhi := *(*reflect.StringHeader)(unsafe.Pointer(&result.Raw)) + strhi := *(*reflect.StringHeader)(unsafe.Pointer(&result.Str)) + // create byte slice headers + rawh := reflect.SliceHeader{Data: rawhi.Data, Len: rawhi.Len} + strh := reflect.SliceHeader{Data: strhi.Data, Len: strhi.Len} + if strh.Data == 0 { + // str is nil + if rawh.Data == 0 { + // raw is nil + result.Raw = "" + } else { + // raw has data, safely copy the 
slice header to a string + result.Raw = string(*(*[]byte)(unsafe.Pointer(&rawh))) + } + result.Str = "" + } else if rawh.Data == 0 { + // raw is nil + result.Raw = "" + // str has data, safely copy the slice header to a string + result.Str = string(*(*[]byte)(unsafe.Pointer(&strh))) + } else if strh.Data >= rawh.Data && + int(strh.Data)+strh.Len <= int(rawh.Data)+rawh.Len { + // Str is a substring of Raw. + start := int(strh.Data - rawh.Data) + // safely copy the raw slice header + result.Raw = string(*(*[]byte)(unsafe.Pointer(&rawh))) + // substring the raw + result.Str = result.Raw[start : start+strh.Len] + } else { + // safely copy both the raw and str slice headers to strings + result.Raw = string(*(*[]byte)(unsafe.Pointer(&rawh))) + result.Str = string(*(*[]byte)(unsafe.Pointer(&strh))) + } + return result +} + +// GetBytes searches json for the specified path. +// If working with bytes, this method preferred over Get(string(data), path) +func GetBytes(json []byte, path string) Result { + var result Result + if json != nil { + // unsafe cast to string + result = Get(*(*string)(unsafe.Pointer(&json)), path) + result = fromBytesGet(result) + } + return result +} + +// unescape unescapes a string +func unescape(json string) string { //, error) { + var str = make([]byte, 0, len(json)) + for i := 0; i < len(json); i++ { + switch { + default: + str = append(str, json[i]) + case json[i] < ' ': + return "" //, errors.New("invalid character in string") + case json[i] == '\\': + i++ + if i >= len(json) { + return "" //, errors.New("invalid escape sequence") + } + switch json[i] { + default: + return "" //, errors.New("invalid escape sequence") + case '\\': + str = append(str, '\\') + case '/': + str = append(str, '/') + case 'b': + str = append(str, '\b') + case 'f': + str = append(str, '\f') + case 'n': + str = append(str, '\n') + case 'r': + str = append(str, '\r') + case 't': + str = append(str, '\t') + case '"': + str = append(str, '"') + case 'u': + if i+5 > 
len(json) { + return "" //, errors.New("invalid escape sequence") + } + i++ + // extract the codepoint + var code int + for j := i; j < i+4; j++ { + switch { + default: + return "" //, errors.New("invalid escape sequence") + case json[j] >= '0' && json[j] <= '9': + code += (int(json[j]) - '0') << uint(12-(j-i)*4) + case json[j] >= 'a' && json[j] <= 'f': + code += (int(json[j]) - 'a' + 10) << uint(12-(j-i)*4) + case json[j] >= 'a' && json[j] <= 'f': + code += (int(json[j]) - 'a' + 10) << uint(12-(j-i)*4) + } + } + str = append(str, []byte(string(code))...) + i += 3 // only 3 because we will increment on the for-loop + } + } + } + return string(str) //, nil +} + +// Less return true if a token is less than another token. +// The caseSensitive paramater is used when the tokens are Strings. +// The order when comparing two different type is: +// +// Null < False < Number < String < True < JSON +// +func (t Result) Less(token Result, caseSensitive bool) bool { + if t.Type < token.Type { + return true + } + if t.Type > token.Type { + return false + } + if t.Type == String { + if caseSensitive { + return t.Str < token.Str + } + return stringLessInsensitive(t.Str, token.Str) + } + if t.Type == Number { + return t.Num < token.Num + } + return t.Raw < token.Raw +} + +func stringLessInsensitive(a, b string) bool { + for i := 0; i < len(a) && i < len(b); i++ { + if a[i] >= 'A' && a[i] <= 'Z' { + if b[i] >= 'A' && b[i] <= 'Z' { + // both are uppercase, do nothing + if a[i] < b[i] { + return true + } else if a[i] > b[i] { + return false + } + } else { + // a is uppercase, convert a to lowercase + if a[i]+32 < b[i] { + return true + } else if a[i]+32 > b[i] { + return false + } + } + } else if b[i] >= 'A' && b[i] <= 'Z' { + // b is uppercase, convert b to lowercase + if a[i] < b[i]+32 { + return true + } else if a[i] > b[i]+32 { + return false + } + } else { + // neither are uppercase + if a[i] < b[i] { + return true + } else if a[i] > b[i] { + return false + } + } + } + return 
len(a) < len(b) +} + +// parseAny parses the next value from a json string. +// A Result is returned when the hit param is set. +// The return values are (i int, res Result, ok bool) +func parseAny(json string, i int, hit bool) (int, Result, bool) { + var res Result + var val string + for ; i < len(json); i++ { + if json[i] == '{' || json[i] == '[' { + i, val = parseSquash(json, i) + if hit { + res.Raw = val + res.Type = JSON + } + return i, res, true + } + if json[i] <= ' ' { + continue + } + switch json[i] { + case '"': + i++ + var vesc bool + var ok bool + i, val, vesc, ok = parseString(json, i) + if !ok { + return i, res, false + } + if hit { + res.Type = String + res.Raw = val + if vesc { + res.Str = unescape(val[1 : len(val)-1]) + } else { + res.Str = val[1 : len(val)-1] + } + } + return i, res, true + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + i, val = parseNumber(json, i) + if hit { + res.Raw = val + res.Type = Number + res.Num, _ = strconv.ParseFloat(val, 64) + } + return i, res, true + case 't', 'f', 'n': + vc := json[i] + i, val = parseLiteral(json, i) + if hit { + res.Raw = val + switch vc { + case 't': + res.Type = True + case 'f': + res.Type = False + } + return i, res, true + } + } + } + return i, res, false +} + +var ( // used for testing + testWatchForFallback bool + testLastWasFallback bool +) + +// areSimplePaths returns true if all the paths are simple enough +// to parse quickly for GetMany(). Allows alpha-numeric, dots, +// underscores, and the dollar sign. It does not allow non-alnum, +// escape characters, or keys which start with a numbers. 
+// For example: +// "name.last" == OK +// "user.id0" == OK +// "user.ID" == OK +// "user.first_name" == OK +// "user.firstName" == OK +// "user.0item" == BAD +// "user.#id" == BAD +// "user\.name" == BAD +func areSimplePaths(paths []string) bool { + for _, path := range paths { + var fi int // first key index, for keys with numeric prefix + for i := 0; i < len(path); i++ { + if path[i] >= 'a' && path[i] <= 'z' { + // a-z is likely to be the highest frequency charater. + continue + } + if path[i] == '.' { + fi = i + 1 + continue + } + if path[i] >= 'A' && path[i] <= 'Z' { + continue + } + if path[i] == '_' || path[i] == '$' { + continue + } + if i > fi && path[i] >= '0' && path[i] <= '9' { + continue + } + return false + } + } + return true +} + +// GetMany searches json for the multiple paths. +// The return value is a Result array where the number of items +// will be equal to the number of input paths. +func GetMany(json string, paths ...string) []Result { + if len(paths) < 4 { + if testWatchForFallback { + testLastWasFallback = false + } + switch len(paths) { + case 0: + // return nil when no paths are specified. + return nil + case 1: + return []Result{Get(json, paths[0])} + case 2: + return []Result{Get(json, paths[0]), Get(json, paths[1])} + case 3: + return []Result{Get(json, paths[0]), Get(json, paths[1]), Get(json, paths[2])} + } + } + var results []Result + var ok bool + var i int + if len(paths) > 512 { + // we can only support up to 512 paths. Is that too many? + goto fallback + } + if !areSimplePaths(paths) { + // If there is even one path that is not considered "simple" then + // we need to use the fallback method. + goto fallback + } + // locate the object token. + for ; i < len(json); i++ { + if json[i] == '{' { + i++ + break + } + if json[i] <= ' ' { + continue + } + goto fallback + } + // use the call function table. 
+ if len(paths) <= 8 { + results, ok = getMany8(json, i, paths) + } else if len(paths) <= 16 { + results, ok = getMany16(json, i, paths) + } else if len(paths) <= 32 { + results, ok = getMany32(json, i, paths) + } else if len(paths) <= 64 { + results, ok = getMany64(json, i, paths) + } else if len(paths) <= 128 { + results, ok = getMany128(json, i, paths) + } else if len(paths) <= 256 { + results, ok = getMany256(json, i, paths) + } else if len(paths) <= 512 { + results, ok = getMany512(json, i, paths) + } + if !ok { + // there was some fault while parsing. we should try the + // fallback method. This could result in performance + // degregation in some cases. + goto fallback + } + if testWatchForFallback { + testLastWasFallback = false + } + return results +fallback: + results = results[:0] + for i := 0; i < len(paths); i++ { + results = append(results, Get(json, paths[i])) + } + if testWatchForFallback { + testLastWasFallback = true + } + return results +} + +// GetManyBytes searches json for the specified path. +// If working with bytes, this method preferred over +// GetMany(string(data), paths...) +func GetManyBytes(json []byte, paths ...string) []Result { + if json == nil { + return GetMany("", paths...) + } + results := GetMany(*(*string)(unsafe.Pointer(&json)), paths...) + for i := range results { + results[i] = fromBytesGet(results[i]) + } + return results +} + +// parseGetMany parses a json object for keys that match against the callers +// paths. It's a best-effort attempt and quickly locating and assigning the +// values to the []Result array. If there are failures such as bad json, or +// invalid input paths, or too much recursion, the function will exit with a +// return value of 'false'. 
+func parseGetMany( + json string, i int, + level uint, kplen int, + paths []string, completed []bool, matches []uint64, results []Result, +) (int, bool) { + if level > 62 { + // The recursion level is limited because the matches []uint64 + // array cannot handle more the 64-bits. + return i, false + } + // At this point the last character read was a '{'. + // Read all object keys and try to match against the paths. + var key string + var val string + var vesc, ok bool +next_key: + for ; i < len(json); i++ { + if json[i] == '"' { + // read the key + i, val, vesc, ok = parseString(json, i+1) + if !ok { + return i, false + } + if vesc { + // the value is escaped + key = unescape(val[1 : len(val)-1]) + } else { + // just a plain old ascii key + key = val[1 : len(val)-1] + } + var hasMatch bool + var parsedVal bool + var valOrgIndex int + var valPathIndex int + for j := 0; j < len(key); j++ { + if key[j] == '.' { + // we need to look for keys with dot and ignore them. + if i, _, ok = parseAny(json, i, false); !ok { + return i, false + } + continue next_key + } + } + var usedPaths int + // loop through paths and look for matches + for j := 0; j < len(paths); j++ { + if completed[j] { + usedPaths++ + // ignore completed paths + continue + } + if level > 0 && (matches[j]>>(level-1))&1 == 0 { + // ignore unmatched paths + usedPaths++ + continue + } + + // try to match the key to the path + // this is spaghetti code but the idea is to minimize + // calls and variable assignments when comparing the + // key to paths + if len(paths[j])-kplen >= len(key) { + i, k := kplen, 0 + for ; k < len(key); k, i = k+1, i+1 { + if key[k] != paths[j][i] { + // no match + goto nomatch + } + } + if i < len(paths[j]) { + if paths[j][i] == '.' 
{ + // matched, but there still more keys in the path + goto match_not_atend + } + } + // matched and at the end of the path + goto match_atend + } + // no match, jump to the nomatch label + goto nomatch + match_atend: + // found a match + // at the end of the path. we must take the value. + usedPaths++ + if !parsedVal { + // the value has not been parsed yet. let's do so. + valOrgIndex = i // keep track of the current position. + i, results[j], ok = parseAny(json, i, true) + if !ok { + return i, false + } + parsedVal = true + valPathIndex = j + } else { + results[j] = results[valPathIndex] + } + // mark as complete + completed[j] = true + // jump over the match_not_atend label + goto nomatch + match_not_atend: + // found a match + // still in the middle of the path. + usedPaths++ + // mark the path as matched + matches[j] |= 1 << level + if !hasMatch { + hasMatch = true + } + nomatch: // noop label + } + + if !parsedVal { + if hasMatch { + // we found a match and the value has not been parsed yet. + // let's find out if the next value type is an object. + for ; i < len(json); i++ { + if json[i] <= ' ' || json[i] == ':' { + continue + } + break + } + if i < len(json) { + if json[i] == '{' { + // it's an object. let's go deeper + i, ok = parseGetMany(json, i+1, level+1, kplen+len(key)+1, paths, completed, matches, results) + if !ok { + return i, false + } + } else { + // not an object. just parse and ignore. + if i, _, ok = parseAny(json, i, false); !ok { + return i, false + } + } + } + } else { + // Since there was no matches we can just parse the value and + // ignore the result. + if i, _, ok = parseAny(json, i, false); !ok { + return i, false + } + } + } else if hasMatch && len(results[valPathIndex].Raw) > 0 && results[valPathIndex].Raw[0] == '{' { + // The value was already parsed and the value type is an object. + // Rewind the json index and let's parse deeper. 
+ i = valOrgIndex + for ; i < len(json); i++ { + if json[i] == '{' { + break + } + } + i, ok = parseGetMany(json, i+1, level+1, kplen+len(key)+1, paths, completed, matches, results) + if !ok { + return i, false + } + } + if usedPaths == len(paths) { + // all paths have been used, either completed or matched. + // we should stop parsing this object to save CPU cycles. + if level > 0 && i < len(json) { + i, _ = parseSquash(json, i) + } + return i, true + } + } else if json[i] == '}' { + // reached the end of the object. end it here. + return i + 1, true + } + } + return i, true +} + +// Call table for GetMany. Using an isolated function allows for allocating +// arrays with know capacities on the stack, as opposed to dynamically +// allocating on the heap. This can provide a tremendous performance boost +// by avoiding the GC. +func getMany8(json string, i int, paths []string) ([]Result, bool) { + const max = 8 + var completed = make([]bool, 0, max) + var matches = make([]uint64, 0, max) + var results = make([]Result, 0, max) + completed = completed[0:len(paths):max] + matches = matches[0:len(paths):max] + results = results[0:len(paths):max] + _, ok := parseGetMany(json, i, 0, 0, paths, completed, matches, results) + return results, ok +} +func getMany16(json string, i int, paths []string) ([]Result, bool) { + const max = 16 + var completed = make([]bool, 0, max) + var matches = make([]uint64, 0, max) + var results = make([]Result, 0, max) + completed = completed[0:len(paths):max] + matches = matches[0:len(paths):max] + results = results[0:len(paths):max] + _, ok := parseGetMany(json, i, 0, 0, paths, completed, matches, results) + return results, ok +} +func getMany32(json string, i int, paths []string) ([]Result, bool) { + const max = 32 + var completed = make([]bool, 0, max) + var matches = make([]uint64, 0, max) + var results = make([]Result, 0, max) + completed = completed[0:len(paths):max] + matches = matches[0:len(paths):max] + results = 
results[0:len(paths):max] + _, ok := parseGetMany(json, i, 0, 0, paths, completed, matches, results) + return results, ok +} +func getMany64(json string, i int, paths []string) ([]Result, bool) { + const max = 64 + var completed = make([]bool, 0, max) + var matches = make([]uint64, 0, max) + var results = make([]Result, 0, max) + completed = completed[0:len(paths):max] + matches = matches[0:len(paths):max] + results = results[0:len(paths):max] + _, ok := parseGetMany(json, i, 0, 0, paths, completed, matches, results) + return results, ok +} +func getMany128(json string, i int, paths []string) ([]Result, bool) { + const max = 128 + var completed = make([]bool, 0, max) + var matches = make([]uint64, 0, max) + var results = make([]Result, 0, max) + completed = completed[0:len(paths):max] + matches = matches[0:len(paths):max] + results = results[0:len(paths):max] + _, ok := parseGetMany(json, i, 0, 0, paths, completed, matches, results) + return results, ok +} +func getMany256(json string, i int, paths []string) ([]Result, bool) { + const max = 256 + var completed = make([]bool, 0, max) + var matches = make([]uint64, 0, max) + var results = make([]Result, 0, max) + completed = completed[0:len(paths):max] + matches = matches[0:len(paths):max] + results = results[0:len(paths):max] + _, ok := parseGetMany(json, i, 0, 0, paths, completed, matches, results) + return results, ok +} +func getMany512(json string, i int, paths []string) ([]Result, bool) { + const max = 512 + var completed = make([]bool, 0, max) + var matches = make([]uint64, 0, max) + var results = make([]Result, 0, max) + completed = completed[0:len(paths):max] + matches = matches[0:len(paths):max] + results = results[0:len(paths):max] + _, ok := parseGetMany(json, i, 0, 0, paths, completed, matches, results) + return results, ok +} diff --git a/vendor/github.com/tidwall/gjson/logo.png b/vendor/github.com/tidwall/gjson/logo.png new file mode 100644 index 0000000..17a8bbe Binary files /dev/null and 
b/vendor/github.com/tidwall/gjson/logo.png differ diff --git a/vendor/github.com/tidwall/grect/LICENSE.md b/vendor/github.com/tidwall/grect/LICENSE.md new file mode 100644 index 0000000..58f5819 --- /dev/null +++ b/vendor/github.com/tidwall/grect/LICENSE.md @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2016 Josh Baker + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/tidwall/grect/README.md b/vendor/github.com/tidwall/grect/README.md new file mode 100644 index 0000000..04a8bf0 --- /dev/null +++ b/vendor/github.com/tidwall/grect/README.md @@ -0,0 +1,25 @@ +GRECT +==== + +Quickly get the outer rectangle for GeoJSON, WKT, WKB. 
+ +```go + r := grect.Get(`{ + "type": "Polygon", + "coordinates": [ + [ [100.0, 0.0], [101.0, 0.0], [101.0, 1.0], + [100.0, 1.0], [100.0, 0.0] ] + ] + }`) + fmt.Printf("%v %v\n", r.Min, r.Max) + // Output: + // [100 0] [101 1] +``` + +## Contact +Josh Baker [@tidwall](http://twitter.com/tidwall) + +## License + +GRECT source code is available under the MIT [License](/LICENSE). + diff --git a/vendor/github.com/tidwall/grect/grect.go b/vendor/github.com/tidwall/grect/grect.go new file mode 100644 index 0000000..13eb761 --- /dev/null +++ b/vendor/github.com/tidwall/grect/grect.go @@ -0,0 +1,337 @@ +package grect + +import ( + "strconv" + "strings" + + "github.com/tidwall/gjson" +) + +type Rect struct { + Min, Max []float64 +} + +func (r Rect) String() string { + diff := len(r.Min) != len(r.Max) + if !diff { + for i := 0; i < len(r.Min); i++ { + if r.Min[i] != r.Max[i] { + diff = true + break + } + } + } + var buf []byte + buf = append(buf, '[') + for i, v := range r.Min { + if i > 0 { + buf = append(buf, ' ') + } + buf = append(buf, strconv.FormatFloat(v, 'f', -1, 64)...) + } + if diff { + buf = append(buf, ']', ',', '[') + for i, v := range r.Max { + if i > 0 { + buf = append(buf, ' ') + } + buf = append(buf, strconv.FormatFloat(v, 'f', -1, 64)...) + } + } + buf = append(buf, ']') + return string(buf) +} + +func normalize(min, max []float64) (nmin, nmax []float64) { + if len(max) == 0 { + return min, min + } else if len(max) != len(min) { + if len(max) < len(min) { + max = append(max, min[len(max):]...) + } else if len(min) < len(max) { + min = append(min, max[len(min):]...) 
+ } + } + match := true + for i := 0; i < len(min); i++ { + if min[i] != max[i] { + if match { + match = false + } + if min[i] > max[i] { + min[i], max[i] = max[i], min[i] + } + } + } + if match { + return min, min + } + return min, max +} + +func Get(s string) Rect { + var i int + var ws bool + var min, max []float64 + for ; i < len(s); i++ { + switch s[i] { + default: + continue + case ' ', '\t', '\r', '\n': + ws = true + continue + case '[': + min, max, i = getRect(s, i) + case '{': + min, max, i = getGeoJSON(s, i) + case 0x00, 0x01: + if !ws { + // return parseWKB(s, i) + } + case 'p', 'P', 'l', 'L', 'm', 'M', 'g', 'G': + min, max, i = getWKT(s, i) + } + break + } + min, max = normalize(min, max) + return Rect{Min: min, Max: max} +} + +func getRect(s string, i int) (min, max []float64, ri int) { + a := s[i:] + parts := strings.Split(a, ",") + for i := 0; i < len(parts) && i < 2; i++ { + part := parts[i] + if len(part) > 0 && (part[0] <= ' ' || part[len(part)-1] <= ' ') { + part = strings.TrimSpace(part) + } + if len(part) >= 2 && part[0] == '[' && part[len(part)-1] == ']' { + pieces := strings.Split(part[1:len(part)-1], " ") + if i == 0 { + min = make([]float64, 0, len(pieces)) + } else { + max = make([]float64, 0, len(pieces)) + } + for j := 0; j < len(pieces); j++ { + piece := pieces[j] + if piece != "" { + n, _ := strconv.ParseFloat(piece, 64) + if i == 0 { + min = append(min, n) + } else { + max = append(max, n) + } + } + } + } + } + + // normalize + if len(parts) == 1 { + max = min + } else { + min, max = normalize(min, max) + } + + return min, max, len(s) +} + +func union(min1, max1, min2, max2 []float64) (umin, umax []float64) { + for i := 0; i < len(min1) || i < len(min2); i++ { + if i >= len(min1) { + // just copy min2 + umin = append(umin, min2[i]) + umax = append(umax, max2[i]) + } else if i >= len(min2) { + // just copy min1 + umin = append(umin, min1[i]) + umax = append(umax, max1[i]) + } else { + if min1[i] < min2[i] { + umin = append(umin, 
min1[i]) + } else { + umin = append(umin, min2[i]) + } + if max1[i] > max2[i] { + umax = append(umax, max1[i]) + } else { + umax = append(umax, max2[i]) + } + } + } + return umin, umax +} + +func getWKT(s string, i int) (min, max []float64, ri int) { + switch s[i] { + default: + for ; i < len(s); i++ { + if s[i] == ',' { + return nil, nil, i + } + if s[i] == '(' { + return getWKTAny(s, i) + } + } + return nil, nil, i + case 'g', 'G': + if len(s)-i < 18 { + return nil, nil, i + } + return getWKTGeometryCollection(s, i+18) + } +} + +func getWKTAny(s string, i int) (min, max []float64, ri int) { + min, max = make([]float64, 0, 4), make([]float64, 0, 4) + var depth int + var ni int + var idx int +loop: + for ; i < len(s); i++ { + switch s[i] { + default: + if ni == 0 { + ni = i + } + case '(': + depth++ + case ')', ' ', '\t', '\r', '\n', ',': + if ni != 0 { + n, _ := strconv.ParseFloat(s[ni:i], 64) + if idx >= len(min) { + min = append(min, n) + max = append(max, n) + } else { + if n < min[idx] { + min[idx] = n + } else if n > max[idx] { + max[idx] = n + } + } + idx++ + ni = 0 + } + switch s[i] { + case ')': + idx = 0 + depth-- + if depth == 0 { + i++ + break loop + } + case ',': + idx = 0 + } + } + } + return min, max, i +} + +func getWKTGeometryCollection(s string, i int) (min, max []float64, ri int) { + var depth int + for ; i < len(s); i++ { + if s[i] == ',' || s[i] == ')' { + // do not increment the index + return nil, nil, i + } + if s[i] == '(' { + depth++ + i++ + break + } + } +next: + for ; i < len(s); i++ { + switch s[i] { + case 'p', 'P', 'l', 'L', 'm', 'M', 'g', 'G': + var min2, max2 []float64 + min2, max2, i = getWKT(s, i) + min, max = union(min, max, min2, max2) + for ; i < len(s); i++ { + if s[i] == ',' { + i++ + goto next + } + if s[i] == ')' { + i++ + goto done + } + } + case ' ', '\t', '\r', '\n': + continue + default: + goto end_early + } + } +end_early: + // just balance the parens + for ; i < len(s); i++ { + if s[i] == '(' { + depth++ + } else if 
s[i] == ')' { + depth-- + if depth == 0 { + i++ + break + } + } + } +done: + return min, max, i +} +func getGeoJSON(s string, i int) (min, max []float64, ri int) { + json := s[i:] + switch gjson.Get(json, "type").String() { + default: + min, max = getMinMaxBrackets(gjson.Get(json, "coordinates").Raw) + case "Feature": + min, max, _ = getGeoJSON(gjson.Get(json, "geometry").String(), 0) + case "FeatureCollection": + for _, json := range gjson.Get(json, "features").Array() { + nmin, nmax, _ := getGeoJSON(json.String(), 0) + min, max = union(min, max, nmin, nmax) + } + case "GeometryCollection": + for _, json := range gjson.Get(json, "geometries").Array() { + nmin, nmax, _ := getGeoJSON(json.String(), 0) + min, max = union(min, max, nmin, nmax) + } + } + return min, max, len(json) +} + +func getMinMaxBrackets(s string) (min, max []float64) { + var ni int + var idx int + for i := 0; i < len(s); i++ { + switch s[i] { + default: + if ni == 0 { + ni = i + } + case '[', ',', ']', ' ', '\t', '\r', '\n': + if ni > 0 { + n, _ := strconv.ParseFloat(s[ni:i], 64) + if idx >= len(min) { + min = append(min, n) + max = append(max, n) + } else { + if n < min[idx] { + min[idx] = n + } else if n > max[idx] { + max[idx] = n + } + } + ni = 0 + idx++ + } + if s[i] == ']' { + idx = 0 + } + + } + } + + return +} diff --git a/vendor/github.com/tidwall/match/LICENSE b/vendor/github.com/tidwall/match/LICENSE new file mode 100644 index 0000000..58f5819 --- /dev/null +++ b/vendor/github.com/tidwall/match/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2016 Josh Baker + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the 
following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/tidwall/match/README.md b/vendor/github.com/tidwall/match/README.md new file mode 100644 index 0000000..04b0aaa --- /dev/null +++ b/vendor/github.com/tidwall/match/README.md @@ -0,0 +1,31 @@ +Match +===== + + + +Match is a very simple pattern matcher where '*' matches on any +number characters and '?' matches on any one character. +Installing +---------- + +``` +go get -u github.com/tidwall/match +``` + +Example +------- + +```go +match.Match("hello", "*llo") +match.Match("jello", "?ello") +match.Match("hello", "h*o") +``` + + +Contact +------- +Josh Baker [@tidwall](http://twitter.com/tidwall) + +License +------- +Redcon source code is available under the MIT [License](/LICENSE). diff --git a/vendor/github.com/tidwall/match/match.go b/vendor/github.com/tidwall/match/match.go new file mode 100644 index 0000000..8885add --- /dev/null +++ b/vendor/github.com/tidwall/match/match.go @@ -0,0 +1,192 @@ +// Match provides a simple pattern matcher with unicode support. +package match + +import "unicode/utf8" + +// Match returns true if str matches pattern. This is a very +// simple wildcard match where '*' matches on any number characters +// and '?' matches on any one character. + +// pattern: +// { term } +// term: +// '*' matches any sequence of non-Separator characters +// '?' 
matches any single non-Separator character +// c matches character c (c != '*', '?', '\\') +// '\\' c matches character c +// +func Match(str, pattern string) bool { + if pattern == "*" { + return true + } + return deepMatch(str, pattern) +} +func deepMatch(str, pattern string) bool { + for len(pattern) > 0 { + if pattern[0] > 0x7f { + return deepMatchRune(str, pattern) + } + switch pattern[0] { + default: + if len(str) == 0 { + return false + } + if str[0] > 0x7f { + return deepMatchRune(str, pattern) + } + if str[0] != pattern[0] { + return false + } + case '?': + if len(str) == 0 { + return false + } + case '*': + return deepMatch(str, pattern[1:]) || + (len(str) > 0 && deepMatch(str[1:], pattern)) + } + str = str[1:] + pattern = pattern[1:] + } + return len(str) == 0 && len(pattern) == 0 +} + +func deepMatchRune(str, pattern string) bool { + var sr, pr rune + var srsz, prsz int + + // read the first rune ahead of time + if len(str) > 0 { + if str[0] > 0x7f { + sr, srsz = utf8.DecodeRuneInString(str) + } else { + sr, srsz = rune(str[0]), 1 + } + } else { + sr, srsz = utf8.RuneError, 0 + } + if len(pattern) > 0 { + if pattern[0] > 0x7f { + pr, prsz = utf8.DecodeRuneInString(pattern) + } else { + pr, prsz = rune(pattern[0]), 1 + } + } else { + pr, prsz = utf8.RuneError, 0 + } + // done reading + for pr != utf8.RuneError { + switch pr { + default: + if srsz == utf8.RuneError { + return false + } + if sr != pr { + return false + } + case '?': + if srsz == utf8.RuneError { + return false + } + case '*': + return deepMatchRune(str, pattern[prsz:]) || + (srsz > 0 && deepMatchRune(str[srsz:], pattern)) + } + str = str[srsz:] + pattern = pattern[prsz:] + // read the next runes + if len(str) > 0 { + if str[0] > 0x7f { + sr, srsz = utf8.DecodeRuneInString(str) + } else { + sr, srsz = rune(str[0]), 1 + } + } else { + sr, srsz = utf8.RuneError, 0 + } + if len(pattern) > 0 { + if pattern[0] > 0x7f { + pr, prsz = utf8.DecodeRuneInString(pattern) + } else { + pr, prsz = 
rune(pattern[0]), 1 + } + } else { + pr, prsz = utf8.RuneError, 0 + } + // done reading + } + + return srsz == 0 && prsz == 0 +} + +var maxRuneBytes = func() []byte { + b := make([]byte, 4) + if utf8.EncodeRune(b, '\U0010FFFF') != 4 { + panic("invalid rune encoding") + } + return b +}() + +// Allowable parses the pattern and determines the minimum and maximum allowable +// values that the pattern can represent. +// When the max cannot be determined, 'true' will be returned +// for infinite. +func Allowable(pattern string) (min, max string) { + if pattern == "" || pattern[0] == '*' { + return "", "" + } + + minb := make([]byte, 0, len(pattern)) + maxb := make([]byte, 0, len(pattern)) + var wild bool + for i := 0; i < len(pattern); i++ { + if pattern[i] == '*' { + wild = true + break + } + if pattern[i] == '?' { + minb = append(minb, 0) + maxb = append(maxb, maxRuneBytes...) + } else { + minb = append(minb, pattern[i]) + maxb = append(maxb, pattern[i]) + } + } + if wild { + r, n := utf8.DecodeLastRune(maxb) + if r != utf8.RuneError { + if r < utf8.MaxRune { + r++ + if r > 0x7f { + b := make([]byte, 4) + nn := utf8.EncodeRune(b, r) + maxb = append(maxb[:len(maxb)-n], b[:nn]...) + } else { + maxb = append(maxb[:len(maxb)-n], byte(r)) + } + } + } + } + return string(minb), string(maxb) + /* + return + if wild { + r, n := utf8.DecodeLastRune(maxb) + if r != utf8.RuneError { + if r < utf8.MaxRune { + infinite = true + } else { + r++ + if r > 0x7f { + b := make([]byte, 4) + nn := utf8.EncodeRune(b, r) + maxb = append(maxb[:len(maxb)-n], b[:nn]...) 
+ } else { + maxb = append(maxb[:len(maxb)-n], byte(r)) + } + } + } + } + return string(minb), string(maxb), infinite + */ +} diff --git a/vendor/github.com/tidwall/rtree/LICENSE b/vendor/github.com/tidwall/rtree/LICENSE new file mode 100644 index 0000000..1a6cb67 --- /dev/null +++ b/vendor/github.com/tidwall/rtree/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2016 Josh Baker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/tidwall/rtree/README.md b/vendor/github.com/tidwall/rtree/README.md new file mode 100644 index 0000000..fa52dc3 --- /dev/null +++ b/vendor/github.com/tidwall/rtree/README.md @@ -0,0 +1,21 @@ +RTree implementation for Go +=========================== + +[![Build Status](https://travis-ci.org/tidwall/rtree.svg?branch=master)](https://travis-ci.org/tidwall/rtree) +[![GoDoc](https://godoc.org/github.com/tidwall/rtree?status.svg)](https://godoc.org/github.com/tidwall/rtree) + +This package provides an in-memory R-Tree implementation for Go, useful as a spatial data structure. +It has support for 1-20 dimensions, and can store and search multidimensions interchangably in the same tree. + +Authors +------- +* 1983 Original algorithm and test code by Antonin Guttman and Michael Stonebraker, UC Berkely +* 1994 ANCI C ported from original test code by Melinda Green +* 1995 Sphere volume fix for degeneracy problem submitted by Paul Brook +* 2004 Templated C++ port by Greg Douglas +* 2016 Go port by Josh Baker + +License +------- +RTree source code is available under the MIT License. + diff --git a/vendor/github.com/tidwall/rtree/rtree.go b/vendor/github.com/tidwall/rtree/rtree.go new file mode 100644 index 0000000..330a1f5 --- /dev/null +++ b/vendor/github.com/tidwall/rtree/rtree.go @@ -0,0 +1,14013 @@ +// generated; DO NOT EDIT! 
+ +package rtree + +import "math" + +type Iterator func(item Item) bool +type Item interface { + Rect(ctx interface{}) (min []float64, max []float64) +} + +type RTree struct { + ctx interface{} + tr1 *d1RTree + tr2 *d2RTree + tr3 *d3RTree + tr4 *d4RTree + tr5 *d5RTree + tr6 *d6RTree + tr7 *d7RTree + tr8 *d8RTree + tr9 *d9RTree + tr10 *d10RTree + tr11 *d11RTree + tr12 *d12RTree + tr13 *d13RTree + tr14 *d14RTree + tr15 *d15RTree + tr16 *d16RTree + tr17 *d17RTree + tr18 *d18RTree + tr19 *d19RTree + tr20 *d20RTree +} + +func New(ctx interface{}) *RTree { + return &RTree{ + ctx: ctx, + tr1: d1New(), + tr2: d2New(), + tr3: d3New(), + tr4: d4New(), + tr5: d5New(), + tr6: d6New(), + tr7: d7New(), + tr8: d8New(), + tr9: d9New(), + tr10: d10New(), + tr11: d11New(), + tr12: d12New(), + tr13: d13New(), + tr14: d14New(), + tr15: d15New(), + tr16: d16New(), + tr17: d17New(), + tr18: d18New(), + tr19: d19New(), + tr20: d20New(), + } +} + +func (tr *RTree) Insert(item Item) { + if item == nil { + panic("nil item being added to RTree") + } + min, max := item.Rect(tr.ctx) + if len(min) != len(max) { + return // just return + panic("invalid item rectangle") + } + switch len(min) { + default: + return // just return + panic("invalid dimension") + case 1: + var amin, amax [1]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr1.Insert(amin, amax, item) + case 2: + var amin, amax [2]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr2.Insert(amin, amax, item) + case 3: + var amin, amax [3]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr3.Insert(amin, amax, item) + case 4: + var amin, amax [4]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr4.Insert(amin, amax, item) + case 5: + var amin, amax [5]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr5.Insert(amin, amax, item) + case 6: + var amin, 
amax [6]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr6.Insert(amin, amax, item) + case 7: + var amin, amax [7]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr7.Insert(amin, amax, item) + case 8: + var amin, amax [8]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr8.Insert(amin, amax, item) + case 9: + var amin, amax [9]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr9.Insert(amin, amax, item) + case 10: + var amin, amax [10]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr10.Insert(amin, amax, item) + case 11: + var amin, amax [11]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr11.Insert(amin, amax, item) + case 12: + var amin, amax [12]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr12.Insert(amin, amax, item) + case 13: + var amin, amax [13]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr13.Insert(amin, amax, item) + case 14: + var amin, amax [14]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr14.Insert(amin, amax, item) + case 15: + var amin, amax [15]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr15.Insert(amin, amax, item) + case 16: + var amin, amax [16]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr16.Insert(amin, amax, item) + case 17: + var amin, amax [17]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr17.Insert(amin, amax, item) + case 18: + var amin, amax [18]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr18.Insert(amin, amax, item) + case 19: + var amin, amax [19]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = 
min[i], max[i] + } + tr.tr19.Insert(amin, amax, item) + case 20: + var amin, amax [20]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr20.Insert(amin, amax, item) + } +} + +func (tr *RTree) Remove(item Item) { + if item == nil { + panic("nil item being added to RTree") + } + min, max := item.Rect(tr.ctx) + if len(min) != len(max) { + return // just return + panic("invalid item rectangle") + } + switch len(min) { + default: + return // just return + panic("invalid dimension") + case 1: + var amin, amax [1]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr1.Remove(amin, amax, item) + case 2: + var amin, amax [2]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr2.Remove(amin, amax, item) + case 3: + var amin, amax [3]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr3.Remove(amin, amax, item) + case 4: + var amin, amax [4]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr4.Remove(amin, amax, item) + case 5: + var amin, amax [5]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr5.Remove(amin, amax, item) + case 6: + var amin, amax [6]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr6.Remove(amin, amax, item) + case 7: + var amin, amax [7]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr7.Remove(amin, amax, item) + case 8: + var amin, amax [8]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr8.Remove(amin, amax, item) + case 9: + var amin, amax [9]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr9.Remove(amin, amax, item) + case 10: + var amin, amax [10]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr10.Remove(amin, amax, item) + case 11: + var 
amin, amax [11]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr11.Remove(amin, amax, item) + case 12: + var amin, amax [12]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr12.Remove(amin, amax, item) + case 13: + var amin, amax [13]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr13.Remove(amin, amax, item) + case 14: + var amin, amax [14]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr14.Remove(amin, amax, item) + case 15: + var amin, amax [15]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr15.Remove(amin, amax, item) + case 16: + var amin, amax [16]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr16.Remove(amin, amax, item) + case 17: + var amin, amax [17]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr17.Remove(amin, amax, item) + case 18: + var amin, amax [18]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr18.Remove(amin, amax, item) + case 19: + var amin, amax [19]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr19.Remove(amin, amax, item) + case 20: + var amin, amax [20]float64 + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + tr.tr20.Remove(amin, amax, item) + } +} +func (tr *RTree) Reset() { + tr.tr1 = d1New() + tr.tr2 = d2New() + tr.tr3 = d3New() + tr.tr4 = d4New() + tr.tr5 = d5New() + tr.tr6 = d6New() + tr.tr7 = d7New() + tr.tr8 = d8New() + tr.tr9 = d9New() + tr.tr10 = d10New() + tr.tr11 = d11New() + tr.tr12 = d12New() + tr.tr13 = d13New() + tr.tr14 = d14New() + tr.tr15 = d15New() + tr.tr16 = d16New() + tr.tr17 = d17New() + tr.tr18 = d18New() + tr.tr19 = d19New() + tr.tr20 = d20New() +} +func (tr *RTree) Count() int { + count := 0 + count += tr.tr1.Count() + count += 
tr.tr2.Count() + count += tr.tr3.Count() + count += tr.tr4.Count() + count += tr.tr5.Count() + count += tr.tr6.Count() + count += tr.tr7.Count() + count += tr.tr8.Count() + count += tr.tr9.Count() + count += tr.tr10.Count() + count += tr.tr11.Count() + count += tr.tr12.Count() + count += tr.tr13.Count() + count += tr.tr14.Count() + count += tr.tr15.Count() + count += tr.tr16.Count() + count += tr.tr17.Count() + count += tr.tr18.Count() + count += tr.tr19.Count() + count += tr.tr20.Count() + return count +} +func (tr *RTree) Search(bounds Item, iter Iterator) { + if bounds == nil { + panic("nil bounds being used for search") + } + min, max := bounds.Rect(tr.ctx) + if len(min) != len(max) { + return // just return + panic("invalid item rectangle") + } + switch len(min) { + default: + return // just return + panic("invalid dimension") + case 1: + case 2: + case 3: + case 4: + case 5: + case 6: + case 7: + case 8: + case 9: + case 10: + case 11: + case 12: + case 13: + case 14: + case 15: + case 16: + case 17: + case 18: + case 19: + case 20: + } + if !tr.search1(min, max, iter) { + return + } + if !tr.search2(min, max, iter) { + return + } + if !tr.search3(min, max, iter) { + return + } + if !tr.search4(min, max, iter) { + return + } + if !tr.search5(min, max, iter) { + return + } + if !tr.search6(min, max, iter) { + return + } + if !tr.search7(min, max, iter) { + return + } + if !tr.search8(min, max, iter) { + return + } + if !tr.search9(min, max, iter) { + return + } + if !tr.search10(min, max, iter) { + return + } + if !tr.search11(min, max, iter) { + return + } + if !tr.search12(min, max, iter) { + return + } + if !tr.search13(min, max, iter) { + return + } + if !tr.search14(min, max, iter) { + return + } + if !tr.search15(min, max, iter) { + return + } + if !tr.search16(min, max, iter) { + return + } + if !tr.search17(min, max, iter) { + return + } + if !tr.search18(min, max, iter) { + return + } + if !tr.search19(min, max, iter) { + return + } + if 
!tr.search20(min, max, iter) { + return + } +} + +func (tr *RTree) search1(min, max []float64, iter Iterator) bool { + var amin, amax [1]float64 + for i := 0; i < 1; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr1.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search2(min, max []float64, iter Iterator) bool { + var amin, amax [2]float64 + for i := 0; i < 2; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr2.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search3(min, max []float64, iter Iterator) bool { + var amin, amax [3]float64 + for i := 0; i < 3; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr3.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search4(min, max []float64, iter Iterator) bool { + var amin, amax [4]float64 + for i := 0; i < 4; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr4.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search5(min, max []float64, iter Iterator) bool { + var amin, amax [5]float64 + for i := 0; i < 5; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = 
math.Inf(+1) + } + } + ended := false + tr.tr5.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search6(min, max []float64, iter Iterator) bool { + var amin, amax [6]float64 + for i := 0; i < 6; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr6.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search7(min, max []float64, iter Iterator) bool { + var amin, amax [7]float64 + for i := 0; i < 7; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr7.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search8(min, max []float64, iter Iterator) bool { + var amin, amax [8]float64 + for i := 0; i < 8; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr8.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search9(min, max []float64, iter Iterator) bool { + var amin, amax [9]float64 + for i := 0; i < 9; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr9.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search10(min, max []float64, iter Iterator) bool { + var amin, 
amax [10]float64 + for i := 0; i < 10; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr10.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search11(min, max []float64, iter Iterator) bool { + var amin, amax [11]float64 + for i := 0; i < 11; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr11.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search12(min, max []float64, iter Iterator) bool { + var amin, amax [12]float64 + for i := 0; i < 12; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr12.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search13(min, max []float64, iter Iterator) bool { + var amin, amax [13]float64 + for i := 0; i < 13; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr13.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search14(min, max []float64, iter Iterator) bool { + var amin, amax [14]float64 + for i := 0; i < 14; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr14.Search(amin, amax, func(dataID interface{}) bool { + if 
!iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search15(min, max []float64, iter Iterator) bool { + var amin, amax [15]float64 + for i := 0; i < 15; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr15.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search16(min, max []float64, iter Iterator) bool { + var amin, amax [16]float64 + for i := 0; i < 16; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr16.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search17(min, max []float64, iter Iterator) bool { + var amin, amax [17]float64 + for i := 0; i < 17; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr17.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search18(min, max []float64, iter Iterator) bool { + var amin, amax [18]float64 + for i := 0; i < 18; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr18.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search19(min, max []float64, iter Iterator) bool { + var amin, amax [19]float64 + for i := 0; i < 19; i++ { + if i < len(min) { + amin[i] = min[i] + 
amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr19.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) search20(min, max []float64, iter Iterator) bool { + var amin, amax [20]float64 + for i := 0; i < 20; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + ended := false + tr.tr20.Search(amin, amax, func(dataID interface{}) bool { + if !iter(dataID.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func d1fmin(a, b float64) float64 { + if a < b { + return a + } + return b +} +func d1fmax(a, b float64) float64 { + if a > b { + return a + } + return b +} + +const ( + d1numDims = 1 + d1maxNodes = 8 + d1minNodes = d1maxNodes / 2 + d1useSphericalVolume = true // Better split classification, may be slower on some systems +) + +var d1unitSphereVolume = []float64{ + 0.000000, 2.000000, 3.141593, // Dimension 0,1,2 + 4.188790, 4.934802, 5.263789, // Dimension 3,4,5 + 5.167713, 4.724766, 4.058712, // Dimension 6,7,8 + 3.298509, 2.550164, 1.884104, // Dimension 9,10,11 + 1.335263, 0.910629, 0.599265, // Dimension 12,13,14 + 0.381443, 0.235331, 0.140981, // Dimension 15,16,17 + 0.082146, 0.046622, 0.025807, // Dimension 18,19,20 +}[d1numDims] + +type d1RTree struct { + root *d1nodeT ///< Root of tree +} + +/// Minimal bounding rectangle (n-dimensional) +type d1rectT struct { + min [d1numDims]float64 ///< Min dimensions of bounding box + max [d1numDims]float64 ///< Max dimensions of bounding box +} + +/// May be data or may be another subtree +/// The parents level determines this. 
+/// If the parents level is 0, then this is data +type d1branchT struct { + rect d1rectT ///< Bounds + child *d1nodeT ///< Child node + data interface{} ///< Data Id or Ptr +} + +/// d1nodeT for each branch level +type d1nodeT struct { + count int ///< Count + level int ///< Leaf is zero, others positive + branch [d1maxNodes]d1branchT ///< Branch +} + +func (node *d1nodeT) isInternalNode() bool { + return (node.level > 0) // Not a leaf, but a internal node +} +func (node *d1nodeT) isLeaf() bool { + return (node.level == 0) // A leaf, contains data +} + +/// A link list of nodes for reinsertion after a delete operation +type d1listNodeT struct { + next *d1listNodeT ///< Next in list + node *d1nodeT ///< Node +} + +const d1notTaken = -1 // indicates that position + +/// Variables for finding a split partition +type d1partitionVarsT struct { + partition [d1maxNodes + 1]int + total int + minFill int + count [2]int + cover [2]d1rectT + area [2]float64 + + branchBuf [d1maxNodes + 1]d1branchT + branchCount int + coverSplit d1rectT + coverSplitArea float64 +} + +func d1New() *d1RTree { + // We only support machine word size simple data type eg. integer index or object pointer. + // Since we are storing as union with non data branch + return &d1RTree{ + root: &d1nodeT{}, + } +} + +/// Insert entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d1RTree) Insert(min, max [d1numDims]float64, dataId interface{}) { + var branch d1branchT + branch.data = dataId + for axis := 0; axis < d1numDims; axis++ { + branch.rect.min[axis] = min[axis] + branch.rect.max[axis] = max[axis] + } + d1insertRect(&branch, &tr.root, 0) +} + +/// Remove entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. 
+func (tr *d1RTree) Remove(min, max [d1numDims]float64, dataId interface{}) { + var rect d1rectT + for axis := 0; axis < d1numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + d1removeRect(&rect, dataId, &tr.root) +} + +/// Find all within d1search rectangle +/// \param a_min Min of d1search bounding rect +/// \param a_max Max of d1search bounding rect +/// \param a_searchResult d1search result array. Caller should set grow size. Function will reset, not append to array. +/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching +/// \param a_context User context to pass as parameter to a_resultCallback +/// \return Returns the number of entries found +func (tr *d1RTree) Search(min, max [d1numDims]float64, resultCallback func(data interface{}) bool) int { + var rect d1rectT + for axis := 0; axis < d1numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + foundCount, _ := d1search(tr.root, rect, 0, resultCallback) + return foundCount +} + +/// Count the data elements in this container. This is slow as no internal counter is maintained. +func (tr *d1RTree) Count() int { + var count int + d1countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d1RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d1nodeT{} +} + +func d1countRec(node *d1nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d1countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. 
+// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +func d1insertRectRec(branch *d1branchT, node *d1nodeT, newNode **d1nodeT, level int) bool { + // recurse until we reach the correct level for the new record. data records + // will always be called with a_level == 0 (leaf) + if node.level > level { + // Still above level for insertion, go down tree recursively + var otherNode *d1nodeT + //var newBranch d1branchT + + // find the optimal branch for this record + index := d1pickBranch(&branch.rect, node) + + // recursively insert this record into the picked branch + childWasSplit := d1insertRectRec(branch, node.branch[index].child, &otherNode, level) + + if !childWasSplit { + // Child was not split. Merge the bounding box of the new record with the + // existing bounding box + node.branch[index].rect = d1combineRect(&branch.rect, &(node.branch[index].rect)) + return false + } else { + // Child was split. The old branches are now re-partitioned to two nodes + // so we have to re-calculate the bounding boxes of each node + node.branch[index].rect = d1nodeCover(node.branch[index].child) + var newBranch d1branchT + newBranch.child = otherNode + newBranch.rect = d1nodeCover(otherNode) + + // The old node is already a child of a_node. Now add the newly-created + // node to a_node as well. a_node might be split because of that. + return d1addBranch(&newBranch, node, newNode) + } + } else if node.level == level { + // We have reached level for insertion. Add rect, split if necessary + return d1addBranch(branch, node, newNode) + } else { + // Should never occur + return false + } +} + +// Insert a data rectangle into an index structure. +// d1insertRect provides for splitting the root; +// returns 1 if root was split, 0 if it was not. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. 
+// InsertRect2 does the recursion. +// +func d1insertRect(branch *d1branchT, root **d1nodeT, level int) bool { + var newNode *d1nodeT + + if d1insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d1nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d1branchT + + // add old root node as a child of the new root + newBranch.rect = d1nodeCover(*root) + newBranch.child = *root + d1addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d1nodeCover(newNode) + newBranch.child = newNode + d1addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d1nodeCover(node *d1nodeT) d1rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d1combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d1addBranch(branch *d1branchT, node *d1nodeT, newNode **d1nodeT) bool { + if node.count < d1maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d1splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d1disconnectBranch(node *d1nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. 
Pick the one that will need the smallest increase +// in area to accomodate the new rectangle. This will result in the +// least total area for the covering rectangles in the current node. +// In case of a tie, pick the one which was smaller before, to get +// the best resolution when searching. +func d1pickBranch(rect *d1rectT, node *d1nodeT) int { + var firstTime bool = true + var increase float64 + var bestIncr float64 = -1 + var area float64 + var bestArea float64 + var best int + var tempRect d1rectT + + for index := 0; index < node.count; index++ { + curRect := &node.branch[index].rect + area = d1calcRectVolume(curRect) + tempRect = d1combineRect(rect, curRect) + increase = d1calcRectVolume(&tempRect) - area + if (increase < bestIncr) || firstTime { + best = index + bestArea = area + bestIncr = increase + firstTime = false + } else if (increase == bestIncr) && (area < bestArea) { + best = index + bestArea = area + bestIncr = increase + } + } + return best +} + +// Combine two rectangles into larger one containing both +func d1combineRect(rectA, rectB *d1rectT) d1rectT { + var newRect d1rectT + + for index := 0; index < d1numDims; index++ { + newRect.min[index] = d1fmin(rectA.min[index], rectB.min[index]) + newRect.max[index] = d1fmax(rectA.max[index], rectB.max[index]) + } + + return newRect +} + +// Split a node. +// Divides the nodes branches and the extra one between two nodes. +// Old node is one of the new ones, and one really new one is created. +// Tries more than one method for choosing a partition, uses best result. 
+func d1splitNode(node *d1nodeT, branch *d1branchT, newNode **d1nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d1partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d1getBranches(node, branch, parVars) + + // Find partition + d1choosePartition(parVars, d1minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d1nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d1loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d1rectVolume(rect *d1rectT) float64 { + var volume float64 = 1 + for index := 0; index < d1numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d1rectT +func d1rectSphericalVolume(rect *d1rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d1numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. 
+ if d1numDims == 5 { + return (radius * radius * radius * radius * radius * d1unitSphereVolume) + } else if d1numDims == 4 { + return (radius * radius * radius * radius * d1unitSphereVolume) + } else if d1numDims == 3 { + return (radius * radius * radius * d1unitSphereVolume) + } else if d1numDims == 2 { + return (radius * radius * d1unitSphereVolume) + } else { + return (math.Pow(radius, d1numDims) * d1unitSphereVolume) + } +} + +// Use one of the methods to calculate retangle volume +func d1calcRectVolume(rect *d1rectT) float64 { + if d1useSphericalVolume { + return d1rectSphericalVolume(rect) // Slower but helps certain merge cases + } else { // RTREE_USE_SPHERICAL_VOLUME + return d1rectVolume(rect) // Faster but can cause poor merges + } // RTREE_USE_SPHERICAL_VOLUME +} + +// Load branch buffer with branches from full node plus the extra branch. +func d1getBranches(node *d1nodeT, branch *d1branchT, parVars *d1partitionVarsT) { + // Load the branch buffer + for index := 0; index < d1maxNodes; index++ { + parVars.branchBuf[index] = node.branch[index] + } + parVars.branchBuf[d1maxNodes] = *branch + parVars.branchCount = d1maxNodes + 1 + + // Calculate rect containing all in the set + parVars.coverSplit = parVars.branchBuf[0].rect + for index := 1; index < d1maxNodes+1; index++ { + parVars.coverSplit = d1combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect) + } + parVars.coverSplitArea = d1calcRectVolume(&parVars.coverSplit) +} + +// Method #0 for choosing a partition: +// As the seeds for the two groups, pick the two rects that would waste the +// most area if covered by a single rectangle, i.e. evidently the worst pair +// to have in the same group. +// Of the remaining, one at a time is chosen to be put in one of the two groups. +// The one chosen is the one with the greatest difference in area expansion +// depending on which group - the rect most strongly attracted to one group +// and repelled from the other. 
// If one group gets too full (more would force other group to violate min
// fill requirement) then other group gets the rest.
// These last are the ones that can go in either group most easily.
func d1choosePartition(parVars *d1partitionVarsT, minFill int) {
	var biggestDiff float64
	var group, chosen, betterGroup int

	d1initParVars(parVars, parVars.branchCount, minFill)
	d1pickSeeds(parVars)

	// Greedy assignment loop: while branches remain unassigned AND both
	// groups can still reach minFill, pick the unassigned rect with the
	// largest difference in area growth between the two groups and classify
	// it into the group it is most strongly "attracted" to.
	for ((parVars.count[0] + parVars.count[1]) < parVars.total) &&
		(parVars.count[0] < (parVars.total - parVars.minFill)) &&
		(parVars.count[1] < (parVars.total - parVars.minFill)) {
		biggestDiff = -1
		for index := 0; index < parVars.total; index++ {
			if d1notTaken == parVars.partition[index] {
				curRect := &parVars.branchBuf[index].rect
				// Growth of each group's cover if this rect joined it.
				rect0 := d1combineRect(curRect, &parVars.cover[0])
				rect1 := d1combineRect(curRect, &parVars.cover[1])
				growth0 := d1calcRectVolume(&rect0) - parVars.area[0]
				growth1 := d1calcRectVolume(&rect1) - parVars.area[1]
				diff := growth1 - growth0
				if diff >= 0 {
					group = 0
				} else {
					group = 1
					diff = -diff
				}

				if diff > biggestDiff {
					biggestDiff = diff
					chosen = index
					betterGroup = group
				} else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) {
					// Tie on growth difference: prefer the currently
					// smaller group to keep the split balanced.
					chosen = index
					betterGroup = group
				}
			}
		}
		d1classify(chosen, betterGroup, parVars)
	}

	// If one group too full, put remaining rects in the other
	if (parVars.count[0] + parVars.count[1]) < parVars.total {
		if parVars.count[0] >= parVars.total-parVars.minFill {
			group = 1
		} else {
			group = 0
		}
		for index := 0; index < parVars.total; index++ {
			if d1notTaken == parVars.partition[index] {
				d1classify(index, group, parVars)
			}
		}
	}
}

// Copy branches from the buffer into two nodes according to the partition.
+func d1loadNodes(nodeA, nodeB *d1nodeT, parVars *d1partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d1nodeT{nodeA, nodeB} + + // It is assured that d1addBranch here will not cause a node split. + d1addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d1partitionVarsT structure. +func d1initParVars(parVars *d1partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d1notTaken + } +} + +func d1pickSeeds(parVars *d1partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d1maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d1calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d1combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d1calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d1classify(seed0, 0, parVars) + d1classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. 
// d1classify records that the branch at buffer position index belongs to
// the given group (0 or 1), then maintains that group's running cover
// rectangle, cover volume, and member count.
func d1classify(index, group int, parVars *d1partitionVarsT) {
	parVars.partition[index] = group

	// Calculate combined rect: the first member seeds the cover, later
	// members grow it.
	if parVars.count[group] == 0 {
		parVars.cover[group] = parVars.branchBuf[index].rect
	} else {
		parVars.cover[group] = d1combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group])
	}

	// Calculate volume of combined rect
	parVars.area[group] = d1calcRectVolume(&parVars.cover[group])

	parVars.count[group]++
}

// Delete a data rectangle from an index structure.
// Pass in a pointer to a d1rectT, the tid of the record, ptr to ptr to root node.
// Returns 1 if record not found, 0 if success.
// d1removeRect provides for eliminating the root.
//
// NOTE(review): per the C-style convention above, this returns FALSE when a
// record was found and removed, and TRUE when it was not found.
func d1removeRect(rect *d1rectT, id interface{}, root **d1nodeT) bool {
	var reInsertList *d1listNodeT

	if !d1removeRectRec(rect, id, *root, &reInsertList) {
		// Found and deleted a data item
		// Reinsert any branches from eliminated nodes
		for reInsertList != nil {
			tempNode := reInsertList.node

			for index := 0; index < tempNode.count; index++ {
				// TODO go over this code. should I use (tempNode->m_level - 1)?
				d1insertRect(&tempNode.branch[index], root, tempNode.level)
			}
			reInsertList = reInsertList.next
		}

		// Check for redundant root (not leaf, 1 child) and eliminate TODO replace
		// if with while? In case there is a whole branch of redundant roots...
		if (*root).count == 1 && (*root).isInternalNode() {
			tempNode := (*root).branch[0].child
			*root = tempNode
		}
		return false
	} else {
		return true
	}
}

// Delete a rectangle from non-root part of an index structure.
// Called by d1removeRect. Descends tree recursively,
// merges branches on the way back up.
// Returns 1 if record not found, 0 if success.
// d1removeRectRec descends the tree looking for a leaf entry whose data
// equals id under a branch overlapping rect, and removes it. On the way
// back up, underfull children are detached and queued on listNode so the
// caller can reinsert their branches. Returns false on success, true if the
// record was not found (mirrors the 0 = success / 1 = not-found convention).
func d1removeRectRec(rect *d1rectT, id interface{}, node *d1nodeT, listNode **d1listNodeT) bool {
	if node.isInternalNode() { // not a leaf node
		for index := 0; index < node.count; index++ {
			if d1overlap(*rect, node.branch[index].rect) {
				if !d1removeRectRec(rect, id, node.branch[index].child, listNode) {
					if node.branch[index].child.count >= d1minNodes {
						// child removed, just resize parent rect
						node.branch[index].rect = d1nodeCover(node.branch[index].child)
					} else {
						// child removed, not enough entries in node, eliminate node
						d1reInsert(node.branch[index].child, listNode)
						d1disconnectBranch(node, index) // Must return after this call as count has changed
					}
					return false
				}
			}
		}
		return true
	} else { // A leaf node
		for index := 0; index < node.count; index++ {
			if node.branch[index].data == id {
				d1disconnectBranch(node, index) // Must return after this call as count has changed
				return false
			}
		}
		return true
	}
}

// Decide whether two rectangles d1overlap.
// True only when the intervals intersect on every dimension.
func d1overlap(rectA, rectB d1rectT) bool {
	for index := 0; index < d1numDims; index++ {
		if rectA.min[index] > rectB.max[index] ||
			rectB.min[index] > rectA.max[index] {
			return false
		}
	}
	return true
}

// Add a node to the reinsertion list. All its branches will later
// be reinserted into the index structure.
func d1reInsert(node *d1nodeT, listNode **d1listNodeT) {
	// Prepend to the singly linked list; reinsertion order does not matter.
	newListNode := &d1listNodeT{}
	newListNode.node = node
	newListNode.next = *listNode
	*listNode = newListNode
}

// d1search in an index tree or subtree for all data rectangles that d1overlap the argument rectangle.
+func d1search(node *d1nodeT, rect d1rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) { + if node.isInternalNode() { + // This is an internal node in the tree + for index := 0; index < node.count; index++ { + if d1overlap(rect, node.branch[index].rect) { + var ok bool + foundCount, ok = d1search(node.branch[index].child, rect, foundCount, resultCallback) + if !ok { + // The callback indicated to stop searching + return foundCount, false + } + } + } + } else { + // This is a leaf node + for index := 0; index < node.count; index++ { + if d1overlap(rect, node.branch[index].rect) { + id := node.branch[index].data + foundCount++ + if !resultCallback(id) { + return foundCount, false // Don't continue searching + } + + } + } + } + return foundCount, true // Continue searching +} + +func d2fmin(a, b float64) float64 { + if a < b { + return a + } + return b +} +func d2fmax(a, b float64) float64 { + if a > b { + return a + } + return b +} + +const ( + d2numDims = 2 + d2maxNodes = 8 + d2minNodes = d2maxNodes / 2 + d2useSphericalVolume = true // Better split classification, may be slower on some systems +) + +var d2unitSphereVolume = []float64{ + 0.000000, 2.000000, 3.141593, // Dimension 0,1,2 + 4.188790, 4.934802, 5.263789, // Dimension 3,4,5 + 5.167713, 4.724766, 4.058712, // Dimension 6,7,8 + 3.298509, 2.550164, 1.884104, // Dimension 9,10,11 + 1.335263, 0.910629, 0.599265, // Dimension 12,13,14 + 0.381443, 0.235331, 0.140981, // Dimension 15,16,17 + 0.082146, 0.046622, 0.025807, // Dimension 18,19,20 +}[d2numDims] + +type d2RTree struct { + root *d2nodeT ///< Root of tree +} + +/// Minimal bounding rectangle (n-dimensional) +type d2rectT struct { + min [d2numDims]float64 ///< Min dimensions of bounding box + max [d2numDims]float64 ///< Max dimensions of bounding box +} + +/// May be data or may be another subtree +/// The parents level determines this. 
+/// If the parents level is 0, then this is data +type d2branchT struct { + rect d2rectT ///< Bounds + child *d2nodeT ///< Child node + data interface{} ///< Data Id or Ptr +} + +/// d2nodeT for each branch level +type d2nodeT struct { + count int ///< Count + level int ///< Leaf is zero, others positive + branch [d2maxNodes]d2branchT ///< Branch +} + +func (node *d2nodeT) isInternalNode() bool { + return (node.level > 0) // Not a leaf, but a internal node +} +func (node *d2nodeT) isLeaf() bool { + return (node.level == 0) // A leaf, contains data +} + +/// A link list of nodes for reinsertion after a delete operation +type d2listNodeT struct { + next *d2listNodeT ///< Next in list + node *d2nodeT ///< Node +} + +const d2notTaken = -1 // indicates that position + +/// Variables for finding a split partition +type d2partitionVarsT struct { + partition [d2maxNodes + 1]int + total int + minFill int + count [2]int + cover [2]d2rectT + area [2]float64 + + branchBuf [d2maxNodes + 1]d2branchT + branchCount int + coverSplit d2rectT + coverSplitArea float64 +} + +func d2New() *d2RTree { + // We only support machine word size simple data type eg. integer index or object pointer. + // Since we are storing as union with non data branch + return &d2RTree{ + root: &d2nodeT{}, + } +} + +/// Insert entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d2RTree) Insert(min, max [d2numDims]float64, dataId interface{}) { + var branch d2branchT + branch.data = dataId + for axis := 0; axis < d2numDims; axis++ { + branch.rect.min[axis] = min[axis] + branch.rect.max[axis] = max[axis] + } + d2insertRect(&branch, &tr.root, 0) +} + +/// Remove entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. 
+func (tr *d2RTree) Remove(min, max [d2numDims]float64, dataId interface{}) { + var rect d2rectT + for axis := 0; axis < d2numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + d2removeRect(&rect, dataId, &tr.root) +} + +/// Find all within d2search rectangle +/// \param a_min Min of d2search bounding rect +/// \param a_max Max of d2search bounding rect +/// \param a_searchResult d2search result array. Caller should set grow size. Function will reset, not append to array. +/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching +/// \param a_context User context to pass as parameter to a_resultCallback +/// \return Returns the number of entries found +func (tr *d2RTree) Search(min, max [d2numDims]float64, resultCallback func(data interface{}) bool) int { + var rect d2rectT + for axis := 0; axis < d2numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + foundCount, _ := d2search(tr.root, rect, 0, resultCallback) + return foundCount +} + +/// Count the data elements in this container. This is slow as no internal counter is maintained. +func (tr *d2RTree) Count() int { + var count int + d2countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d2RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d2nodeT{} +} + +func d2countRec(node *d2nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d2countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. 
+// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +func d2insertRectRec(branch *d2branchT, node *d2nodeT, newNode **d2nodeT, level int) bool { + // recurse until we reach the correct level for the new record. data records + // will always be called with a_level == 0 (leaf) + if node.level > level { + // Still above level for insertion, go down tree recursively + var otherNode *d2nodeT + //var newBranch d2branchT + + // find the optimal branch for this record + index := d2pickBranch(&branch.rect, node) + + // recursively insert this record into the picked branch + childWasSplit := d2insertRectRec(branch, node.branch[index].child, &otherNode, level) + + if !childWasSplit { + // Child was not split. Merge the bounding box of the new record with the + // existing bounding box + node.branch[index].rect = d2combineRect(&branch.rect, &(node.branch[index].rect)) + return false + } else { + // Child was split. The old branches are now re-partitioned to two nodes + // so we have to re-calculate the bounding boxes of each node + node.branch[index].rect = d2nodeCover(node.branch[index].child) + var newBranch d2branchT + newBranch.child = otherNode + newBranch.rect = d2nodeCover(otherNode) + + // The old node is already a child of a_node. Now add the newly-created + // node to a_node as well. a_node might be split because of that. + return d2addBranch(&newBranch, node, newNode) + } + } else if node.level == level { + // We have reached level for insertion. Add rect, split if necessary + return d2addBranch(branch, node, newNode) + } else { + // Should never occur + return false + } +} + +// Insert a data rectangle into an index structure. +// d2insertRect provides for splitting the root; +// returns 1 if root was split, 0 if it was not. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. 
+// InsertRect2 does the recursion. +// +func d2insertRect(branch *d2branchT, root **d2nodeT, level int) bool { + var newNode *d2nodeT + + if d2insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d2nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d2branchT + + // add old root node as a child of the new root + newBranch.rect = d2nodeCover(*root) + newBranch.child = *root + d2addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d2nodeCover(newNode) + newBranch.child = newNode + d2addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d2nodeCover(node *d2nodeT) d2rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d2combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d2addBranch(branch *d2branchT, node *d2nodeT, newNode **d2nodeT) bool { + if node.count < d2maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d2splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d2disconnectBranch(node *d2nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. 
Pick the one that will need the smallest increase +// in area to accomodate the new rectangle. This will result in the +// least total area for the covering rectangles in the current node. +// In case of a tie, pick the one which was smaller before, to get +// the best resolution when searching. +func d2pickBranch(rect *d2rectT, node *d2nodeT) int { + var firstTime bool = true + var increase float64 + var bestIncr float64 = -1 + var area float64 + var bestArea float64 + var best int + var tempRect d2rectT + + for index := 0; index < node.count; index++ { + curRect := &node.branch[index].rect + area = d2calcRectVolume(curRect) + tempRect = d2combineRect(rect, curRect) + increase = d2calcRectVolume(&tempRect) - area + if (increase < bestIncr) || firstTime { + best = index + bestArea = area + bestIncr = increase + firstTime = false + } else if (increase == bestIncr) && (area < bestArea) { + best = index + bestArea = area + bestIncr = increase + } + } + return best +} + +// Combine two rectangles into larger one containing both +func d2combineRect(rectA, rectB *d2rectT) d2rectT { + var newRect d2rectT + + for index := 0; index < d2numDims; index++ { + newRect.min[index] = d2fmin(rectA.min[index], rectB.min[index]) + newRect.max[index] = d2fmax(rectA.max[index], rectB.max[index]) + } + + return newRect +} + +// Split a node. +// Divides the nodes branches and the extra one between two nodes. +// Old node is one of the new ones, and one really new one is created. +// Tries more than one method for choosing a partition, uses best result. 
+func d2splitNode(node *d2nodeT, branch *d2branchT, newNode **d2nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d2partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d2getBranches(node, branch, parVars) + + // Find partition + d2choosePartition(parVars, d2minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d2nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d2loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d2rectVolume(rect *d2rectT) float64 { + var volume float64 = 1 + for index := 0; index < d2numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d2rectT +func d2rectSphericalVolume(rect *d2rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d2numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. 
+ if d2numDims == 5 { + return (radius * radius * radius * radius * radius * d2unitSphereVolume) + } else if d2numDims == 4 { + return (radius * radius * radius * radius * d2unitSphereVolume) + } else if d2numDims == 3 { + return (radius * radius * radius * d2unitSphereVolume) + } else if d2numDims == 2 { + return (radius * radius * d2unitSphereVolume) + } else { + return (math.Pow(radius, d2numDims) * d2unitSphereVolume) + } +} + +// Use one of the methods to calculate retangle volume +func d2calcRectVolume(rect *d2rectT) float64 { + if d2useSphericalVolume { + return d2rectSphericalVolume(rect) // Slower but helps certain merge cases + } else { // RTREE_USE_SPHERICAL_VOLUME + return d2rectVolume(rect) // Faster but can cause poor merges + } // RTREE_USE_SPHERICAL_VOLUME +} + +// Load branch buffer with branches from full node plus the extra branch. +func d2getBranches(node *d2nodeT, branch *d2branchT, parVars *d2partitionVarsT) { + // Load the branch buffer + for index := 0; index < d2maxNodes; index++ { + parVars.branchBuf[index] = node.branch[index] + } + parVars.branchBuf[d2maxNodes] = *branch + parVars.branchCount = d2maxNodes + 1 + + // Calculate rect containing all in the set + parVars.coverSplit = parVars.branchBuf[0].rect + for index := 1; index < d2maxNodes+1; index++ { + parVars.coverSplit = d2combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect) + } + parVars.coverSplitArea = d2calcRectVolume(&parVars.coverSplit) +} + +// Method #0 for choosing a partition: +// As the seeds for the two groups, pick the two rects that would waste the +// most area if covered by a single rectangle, i.e. evidently the worst pair +// to have in the same group. +// Of the remaining, one at a time is chosen to be put in one of the two groups. +// The one chosen is the one with the greatest difference in area expansion +// depending on which group - the rect most strongly attracted to one group +// and repelled from the other. 
+// If one group gets too full (more would force other group to violate min +// fill requirement) then other group gets the rest. +// These last are the ones that can go in either group most easily. +func d2choosePartition(parVars *d2partitionVarsT, minFill int) { + var biggestDiff float64 + var group, chosen, betterGroup int + + d2initParVars(parVars, parVars.branchCount, minFill) + d2pickSeeds(parVars) + + for ((parVars.count[0] + parVars.count[1]) < parVars.total) && + (parVars.count[0] < (parVars.total - parVars.minFill)) && + (parVars.count[1] < (parVars.total - parVars.minFill)) { + biggestDiff = -1 + for index := 0; index < parVars.total; index++ { + if d2notTaken == parVars.partition[index] { + curRect := &parVars.branchBuf[index].rect + rect0 := d2combineRect(curRect, &parVars.cover[0]) + rect1 := d2combineRect(curRect, &parVars.cover[1]) + growth0 := d2calcRectVolume(&rect0) - parVars.area[0] + growth1 := d2calcRectVolume(&rect1) - parVars.area[1] + diff := growth1 - growth0 + if diff >= 0 { + group = 0 + } else { + group = 1 + diff = -diff + } + + if diff > biggestDiff { + biggestDiff = diff + chosen = index + betterGroup = group + } else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) { + chosen = index + betterGroup = group + } + } + } + d2classify(chosen, betterGroup, parVars) + } + + // If one group too full, put remaining rects in the other + if (parVars.count[0] + parVars.count[1]) < parVars.total { + if parVars.count[0] >= parVars.total-parVars.minFill { + group = 1 + } else { + group = 0 + } + for index := 0; index < parVars.total; index++ { + if d2notTaken == parVars.partition[index] { + d2classify(index, group, parVars) + } + } + } +} + +// Copy branches from the buffer into two nodes according to the partition. 
+func d2loadNodes(nodeA, nodeB *d2nodeT, parVars *d2partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d2nodeT{nodeA, nodeB} + + // It is assured that d2addBranch here will not cause a node split. + d2addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d2partitionVarsT structure. +func d2initParVars(parVars *d2partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d2notTaken + } +} + +func d2pickSeeds(parVars *d2partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d2maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d2calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d2combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d2calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d2classify(seed0, 0, parVars) + d2classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. 
// d2classify records that the branch at buffer position index belongs to
// the given group (0 or 1), then maintains that group's running cover
// rectangle, cover volume, and member count.
func d2classify(index, group int, parVars *d2partitionVarsT) {
	parVars.partition[index] = group

	// Calculate combined rect: the first member seeds the cover, later
	// members grow it.
	if parVars.count[group] == 0 {
		parVars.cover[group] = parVars.branchBuf[index].rect
	} else {
		parVars.cover[group] = d2combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group])
	}

	// Calculate volume of combined rect
	parVars.area[group] = d2calcRectVolume(&parVars.cover[group])

	parVars.count[group]++
}

// Delete a data rectangle from an index structure.
// Pass in a pointer to a d2rectT, the tid of the record, ptr to ptr to root node.
// Returns 1 if record not found, 0 if success.
// d2removeRect provides for eliminating the root.
//
// NOTE(review): per the C-style convention above, this returns FALSE when a
// record was found and removed, and TRUE when it was not found.
func d2removeRect(rect *d2rectT, id interface{}, root **d2nodeT) bool {
	var reInsertList *d2listNodeT

	if !d2removeRectRec(rect, id, *root, &reInsertList) {
		// Found and deleted a data item
		// Reinsert any branches from eliminated nodes
		for reInsertList != nil {
			tempNode := reInsertList.node

			for index := 0; index < tempNode.count; index++ {
				// TODO go over this code. should I use (tempNode->m_level - 1)?
				d2insertRect(&tempNode.branch[index], root, tempNode.level)
			}
			reInsertList = reInsertList.next
		}

		// Check for redundant root (not leaf, 1 child) and eliminate TODO replace
		// if with while? In case there is a whole branch of redundant roots...
		if (*root).count == 1 && (*root).isInternalNode() {
			tempNode := (*root).branch[0].child
			*root = tempNode
		}
		return false
	} else {
		return true
	}
}

// Delete a rectangle from non-root part of an index structure.
// Called by d2removeRect. Descends tree recursively,
// merges branches on the way back up.
// Returns 1 if record not found, 0 if success.
// d2removeRectRec descends the tree looking for a leaf entry whose data
// equals id under a branch overlapping rect, and removes it. On the way
// back up, underfull children are detached and queued on listNode so the
// caller can reinsert their branches. Returns false on success, true if the
// record was not found (mirrors the 0 = success / 1 = not-found convention).
func d2removeRectRec(rect *d2rectT, id interface{}, node *d2nodeT, listNode **d2listNodeT) bool {
	if node.isInternalNode() { // not a leaf node
		for index := 0; index < node.count; index++ {
			if d2overlap(*rect, node.branch[index].rect) {
				if !d2removeRectRec(rect, id, node.branch[index].child, listNode) {
					if node.branch[index].child.count >= d2minNodes {
						// child removed, just resize parent rect
						node.branch[index].rect = d2nodeCover(node.branch[index].child)
					} else {
						// child removed, not enough entries in node, eliminate node
						d2reInsert(node.branch[index].child, listNode)
						d2disconnectBranch(node, index) // Must return after this call as count has changed
					}
					return false
				}
			}
		}
		return true
	} else { // A leaf node
		for index := 0; index < node.count; index++ {
			if node.branch[index].data == id {
				d2disconnectBranch(node, index) // Must return after this call as count has changed
				return false
			}
		}
		return true
	}
}

// Decide whether two rectangles d2overlap.
// True only when the intervals intersect on every dimension.
func d2overlap(rectA, rectB d2rectT) bool {
	for index := 0; index < d2numDims; index++ {
		if rectA.min[index] > rectB.max[index] ||
			rectB.min[index] > rectA.max[index] {
			return false
		}
	}
	return true
}

// Add a node to the reinsertion list. All its branches will later
// be reinserted into the index structure.
func d2reInsert(node *d2nodeT, listNode **d2listNodeT) {
	// Prepend to the singly linked list; reinsertion order does not matter.
	newListNode := &d2listNodeT{}
	newListNode.node = node
	newListNode.next = *listNode
	*listNode = newListNode
}

// d2search in an index tree or subtree for all data rectangles that d2overlap the argument rectangle.
+func d2search(node *d2nodeT, rect d2rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) { + if node.isInternalNode() { + // This is an internal node in the tree + for index := 0; index < node.count; index++ { + if d2overlap(rect, node.branch[index].rect) { + var ok bool + foundCount, ok = d2search(node.branch[index].child, rect, foundCount, resultCallback) + if !ok { + // The callback indicated to stop searching + return foundCount, false + } + } + } + } else { + // This is a leaf node + for index := 0; index < node.count; index++ { + if d2overlap(rect, node.branch[index].rect) { + id := node.branch[index].data + foundCount++ + if !resultCallback(id) { + return foundCount, false // Don't continue searching + } + + } + } + } + return foundCount, true // Continue searching +} + +func d3fmin(a, b float64) float64 { + if a < b { + return a + } + return b +} +func d3fmax(a, b float64) float64 { + if a > b { + return a + } + return b +} + +const ( + d3numDims = 3 + d3maxNodes = 8 + d3minNodes = d3maxNodes / 2 + d3useSphericalVolume = true // Better split classification, may be slower on some systems +) + +var d3unitSphereVolume = []float64{ + 0.000000, 2.000000, 3.141593, // Dimension 0,1,2 + 4.188790, 4.934802, 5.263789, // Dimension 3,4,5 + 5.167713, 4.724766, 4.058712, // Dimension 6,7,8 + 3.298509, 2.550164, 1.884104, // Dimension 9,10,11 + 1.335263, 0.910629, 0.599265, // Dimension 12,13,14 + 0.381443, 0.235331, 0.140981, // Dimension 15,16,17 + 0.082146, 0.046622, 0.025807, // Dimension 18,19,20 +}[d3numDims] + +type d3RTree struct { + root *d3nodeT ///< Root of tree +} + +/// Minimal bounding rectangle (n-dimensional) +type d3rectT struct { + min [d3numDims]float64 ///< Min dimensions of bounding box + max [d3numDims]float64 ///< Max dimensions of bounding box +} + +/// May be data or may be another subtree +/// The parents level determines this. 
+/// If the parents level is 0, then this is data +type d3branchT struct { + rect d3rectT ///< Bounds + child *d3nodeT ///< Child node + data interface{} ///< Data Id or Ptr +} + +/// d3nodeT for each branch level +type d3nodeT struct { + count int ///< Count + level int ///< Leaf is zero, others positive + branch [d3maxNodes]d3branchT ///< Branch +} + +func (node *d3nodeT) isInternalNode() bool { + return (node.level > 0) // Not a leaf, but a internal node +} +func (node *d3nodeT) isLeaf() bool { + return (node.level == 0) // A leaf, contains data +} + +/// A link list of nodes for reinsertion after a delete operation +type d3listNodeT struct { + next *d3listNodeT ///< Next in list + node *d3nodeT ///< Node +} + +const d3notTaken = -1 // indicates that position + +/// Variables for finding a split partition +type d3partitionVarsT struct { + partition [d3maxNodes + 1]int + total int + minFill int + count [2]int + cover [2]d3rectT + area [2]float64 + + branchBuf [d3maxNodes + 1]d3branchT + branchCount int + coverSplit d3rectT + coverSplitArea float64 +} + +func d3New() *d3RTree { + // We only support machine word size simple data type eg. integer index or object pointer. + // Since we are storing as union with non data branch + return &d3RTree{ + root: &d3nodeT{}, + } +} + +/// Insert entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d3RTree) Insert(min, max [d3numDims]float64, dataId interface{}) { + var branch d3branchT + branch.data = dataId + for axis := 0; axis < d3numDims; axis++ { + branch.rect.min[axis] = min[axis] + branch.rect.max[axis] = max[axis] + } + d3insertRect(&branch, &tr.root, 0) +} + +/// Remove entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. 
+func (tr *d3RTree) Remove(min, max [d3numDims]float64, dataId interface{}) { + var rect d3rectT + for axis := 0; axis < d3numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + d3removeRect(&rect, dataId, &tr.root) +} + +/// Find all within d3search rectangle +/// \param a_min Min of d3search bounding rect +/// \param a_max Max of d3search bounding rect +/// \param a_searchResult d3search result array. Caller should set grow size. Function will reset, not append to array. +/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching +/// \param a_context User context to pass as parameter to a_resultCallback +/// \return Returns the number of entries found +func (tr *d3RTree) Search(min, max [d3numDims]float64, resultCallback func(data interface{}) bool) int { + var rect d3rectT + for axis := 0; axis < d3numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + foundCount, _ := d3search(tr.root, rect, 0, resultCallback) + return foundCount +} + +/// Count the data elements in this container. This is slow as no internal counter is maintained. +func (tr *d3RTree) Count() int { + var count int + d3countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d3RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d3nodeT{} +} + +func d3countRec(node *d3nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d3countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. 
+// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +func d3insertRectRec(branch *d3branchT, node *d3nodeT, newNode **d3nodeT, level int) bool { + // recurse until we reach the correct level for the new record. data records + // will always be called with a_level == 0 (leaf) + if node.level > level { + // Still above level for insertion, go down tree recursively + var otherNode *d3nodeT + //var newBranch d3branchT + + // find the optimal branch for this record + index := d3pickBranch(&branch.rect, node) + + // recursively insert this record into the picked branch + childWasSplit := d3insertRectRec(branch, node.branch[index].child, &otherNode, level) + + if !childWasSplit { + // Child was not split. Merge the bounding box of the new record with the + // existing bounding box + node.branch[index].rect = d3combineRect(&branch.rect, &(node.branch[index].rect)) + return false + } else { + // Child was split. The old branches are now re-partitioned to two nodes + // so we have to re-calculate the bounding boxes of each node + node.branch[index].rect = d3nodeCover(node.branch[index].child) + var newBranch d3branchT + newBranch.child = otherNode + newBranch.rect = d3nodeCover(otherNode) + + // The old node is already a child of a_node. Now add the newly-created + // node to a_node as well. a_node might be split because of that. + return d3addBranch(&newBranch, node, newNode) + } + } else if node.level == level { + // We have reached level for insertion. Add rect, split if necessary + return d3addBranch(branch, node, newNode) + } else { + // Should never occur + return false + } +} + +// Insert a data rectangle into an index structure. +// d3insertRect provides for splitting the root; +// returns 1 if root was split, 0 if it was not. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. 
+// InsertRect2 does the recursion. +// +func d3insertRect(branch *d3branchT, root **d3nodeT, level int) bool { + var newNode *d3nodeT + + if d3insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d3nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d3branchT + + // add old root node as a child of the new root + newBranch.rect = d3nodeCover(*root) + newBranch.child = *root + d3addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d3nodeCover(newNode) + newBranch.child = newNode + d3addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d3nodeCover(node *d3nodeT) d3rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d3combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d3addBranch(branch *d3branchT, node *d3nodeT, newNode **d3nodeT) bool { + if node.count < d3maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d3splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d3disconnectBranch(node *d3nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. 
Pick the one that will need the smallest increase +// in area to accomodate the new rectangle. This will result in the +// least total area for the covering rectangles in the current node. +// In case of a tie, pick the one which was smaller before, to get +// the best resolution when searching. +func d3pickBranch(rect *d3rectT, node *d3nodeT) int { + var firstTime bool = true + var increase float64 + var bestIncr float64 = -1 + var area float64 + var bestArea float64 + var best int + var tempRect d3rectT + + for index := 0; index < node.count; index++ { + curRect := &node.branch[index].rect + area = d3calcRectVolume(curRect) + tempRect = d3combineRect(rect, curRect) + increase = d3calcRectVolume(&tempRect) - area + if (increase < bestIncr) || firstTime { + best = index + bestArea = area + bestIncr = increase + firstTime = false + } else if (increase == bestIncr) && (area < bestArea) { + best = index + bestArea = area + bestIncr = increase + } + } + return best +} + +// Combine two rectangles into larger one containing both +func d3combineRect(rectA, rectB *d3rectT) d3rectT { + var newRect d3rectT + + for index := 0; index < d3numDims; index++ { + newRect.min[index] = d3fmin(rectA.min[index], rectB.min[index]) + newRect.max[index] = d3fmax(rectA.max[index], rectB.max[index]) + } + + return newRect +} + +// Split a node. +// Divides the nodes branches and the extra one between two nodes. +// Old node is one of the new ones, and one really new one is created. +// Tries more than one method for choosing a partition, uses best result. 
+func d3splitNode(node *d3nodeT, branch *d3branchT, newNode **d3nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d3partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d3getBranches(node, branch, parVars) + + // Find partition + d3choosePartition(parVars, d3minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d3nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d3loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d3rectVolume(rect *d3rectT) float64 { + var volume float64 = 1 + for index := 0; index < d3numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d3rectT +func d3rectSphericalVolume(rect *d3rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d3numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. 
+ if d3numDims == 5 { + return (radius * radius * radius * radius * radius * d3unitSphereVolume) + } else if d3numDims == 4 { + return (radius * radius * radius * radius * d3unitSphereVolume) + } else if d3numDims == 3 { + return (radius * radius * radius * d3unitSphereVolume) + } else if d3numDims == 2 { + return (radius * radius * d3unitSphereVolume) + } else { + return (math.Pow(radius, d3numDims) * d3unitSphereVolume) + } +} + +// Use one of the methods to calculate retangle volume +func d3calcRectVolume(rect *d3rectT) float64 { + if d3useSphericalVolume { + return d3rectSphericalVolume(rect) // Slower but helps certain merge cases + } else { // RTREE_USE_SPHERICAL_VOLUME + return d3rectVolume(rect) // Faster but can cause poor merges + } // RTREE_USE_SPHERICAL_VOLUME +} + +// Load branch buffer with branches from full node plus the extra branch. +func d3getBranches(node *d3nodeT, branch *d3branchT, parVars *d3partitionVarsT) { + // Load the branch buffer + for index := 0; index < d3maxNodes; index++ { + parVars.branchBuf[index] = node.branch[index] + } + parVars.branchBuf[d3maxNodes] = *branch + parVars.branchCount = d3maxNodes + 1 + + // Calculate rect containing all in the set + parVars.coverSplit = parVars.branchBuf[0].rect + for index := 1; index < d3maxNodes+1; index++ { + parVars.coverSplit = d3combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect) + } + parVars.coverSplitArea = d3calcRectVolume(&parVars.coverSplit) +} + +// Method #0 for choosing a partition: +// As the seeds for the two groups, pick the two rects that would waste the +// most area if covered by a single rectangle, i.e. evidently the worst pair +// to have in the same group. +// Of the remaining, one at a time is chosen to be put in one of the two groups. +// The one chosen is the one with the greatest difference in area expansion +// depending on which group - the rect most strongly attracted to one group +// and repelled from the other. 
+// If one group gets too full (more would force other group to violate min +// fill requirement) then other group gets the rest. +// These last are the ones that can go in either group most easily. +func d3choosePartition(parVars *d3partitionVarsT, minFill int) { + var biggestDiff float64 + var group, chosen, betterGroup int + + d3initParVars(parVars, parVars.branchCount, minFill) + d3pickSeeds(parVars) + + for ((parVars.count[0] + parVars.count[1]) < parVars.total) && + (parVars.count[0] < (parVars.total - parVars.minFill)) && + (parVars.count[1] < (parVars.total - parVars.minFill)) { + biggestDiff = -1 + for index := 0; index < parVars.total; index++ { + if d3notTaken == parVars.partition[index] { + curRect := &parVars.branchBuf[index].rect + rect0 := d3combineRect(curRect, &parVars.cover[0]) + rect1 := d3combineRect(curRect, &parVars.cover[1]) + growth0 := d3calcRectVolume(&rect0) - parVars.area[0] + growth1 := d3calcRectVolume(&rect1) - parVars.area[1] + diff := growth1 - growth0 + if diff >= 0 { + group = 0 + } else { + group = 1 + diff = -diff + } + + if diff > biggestDiff { + biggestDiff = diff + chosen = index + betterGroup = group + } else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) { + chosen = index + betterGroup = group + } + } + } + d3classify(chosen, betterGroup, parVars) + } + + // If one group too full, put remaining rects in the other + if (parVars.count[0] + parVars.count[1]) < parVars.total { + if parVars.count[0] >= parVars.total-parVars.minFill { + group = 1 + } else { + group = 0 + } + for index := 0; index < parVars.total; index++ { + if d3notTaken == parVars.partition[index] { + d3classify(index, group, parVars) + } + } + } +} + +// Copy branches from the buffer into two nodes according to the partition. 
+func d3loadNodes(nodeA, nodeB *d3nodeT, parVars *d3partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d3nodeT{nodeA, nodeB} + + // It is assured that d3addBranch here will not cause a node split. + d3addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d3partitionVarsT structure. +func d3initParVars(parVars *d3partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d3notTaken + } +} + +func d3pickSeeds(parVars *d3partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d3maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d3calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d3combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d3calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d3classify(seed0, 0, parVars) + d3classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. 
+func d3classify(index, group int, parVars *d3partitionVarsT) { + parVars.partition[index] = group + + // Calculate combined rect + if parVars.count[group] == 0 { + parVars.cover[group] = parVars.branchBuf[index].rect + } else { + parVars.cover[group] = d3combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group]) + } + + // Calculate volume of combined rect + parVars.area[group] = d3calcRectVolume(&parVars.cover[group]) + + parVars.count[group]++ +} + +// Delete a data rectangle from an index structure. +// Pass in a pointer to a d3rectT, the tid of the record, ptr to ptr to root node. +// Returns 1 if record not found, 0 if success. +// d3removeRect provides for eliminating the root. +func d3removeRect(rect *d3rectT, id interface{}, root **d3nodeT) bool { + var reInsertList *d3listNodeT + + if !d3removeRectRec(rect, id, *root, &reInsertList) { + // Found and deleted a data item + // Reinsert any branches from eliminated nodes + for reInsertList != nil { + tempNode := reInsertList.node + + for index := 0; index < tempNode.count; index++ { + // TODO go over this code. should I use (tempNode->m_level - 1)? + d3insertRect(&tempNode.branch[index], root, tempNode.level) + } + reInsertList = reInsertList.next + } + + // Check for redundant root (not leaf, 1 child) and eliminate TODO replace + // if with while? In case there is a whole branch of redundant roots... + if (*root).count == 1 && (*root).isInternalNode() { + tempNode := (*root).branch[0].child + *root = tempNode + } + return false + } else { + return true + } +} + +// Delete a rectangle from non-root part of an index structure. +// Called by d3removeRect. Descends tree recursively, +// merges branches on the way back up. +// Returns 1 if record not found, 0 if success. 
+func d3removeRectRec(rect *d3rectT, id interface{}, node *d3nodeT, listNode **d3listNodeT) bool { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + if d3overlap(*rect, node.branch[index].rect) { + if !d3removeRectRec(rect, id, node.branch[index].child, listNode) { + if node.branch[index].child.count >= d3minNodes { + // child removed, just resize parent rect + node.branch[index].rect = d3nodeCover(node.branch[index].child) + } else { + // child removed, not enough entries in node, eliminate node + d3reInsert(node.branch[index].child, listNode) + d3disconnectBranch(node, index) // Must return after this call as count has changed + } + return false + } + } + } + return true + } else { // A leaf node + for index := 0; index < node.count; index++ { + if node.branch[index].data == id { + d3disconnectBranch(node, index) // Must return after this call as count has changed + return false + } + } + return true + } +} + +// Decide whether two rectangles d3overlap. +func d3overlap(rectA, rectB d3rectT) bool { + for index := 0; index < d3numDims; index++ { + if rectA.min[index] > rectB.max[index] || + rectB.min[index] > rectA.max[index] { + return false + } + } + return true +} + +// Add a node to the reinsertion list. All its branches will later +// be reinserted into the index structure. +func d3reInsert(node *d3nodeT, listNode **d3listNodeT) { + newListNode := &d3listNodeT{} + newListNode.node = node + newListNode.next = *listNode + *listNode = newListNode +} + +// d3search in an index tree or subtree for all data retangles that d3overlap the argument rectangle. 
+func d3search(node *d3nodeT, rect d3rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) { + if node.isInternalNode() { + // This is an internal node in the tree + for index := 0; index < node.count; index++ { + if d3overlap(rect, node.branch[index].rect) { + var ok bool + foundCount, ok = d3search(node.branch[index].child, rect, foundCount, resultCallback) + if !ok { + // The callback indicated to stop searching + return foundCount, false + } + } + } + } else { + // This is a leaf node + for index := 0; index < node.count; index++ { + if d3overlap(rect, node.branch[index].rect) { + id := node.branch[index].data + foundCount++ + if !resultCallback(id) { + return foundCount, false // Don't continue searching + } + + } + } + } + return foundCount, true // Continue searching +} + +func d4fmin(a, b float64) float64 { + if a < b { + return a + } + return b +} +func d4fmax(a, b float64) float64 { + if a > b { + return a + } + return b +} + +const ( + d4numDims = 4 + d4maxNodes = 8 + d4minNodes = d4maxNodes / 2 + d4useSphericalVolume = true // Better split classification, may be slower on some systems +) + +var d4unitSphereVolume = []float64{ + 0.000000, 2.000000, 3.141593, // Dimension 0,1,2 + 4.188790, 4.934802, 5.263789, // Dimension 3,4,5 + 5.167713, 4.724766, 4.058712, // Dimension 6,7,8 + 3.298509, 2.550164, 1.884104, // Dimension 9,10,11 + 1.335263, 0.910629, 0.599265, // Dimension 12,13,14 + 0.381443, 0.235331, 0.140981, // Dimension 15,16,17 + 0.082146, 0.046622, 0.025807, // Dimension 18,19,20 +}[d4numDims] + +type d4RTree struct { + root *d4nodeT ///< Root of tree +} + +/// Minimal bounding rectangle (n-dimensional) +type d4rectT struct { + min [d4numDims]float64 ///< Min dimensions of bounding box + max [d4numDims]float64 ///< Max dimensions of bounding box +} + +/// May be data or may be another subtree +/// The parents level determines this. 
+/// If the parents level is 0, then this is data +type d4branchT struct { + rect d4rectT ///< Bounds + child *d4nodeT ///< Child node + data interface{} ///< Data Id or Ptr +} + +/// d4nodeT for each branch level +type d4nodeT struct { + count int ///< Count + level int ///< Leaf is zero, others positive + branch [d4maxNodes]d4branchT ///< Branch +} + +func (node *d4nodeT) isInternalNode() bool { + return (node.level > 0) // Not a leaf, but a internal node +} +func (node *d4nodeT) isLeaf() bool { + return (node.level == 0) // A leaf, contains data +} + +/// A link list of nodes for reinsertion after a delete operation +type d4listNodeT struct { + next *d4listNodeT ///< Next in list + node *d4nodeT ///< Node +} + +const d4notTaken = -1 // indicates that position + +/// Variables for finding a split partition +type d4partitionVarsT struct { + partition [d4maxNodes + 1]int + total int + minFill int + count [2]int + cover [2]d4rectT + area [2]float64 + + branchBuf [d4maxNodes + 1]d4branchT + branchCount int + coverSplit d4rectT + coverSplitArea float64 +} + +func d4New() *d4RTree { + // We only support machine word size simple data type eg. integer index or object pointer. + // Since we are storing as union with non data branch + return &d4RTree{ + root: &d4nodeT{}, + } +} + +/// Insert entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d4RTree) Insert(min, max [d4numDims]float64, dataId interface{}) { + var branch d4branchT + branch.data = dataId + for axis := 0; axis < d4numDims; axis++ { + branch.rect.min[axis] = min[axis] + branch.rect.max[axis] = max[axis] + } + d4insertRect(&branch, &tr.root, 0) +} + +/// Remove entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. 
+func (tr *d4RTree) Remove(min, max [d4numDims]float64, dataId interface{}) { + var rect d4rectT + for axis := 0; axis < d4numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + d4removeRect(&rect, dataId, &tr.root) +} + +/// Find all within d4search rectangle +/// \param a_min Min of d4search bounding rect +/// \param a_max Max of d4search bounding rect +/// \param a_searchResult d4search result array. Caller should set grow size. Function will reset, not append to array. +/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching +/// \param a_context User context to pass as parameter to a_resultCallback +/// \return Returns the number of entries found +func (tr *d4RTree) Search(min, max [d4numDims]float64, resultCallback func(data interface{}) bool) int { + var rect d4rectT + for axis := 0; axis < d4numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + foundCount, _ := d4search(tr.root, rect, 0, resultCallback) + return foundCount +} + +/// Count the data elements in this container. This is slow as no internal counter is maintained. +func (tr *d4RTree) Count() int { + var count int + d4countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d4RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d4nodeT{} +} + +func d4countRec(node *d4nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d4countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. 
+// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +func d4insertRectRec(branch *d4branchT, node *d4nodeT, newNode **d4nodeT, level int) bool { + // recurse until we reach the correct level for the new record. data records + // will always be called with a_level == 0 (leaf) + if node.level > level { + // Still above level for insertion, go down tree recursively + var otherNode *d4nodeT + //var newBranch d4branchT + + // find the optimal branch for this record + index := d4pickBranch(&branch.rect, node) + + // recursively insert this record into the picked branch + childWasSplit := d4insertRectRec(branch, node.branch[index].child, &otherNode, level) + + if !childWasSplit { + // Child was not split. Merge the bounding box of the new record with the + // existing bounding box + node.branch[index].rect = d4combineRect(&branch.rect, &(node.branch[index].rect)) + return false + } else { + // Child was split. The old branches are now re-partitioned to two nodes + // so we have to re-calculate the bounding boxes of each node + node.branch[index].rect = d4nodeCover(node.branch[index].child) + var newBranch d4branchT + newBranch.child = otherNode + newBranch.rect = d4nodeCover(otherNode) + + // The old node is already a child of a_node. Now add the newly-created + // node to a_node as well. a_node might be split because of that. + return d4addBranch(&newBranch, node, newNode) + } + } else if node.level == level { + // We have reached level for insertion. Add rect, split if necessary + return d4addBranch(branch, node, newNode) + } else { + // Should never occur + return false + } +} + +// Insert a data rectangle into an index structure. +// d4insertRect provides for splitting the root; +// returns 1 if root was split, 0 if it was not. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. 
+// InsertRect2 does the recursion. +// +func d4insertRect(branch *d4branchT, root **d4nodeT, level int) bool { + var newNode *d4nodeT + + if d4insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d4nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d4branchT + + // add old root node as a child of the new root + newBranch.rect = d4nodeCover(*root) + newBranch.child = *root + d4addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d4nodeCover(newNode) + newBranch.child = newNode + d4addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d4nodeCover(node *d4nodeT) d4rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d4combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d4addBranch(branch *d4branchT, node *d4nodeT, newNode **d4nodeT) bool { + if node.count < d4maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d4splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d4disconnectBranch(node *d4nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. 
Pick the one that will need the smallest increase +// in area to accomodate the new rectangle. This will result in the +// least total area for the covering rectangles in the current node. +// In case of a tie, pick the one which was smaller before, to get +// the best resolution when searching. +func d4pickBranch(rect *d4rectT, node *d4nodeT) int { + var firstTime bool = true + var increase float64 + var bestIncr float64 = -1 + var area float64 + var bestArea float64 + var best int + var tempRect d4rectT + + for index := 0; index < node.count; index++ { + curRect := &node.branch[index].rect + area = d4calcRectVolume(curRect) + tempRect = d4combineRect(rect, curRect) + increase = d4calcRectVolume(&tempRect) - area + if (increase < bestIncr) || firstTime { + best = index + bestArea = area + bestIncr = increase + firstTime = false + } else if (increase == bestIncr) && (area < bestArea) { + best = index + bestArea = area + bestIncr = increase + } + } + return best +} + +// Combine two rectangles into larger one containing both +func d4combineRect(rectA, rectB *d4rectT) d4rectT { + var newRect d4rectT + + for index := 0; index < d4numDims; index++ { + newRect.min[index] = d4fmin(rectA.min[index], rectB.min[index]) + newRect.max[index] = d4fmax(rectA.max[index], rectB.max[index]) + } + + return newRect +} + +// Split a node. +// Divides the nodes branches and the extra one between two nodes. +// Old node is one of the new ones, and one really new one is created. +// Tries more than one method for choosing a partition, uses best result. 
+func d4splitNode(node *d4nodeT, branch *d4branchT, newNode **d4nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d4partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d4getBranches(node, branch, parVars) + + // Find partition + d4choosePartition(parVars, d4minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d4nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d4loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d4rectVolume(rect *d4rectT) float64 { + var volume float64 = 1 + for index := 0; index < d4numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d4rectT +func d4rectSphericalVolume(rect *d4rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d4numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. 
+ if d4numDims == 5 { + return (radius * radius * radius * radius * radius * d4unitSphereVolume) + } else if d4numDims == 4 { + return (radius * radius * radius * radius * d4unitSphereVolume) + } else if d4numDims == 3 { + return (radius * radius * radius * d4unitSphereVolume) + } else if d4numDims == 2 { + return (radius * radius * d4unitSphereVolume) + } else { + return (math.Pow(radius, d4numDims) * d4unitSphereVolume) + } +} + +// Use one of the methods to calculate retangle volume +func d4calcRectVolume(rect *d4rectT) float64 { + if d4useSphericalVolume { + return d4rectSphericalVolume(rect) // Slower but helps certain merge cases + } else { // RTREE_USE_SPHERICAL_VOLUME + return d4rectVolume(rect) // Faster but can cause poor merges + } // RTREE_USE_SPHERICAL_VOLUME +} + +// Load branch buffer with branches from full node plus the extra branch. +func d4getBranches(node *d4nodeT, branch *d4branchT, parVars *d4partitionVarsT) { + // Load the branch buffer + for index := 0; index < d4maxNodes; index++ { + parVars.branchBuf[index] = node.branch[index] + } + parVars.branchBuf[d4maxNodes] = *branch + parVars.branchCount = d4maxNodes + 1 + + // Calculate rect containing all in the set + parVars.coverSplit = parVars.branchBuf[0].rect + for index := 1; index < d4maxNodes+1; index++ { + parVars.coverSplit = d4combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect) + } + parVars.coverSplitArea = d4calcRectVolume(&parVars.coverSplit) +} + +// Method #0 for choosing a partition: +// As the seeds for the two groups, pick the two rects that would waste the +// most area if covered by a single rectangle, i.e. evidently the worst pair +// to have in the same group. +// Of the remaining, one at a time is chosen to be put in one of the two groups. +// The one chosen is the one with the greatest difference in area expansion +// depending on which group - the rect most strongly attracted to one group +// and repelled from the other. 
// If one group gets too full (more would force other group to violate min
// fill requirement) then other group gets the rest.
// These last are the ones that can go in either group most easily.
func d4choosePartition(parVars *d4partitionVarsT, minFill int) {
	var biggestDiff float64
	var group, chosen, betterGroup int

	d4initParVars(parVars, parVars.branchCount, minFill)
	d4pickSeeds(parVars)

	// Greedily place branches while both groups can still reach minFill.
	for ((parVars.count[0] + parVars.count[1]) < parVars.total) &&
		(parVars.count[0] < (parVars.total - parVars.minFill)) &&
		(parVars.count[1] < (parVars.total - parVars.minFill)) {
		biggestDiff = -1
		for index := 0; index < parVars.total; index++ {
			if d4notTaken == parVars.partition[index] {
				curRect := &parVars.branchBuf[index].rect
				rect0 := d4combineRect(curRect, &parVars.cover[0])
				rect1 := d4combineRect(curRect, &parVars.cover[1])
				growth0 := d4calcRectVolume(&rect0) - parVars.area[0]
				growth1 := d4calcRectVolume(&rect1) - parVars.area[1]
				diff := growth1 - growth0
				// group is the side whose cover would grow least.
				if diff >= 0 {
					group = 0
				} else {
					group = 1
					diff = -diff
				}

				if diff > biggestDiff {
					biggestDiff = diff
					chosen = index
					betterGroup = group
				} else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) {
					// Tie: prefer the emptier group.
					chosen = index
					betterGroup = group
				}
			}
		}
		// Commit the branch with the strongest preference this round.
		d4classify(chosen, betterGroup, parVars)
	}

	// If one group too full, put remaining rects in the other
	if (parVars.count[0] + parVars.count[1]) < parVars.total {
		if parVars.count[0] >= parVars.total-parVars.minFill {
			group = 1
		} else {
			group = 0
		}
		for index := 0; index < parVars.total; index++ {
			if d4notTaken == parVars.partition[index] {
				d4classify(index, group, parVars)
			}
		}
	}
}

// Copy branches from the buffer into two nodes according to the partition.
+func d4loadNodes(nodeA, nodeB *d4nodeT, parVars *d4partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d4nodeT{nodeA, nodeB} + + // It is assured that d4addBranch here will not cause a node split. + d4addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d4partitionVarsT structure. +func d4initParVars(parVars *d4partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d4notTaken + } +} + +func d4pickSeeds(parVars *d4partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d4maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d4calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d4combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d4calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d4classify(seed0, 0, parVars) + d4classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. 
+func d4classify(index, group int, parVars *d4partitionVarsT) { + parVars.partition[index] = group + + // Calculate combined rect + if parVars.count[group] == 0 { + parVars.cover[group] = parVars.branchBuf[index].rect + } else { + parVars.cover[group] = d4combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group]) + } + + // Calculate volume of combined rect + parVars.area[group] = d4calcRectVolume(&parVars.cover[group]) + + parVars.count[group]++ +} + +// Delete a data rectangle from an index structure. +// Pass in a pointer to a d4rectT, the tid of the record, ptr to ptr to root node. +// Returns 1 if record not found, 0 if success. +// d4removeRect provides for eliminating the root. +func d4removeRect(rect *d4rectT, id interface{}, root **d4nodeT) bool { + var reInsertList *d4listNodeT + + if !d4removeRectRec(rect, id, *root, &reInsertList) { + // Found and deleted a data item + // Reinsert any branches from eliminated nodes + for reInsertList != nil { + tempNode := reInsertList.node + + for index := 0; index < tempNode.count; index++ { + // TODO go over this code. should I use (tempNode->m_level - 1)? + d4insertRect(&tempNode.branch[index], root, tempNode.level) + } + reInsertList = reInsertList.next + } + + // Check for redundant root (not leaf, 1 child) and eliminate TODO replace + // if with while? In case there is a whole branch of redundant roots... + if (*root).count == 1 && (*root).isInternalNode() { + tempNode := (*root).branch[0].child + *root = tempNode + } + return false + } else { + return true + } +} + +// Delete a rectangle from non-root part of an index structure. +// Called by d4removeRect. Descends tree recursively, +// merges branches on the way back up. +// Returns 1 if record not found, 0 if success. 
+func d4removeRectRec(rect *d4rectT, id interface{}, node *d4nodeT, listNode **d4listNodeT) bool { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + if d4overlap(*rect, node.branch[index].rect) { + if !d4removeRectRec(rect, id, node.branch[index].child, listNode) { + if node.branch[index].child.count >= d4minNodes { + // child removed, just resize parent rect + node.branch[index].rect = d4nodeCover(node.branch[index].child) + } else { + // child removed, not enough entries in node, eliminate node + d4reInsert(node.branch[index].child, listNode) + d4disconnectBranch(node, index) // Must return after this call as count has changed + } + return false + } + } + } + return true + } else { // A leaf node + for index := 0; index < node.count; index++ { + if node.branch[index].data == id { + d4disconnectBranch(node, index) // Must return after this call as count has changed + return false + } + } + return true + } +} + +// Decide whether two rectangles d4overlap. +func d4overlap(rectA, rectB d4rectT) bool { + for index := 0; index < d4numDims; index++ { + if rectA.min[index] > rectB.max[index] || + rectB.min[index] > rectA.max[index] { + return false + } + } + return true +} + +// Add a node to the reinsertion list. All its branches will later +// be reinserted into the index structure. +func d4reInsert(node *d4nodeT, listNode **d4listNodeT) { + newListNode := &d4listNodeT{} + newListNode.node = node + newListNode.next = *listNode + *listNode = newListNode +} + +// d4search in an index tree or subtree for all data retangles that d4overlap the argument rectangle. 
+func d4search(node *d4nodeT, rect d4rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) { + if node.isInternalNode() { + // This is an internal node in the tree + for index := 0; index < node.count; index++ { + if d4overlap(rect, node.branch[index].rect) { + var ok bool + foundCount, ok = d4search(node.branch[index].child, rect, foundCount, resultCallback) + if !ok { + // The callback indicated to stop searching + return foundCount, false + } + } + } + } else { + // This is a leaf node + for index := 0; index < node.count; index++ { + if d4overlap(rect, node.branch[index].rect) { + id := node.branch[index].data + foundCount++ + if !resultCallback(id) { + return foundCount, false // Don't continue searching + } + + } + } + } + return foundCount, true // Continue searching +} + +func d5fmin(a, b float64) float64 { + if a < b { + return a + } + return b +} +func d5fmax(a, b float64) float64 { + if a > b { + return a + } + return b +} + +const ( + d5numDims = 5 + d5maxNodes = 8 + d5minNodes = d5maxNodes / 2 + d5useSphericalVolume = true // Better split classification, may be slower on some systems +) + +var d5unitSphereVolume = []float64{ + 0.000000, 2.000000, 3.141593, // Dimension 0,1,2 + 4.188790, 4.934802, 5.263789, // Dimension 3,4,5 + 5.167713, 4.724766, 4.058712, // Dimension 6,7,8 + 3.298509, 2.550164, 1.884104, // Dimension 9,10,11 + 1.335263, 0.910629, 0.599265, // Dimension 12,13,14 + 0.381443, 0.235331, 0.140981, // Dimension 15,16,17 + 0.082146, 0.046622, 0.025807, // Dimension 18,19,20 +}[d5numDims] + +type d5RTree struct { + root *d5nodeT ///< Root of tree +} + +/// Minimal bounding rectangle (n-dimensional) +type d5rectT struct { + min [d5numDims]float64 ///< Min dimensions of bounding box + max [d5numDims]float64 ///< Max dimensions of bounding box +} + +/// May be data or may be another subtree +/// The parents level determines this. 
/// If the parents level is 0, then this is data
type d5branchT struct {
	rect  d5rectT     ///< Bounds
	child *d5nodeT    ///< Child node (nil for data branches)
	data  interface{} ///< Data Id or Ptr
}

/// d5nodeT for each branch level
type d5nodeT struct {
	count  int                   ///< Number of branches in use
	level  int                   ///< Leaf is zero, others positive
	branch [d5maxNodes]d5branchT ///< Branch slots
}

func (node *d5nodeT) isInternalNode() bool {
	return (node.level > 0) // Not a leaf, but an internal node
}
func (node *d5nodeT) isLeaf() bool {
	return (node.level == 0) // A leaf, contains data
}

/// A link list of nodes for reinsertion after a delete operation
type d5listNodeT struct {
	next *d5listNodeT ///< Next in list
	node *d5nodeT     ///< Node
}

// Marks a branchBuf slot not yet assigned to either partition group.
const d5notTaken = -1 // indicates that the position is not taken yet

/// Variables for finding a split partition
type d5partitionVarsT struct {
	partition [d5maxNodes + 1]int // group (0/1) chosen per buffered branch
	total     int                 // number of branches to partition
	minFill   int                 // minimum branches each group must receive
	count     [2]int              // current member count per group
	cover     [2]d5rectT          // bounding rect per group
	area      [2]float64          // volume of each group's cover

	branchBuf      [d5maxNodes + 1]d5branchT // all branches being split
	branchCount    int
	coverSplit     d5rectT // rect covering the whole buffer
	coverSplitArea float64 // volume of coverSplit
}

func d5New() *d5RTree {
	// We only support machine word size simple data type eg. integer index or object pointer.
	// Since we are storing as union with non data branch
	return &d5RTree{
		root: &d5nodeT{},
	}
}

/// Insert entry
/// \param a_min Min of bounding rect
/// \param a_max Max of bounding rect
/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed.
func (tr *d5RTree) Insert(min, max [d5numDims]float64, dataId interface{}) {
	var branch d5branchT
	branch.data = dataId
	for axis := 0; axis < d5numDims; axis++ {
		branch.rect.min[axis] = min[axis]
		branch.rect.max[axis] = max[axis]
	}
	// Data rectangles are always inserted at leaf level (0).
	d5insertRect(&branch, &tr.root, 0)
}

/// Remove entry
/// \param a_min Min of bounding rect
/// \param a_max Max of bounding rect
/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed.
+func (tr *d5RTree) Remove(min, max [d5numDims]float64, dataId interface{}) { + var rect d5rectT + for axis := 0; axis < d5numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + d5removeRect(&rect, dataId, &tr.root) +} + +/// Find all within d5search rectangle +/// \param a_min Min of d5search bounding rect +/// \param a_max Max of d5search bounding rect +/// \param a_searchResult d5search result array. Caller should set grow size. Function will reset, not append to array. +/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching +/// \param a_context User context to pass as parameter to a_resultCallback +/// \return Returns the number of entries found +func (tr *d5RTree) Search(min, max [d5numDims]float64, resultCallback func(data interface{}) bool) int { + var rect d5rectT + for axis := 0; axis < d5numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + foundCount, _ := d5search(tr.root, rect, 0, resultCallback) + return foundCount +} + +/// Count the data elements in this container. This is slow as no internal counter is maintained. +func (tr *d5RTree) Count() int { + var count int + d5countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d5RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d5nodeT{} +} + +func d5countRec(node *d5nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d5countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. 
+// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +func d5insertRectRec(branch *d5branchT, node *d5nodeT, newNode **d5nodeT, level int) bool { + // recurse until we reach the correct level for the new record. data records + // will always be called with a_level == 0 (leaf) + if node.level > level { + // Still above level for insertion, go down tree recursively + var otherNode *d5nodeT + //var newBranch d5branchT + + // find the optimal branch for this record + index := d5pickBranch(&branch.rect, node) + + // recursively insert this record into the picked branch + childWasSplit := d5insertRectRec(branch, node.branch[index].child, &otherNode, level) + + if !childWasSplit { + // Child was not split. Merge the bounding box of the new record with the + // existing bounding box + node.branch[index].rect = d5combineRect(&branch.rect, &(node.branch[index].rect)) + return false + } else { + // Child was split. The old branches are now re-partitioned to two nodes + // so we have to re-calculate the bounding boxes of each node + node.branch[index].rect = d5nodeCover(node.branch[index].child) + var newBranch d5branchT + newBranch.child = otherNode + newBranch.rect = d5nodeCover(otherNode) + + // The old node is already a child of a_node. Now add the newly-created + // node to a_node as well. a_node might be split because of that. + return d5addBranch(&newBranch, node, newNode) + } + } else if node.level == level { + // We have reached level for insertion. Add rect, split if necessary + return d5addBranch(branch, node, newNode) + } else { + // Should never occur + return false + } +} + +// Insert a data rectangle into an index structure. +// d5insertRect provides for splitting the root; +// returns 1 if root was split, 0 if it was not. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. 
+// InsertRect2 does the recursion. +// +func d5insertRect(branch *d5branchT, root **d5nodeT, level int) bool { + var newNode *d5nodeT + + if d5insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d5nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d5branchT + + // add old root node as a child of the new root + newBranch.rect = d5nodeCover(*root) + newBranch.child = *root + d5addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d5nodeCover(newNode) + newBranch.child = newNode + d5addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d5nodeCover(node *d5nodeT) d5rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d5combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d5addBranch(branch *d5branchT, node *d5nodeT, newNode **d5nodeT) bool { + if node.count < d5maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d5splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d5disconnectBranch(node *d5nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. 
Pick the one that will need the smallest increase +// in area to accomodate the new rectangle. This will result in the +// least total area for the covering rectangles in the current node. +// In case of a tie, pick the one which was smaller before, to get +// the best resolution when searching. +func d5pickBranch(rect *d5rectT, node *d5nodeT) int { + var firstTime bool = true + var increase float64 + var bestIncr float64 = -1 + var area float64 + var bestArea float64 + var best int + var tempRect d5rectT + + for index := 0; index < node.count; index++ { + curRect := &node.branch[index].rect + area = d5calcRectVolume(curRect) + tempRect = d5combineRect(rect, curRect) + increase = d5calcRectVolume(&tempRect) - area + if (increase < bestIncr) || firstTime { + best = index + bestArea = area + bestIncr = increase + firstTime = false + } else if (increase == bestIncr) && (area < bestArea) { + best = index + bestArea = area + bestIncr = increase + } + } + return best +} + +// Combine two rectangles into larger one containing both +func d5combineRect(rectA, rectB *d5rectT) d5rectT { + var newRect d5rectT + + for index := 0; index < d5numDims; index++ { + newRect.min[index] = d5fmin(rectA.min[index], rectB.min[index]) + newRect.max[index] = d5fmax(rectA.max[index], rectB.max[index]) + } + + return newRect +} + +// Split a node. +// Divides the nodes branches and the extra one between two nodes. +// Old node is one of the new ones, and one really new one is created. +// Tries more than one method for choosing a partition, uses best result. 
+func d5splitNode(node *d5nodeT, branch *d5branchT, newNode **d5nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d5partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d5getBranches(node, branch, parVars) + + // Find partition + d5choosePartition(parVars, d5minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d5nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d5loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d5rectVolume(rect *d5rectT) float64 { + var volume float64 = 1 + for index := 0; index < d5numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d5rectT +func d5rectSphericalVolume(rect *d5rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d5numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. 
+ if d5numDims == 5 { + return (radius * radius * radius * radius * radius * d5unitSphereVolume) + } else if d5numDims == 4 { + return (radius * radius * radius * radius * d5unitSphereVolume) + } else if d5numDims == 3 { + return (radius * radius * radius * d5unitSphereVolume) + } else if d5numDims == 2 { + return (radius * radius * d5unitSphereVolume) + } else { + return (math.Pow(radius, d5numDims) * d5unitSphereVolume) + } +} + +// Use one of the methods to calculate retangle volume +func d5calcRectVolume(rect *d5rectT) float64 { + if d5useSphericalVolume { + return d5rectSphericalVolume(rect) // Slower but helps certain merge cases + } else { // RTREE_USE_SPHERICAL_VOLUME + return d5rectVolume(rect) // Faster but can cause poor merges + } // RTREE_USE_SPHERICAL_VOLUME +} + +// Load branch buffer with branches from full node plus the extra branch. +func d5getBranches(node *d5nodeT, branch *d5branchT, parVars *d5partitionVarsT) { + // Load the branch buffer + for index := 0; index < d5maxNodes; index++ { + parVars.branchBuf[index] = node.branch[index] + } + parVars.branchBuf[d5maxNodes] = *branch + parVars.branchCount = d5maxNodes + 1 + + // Calculate rect containing all in the set + parVars.coverSplit = parVars.branchBuf[0].rect + for index := 1; index < d5maxNodes+1; index++ { + parVars.coverSplit = d5combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect) + } + parVars.coverSplitArea = d5calcRectVolume(&parVars.coverSplit) +} + +// Method #0 for choosing a partition: +// As the seeds for the two groups, pick the two rects that would waste the +// most area if covered by a single rectangle, i.e. evidently the worst pair +// to have in the same group. +// Of the remaining, one at a time is chosen to be put in one of the two groups. +// The one chosen is the one with the greatest difference in area expansion +// depending on which group - the rect most strongly attracted to one group +// and repelled from the other. 
// If one group gets too full (more would force other group to violate min
// fill requirement) then other group gets the rest.
// These last are the ones that can go in either group most easily.
func d5choosePartition(parVars *d5partitionVarsT, minFill int) {
	var biggestDiff float64
	var group, chosen, betterGroup int

	d5initParVars(parVars, parVars.branchCount, minFill)
	d5pickSeeds(parVars)

	// Greedily place branches while both groups can still reach minFill.
	for ((parVars.count[0] + parVars.count[1]) < parVars.total) &&
		(parVars.count[0] < (parVars.total - parVars.minFill)) &&
		(parVars.count[1] < (parVars.total - parVars.minFill)) {
		biggestDiff = -1
		for index := 0; index < parVars.total; index++ {
			if d5notTaken == parVars.partition[index] {
				curRect := &parVars.branchBuf[index].rect
				rect0 := d5combineRect(curRect, &parVars.cover[0])
				rect1 := d5combineRect(curRect, &parVars.cover[1])
				growth0 := d5calcRectVolume(&rect0) - parVars.area[0]
				growth1 := d5calcRectVolume(&rect1) - parVars.area[1]
				diff := growth1 - growth0
				// group is the side whose cover would grow least.
				if diff >= 0 {
					group = 0
				} else {
					group = 1
					diff = -diff
				}

				if diff > biggestDiff {
					biggestDiff = diff
					chosen = index
					betterGroup = group
				} else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) {
					// Tie: prefer the emptier group.
					chosen = index
					betterGroup = group
				}
			}
		}
		// Commit the branch with the strongest preference this round.
		d5classify(chosen, betterGroup, parVars)
	}

	// If one group too full, put remaining rects in the other
	if (parVars.count[0] + parVars.count[1]) < parVars.total {
		if parVars.count[0] >= parVars.total-parVars.minFill {
			group = 1
		} else {
			group = 0
		}
		for index := 0; index < parVars.total; index++ {
			if d5notTaken == parVars.partition[index] {
				d5classify(index, group, parVars)
			}
		}
	}
}

// Copy branches from the buffer into two nodes according to the partition.
+func d5loadNodes(nodeA, nodeB *d5nodeT, parVars *d5partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d5nodeT{nodeA, nodeB} + + // It is assured that d5addBranch here will not cause a node split. + d5addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d5partitionVarsT structure. +func d5initParVars(parVars *d5partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d5notTaken + } +} + +func d5pickSeeds(parVars *d5partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d5maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d5calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d5combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d5calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d5classify(seed0, 0, parVars) + d5classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. 
+func d5classify(index, group int, parVars *d5partitionVarsT) { + parVars.partition[index] = group + + // Calculate combined rect + if parVars.count[group] == 0 { + parVars.cover[group] = parVars.branchBuf[index].rect + } else { + parVars.cover[group] = d5combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group]) + } + + // Calculate volume of combined rect + parVars.area[group] = d5calcRectVolume(&parVars.cover[group]) + + parVars.count[group]++ +} + +// Delete a data rectangle from an index structure. +// Pass in a pointer to a d5rectT, the tid of the record, ptr to ptr to root node. +// Returns 1 if record not found, 0 if success. +// d5removeRect provides for eliminating the root. +func d5removeRect(rect *d5rectT, id interface{}, root **d5nodeT) bool { + var reInsertList *d5listNodeT + + if !d5removeRectRec(rect, id, *root, &reInsertList) { + // Found and deleted a data item + // Reinsert any branches from eliminated nodes + for reInsertList != nil { + tempNode := reInsertList.node + + for index := 0; index < tempNode.count; index++ { + // TODO go over this code. should I use (tempNode->m_level - 1)? + d5insertRect(&tempNode.branch[index], root, tempNode.level) + } + reInsertList = reInsertList.next + } + + // Check for redundant root (not leaf, 1 child) and eliminate TODO replace + // if with while? In case there is a whole branch of redundant roots... + if (*root).count == 1 && (*root).isInternalNode() { + tempNode := (*root).branch[0].child + *root = tempNode + } + return false + } else { + return true + } +} + +// Delete a rectangle from non-root part of an index structure. +// Called by d5removeRect. Descends tree recursively, +// merges branches on the way back up. +// Returns 1 if record not found, 0 if success. 
+func d5removeRectRec(rect *d5rectT, id interface{}, node *d5nodeT, listNode **d5listNodeT) bool { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + if d5overlap(*rect, node.branch[index].rect) { + if !d5removeRectRec(rect, id, node.branch[index].child, listNode) { + if node.branch[index].child.count >= d5minNodes { + // child removed, just resize parent rect + node.branch[index].rect = d5nodeCover(node.branch[index].child) + } else { + // child removed, not enough entries in node, eliminate node + d5reInsert(node.branch[index].child, listNode) + d5disconnectBranch(node, index) // Must return after this call as count has changed + } + return false + } + } + } + return true + } else { // A leaf node + for index := 0; index < node.count; index++ { + if node.branch[index].data == id { + d5disconnectBranch(node, index) // Must return after this call as count has changed + return false + } + } + return true + } +} + +// Decide whether two rectangles d5overlap. +func d5overlap(rectA, rectB d5rectT) bool { + for index := 0; index < d5numDims; index++ { + if rectA.min[index] > rectB.max[index] || + rectB.min[index] > rectA.max[index] { + return false + } + } + return true +} + +// Add a node to the reinsertion list. All its branches will later +// be reinserted into the index structure. +func d5reInsert(node *d5nodeT, listNode **d5listNodeT) { + newListNode := &d5listNodeT{} + newListNode.node = node + newListNode.next = *listNode + *listNode = newListNode +} + +// d5search in an index tree or subtree for all data retangles that d5overlap the argument rectangle. 
+func d5search(node *d5nodeT, rect d5rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) { + if node.isInternalNode() { + // This is an internal node in the tree + for index := 0; index < node.count; index++ { + if d5overlap(rect, node.branch[index].rect) { + var ok bool + foundCount, ok = d5search(node.branch[index].child, rect, foundCount, resultCallback) + if !ok { + // The callback indicated to stop searching + return foundCount, false + } + } + } + } else { + // This is a leaf node + for index := 0; index < node.count; index++ { + if d5overlap(rect, node.branch[index].rect) { + id := node.branch[index].data + foundCount++ + if !resultCallback(id) { + return foundCount, false // Don't continue searching + } + + } + } + } + return foundCount, true // Continue searching +} + +func d6fmin(a, b float64) float64 { + if a < b { + return a + } + return b +} +func d6fmax(a, b float64) float64 { + if a > b { + return a + } + return b +} + +const ( + d6numDims = 6 + d6maxNodes = 8 + d6minNodes = d6maxNodes / 2 + d6useSphericalVolume = true // Better split classification, may be slower on some systems +) + +var d6unitSphereVolume = []float64{ + 0.000000, 2.000000, 3.141593, // Dimension 0,1,2 + 4.188790, 4.934802, 5.263789, // Dimension 3,4,5 + 5.167713, 4.724766, 4.058712, // Dimension 6,7,8 + 3.298509, 2.550164, 1.884104, // Dimension 9,10,11 + 1.335263, 0.910629, 0.599265, // Dimension 12,13,14 + 0.381443, 0.235331, 0.140981, // Dimension 15,16,17 + 0.082146, 0.046622, 0.025807, // Dimension 18,19,20 +}[d6numDims] + +type d6RTree struct { + root *d6nodeT ///< Root of tree +} + +/// Minimal bounding rectangle (n-dimensional) +type d6rectT struct { + min [d6numDims]float64 ///< Min dimensions of bounding box + max [d6numDims]float64 ///< Max dimensions of bounding box +} + +/// May be data or may be another subtree +/// The parents level determines this. 
/// If the parents level is 0, then this is data
type d6branchT struct {
	rect  d6rectT     ///< Bounds
	child *d6nodeT    ///< Child node (nil for data branches)
	data  interface{} ///< Data Id or Ptr
}

/// d6nodeT for each branch level
type d6nodeT struct {
	count  int                   ///< Number of branches in use
	level  int                   ///< Leaf is zero, others positive
	branch [d6maxNodes]d6branchT ///< Branch slots
}

func (node *d6nodeT) isInternalNode() bool {
	return (node.level > 0) // Not a leaf, but an internal node
}
func (node *d6nodeT) isLeaf() bool {
	return (node.level == 0) // A leaf, contains data
}

/// A link list of nodes for reinsertion after a delete operation
type d6listNodeT struct {
	next *d6listNodeT ///< Next in list
	node *d6nodeT     ///< Node
}

// Marks a branchBuf slot not yet assigned to either partition group.
const d6notTaken = -1 // indicates that the position is not taken yet

/// Variables for finding a split partition
type d6partitionVarsT struct {
	partition [d6maxNodes + 1]int // group (0/1) chosen per buffered branch
	total     int                 // number of branches to partition
	minFill   int                 // minimum branches each group must receive
	count     [2]int              // current member count per group
	cover     [2]d6rectT          // bounding rect per group
	area      [2]float64          // volume of each group's cover

	branchBuf      [d6maxNodes + 1]d6branchT // all branches being split
	branchCount    int
	coverSplit     d6rectT // rect covering the whole buffer
	coverSplitArea float64 // volume of coverSplit
}

func d6New() *d6RTree {
	// We only support machine word size simple data type eg. integer index or object pointer.
	// Since we are storing as union with non data branch
	return &d6RTree{
		root: &d6nodeT{},
	}
}

/// Insert entry
/// \param a_min Min of bounding rect
/// \param a_max Max of bounding rect
/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed.
func (tr *d6RTree) Insert(min, max [d6numDims]float64, dataId interface{}) {
	var branch d6branchT
	branch.data = dataId
	for axis := 0; axis < d6numDims; axis++ {
		branch.rect.min[axis] = min[axis]
		branch.rect.max[axis] = max[axis]
	}
	// Data rectangles are always inserted at leaf level (0).
	d6insertRect(&branch, &tr.root, 0)
}

/// Remove entry
/// \param a_min Min of bounding rect
/// \param a_max Max of bounding rect
/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed.
+func (tr *d6RTree) Remove(min, max [d6numDims]float64, dataId interface{}) { + var rect d6rectT + for axis := 0; axis < d6numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + d6removeRect(&rect, dataId, &tr.root) +} + +/// Find all within d6search rectangle +/// \param a_min Min of d6search bounding rect +/// \param a_max Max of d6search bounding rect +/// \param a_searchResult d6search result array. Caller should set grow size. Function will reset, not append to array. +/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching +/// \param a_context User context to pass as parameter to a_resultCallback +/// \return Returns the number of entries found +func (tr *d6RTree) Search(min, max [d6numDims]float64, resultCallback func(data interface{}) bool) int { + var rect d6rectT + for axis := 0; axis < d6numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + foundCount, _ := d6search(tr.root, rect, 0, resultCallback) + return foundCount +} + +/// Count the data elements in this container. This is slow as no internal counter is maintained. +func (tr *d6RTree) Count() int { + var count int + d6countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d6RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d6nodeT{} +} + +func d6countRec(node *d6nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d6countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. 
+// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +func d6insertRectRec(branch *d6branchT, node *d6nodeT, newNode **d6nodeT, level int) bool { + // recurse until we reach the correct level for the new record. data records + // will always be called with a_level == 0 (leaf) + if node.level > level { + // Still above level for insertion, go down tree recursively + var otherNode *d6nodeT + //var newBranch d6branchT + + // find the optimal branch for this record + index := d6pickBranch(&branch.rect, node) + + // recursively insert this record into the picked branch + childWasSplit := d6insertRectRec(branch, node.branch[index].child, &otherNode, level) + + if !childWasSplit { + // Child was not split. Merge the bounding box of the new record with the + // existing bounding box + node.branch[index].rect = d6combineRect(&branch.rect, &(node.branch[index].rect)) + return false + } else { + // Child was split. The old branches are now re-partitioned to two nodes + // so we have to re-calculate the bounding boxes of each node + node.branch[index].rect = d6nodeCover(node.branch[index].child) + var newBranch d6branchT + newBranch.child = otherNode + newBranch.rect = d6nodeCover(otherNode) + + // The old node is already a child of a_node. Now add the newly-created + // node to a_node as well. a_node might be split because of that. + return d6addBranch(&newBranch, node, newNode) + } + } else if node.level == level { + // We have reached level for insertion. Add rect, split if necessary + return d6addBranch(branch, node, newNode) + } else { + // Should never occur + return false + } +} + +// Insert a data rectangle into an index structure. +// d6insertRect provides for splitting the root; +// returns 1 if root was split, 0 if it was not. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. 
+// InsertRect2 does the recursion. +// +func d6insertRect(branch *d6branchT, root **d6nodeT, level int) bool { + var newNode *d6nodeT + + if d6insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d6nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d6branchT + + // add old root node as a child of the new root + newBranch.rect = d6nodeCover(*root) + newBranch.child = *root + d6addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d6nodeCover(newNode) + newBranch.child = newNode + d6addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d6nodeCover(node *d6nodeT) d6rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d6combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d6addBranch(branch *d6branchT, node *d6nodeT, newNode **d6nodeT) bool { + if node.count < d6maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d6splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d6disconnectBranch(node *d6nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. 
Pick the one that will need the smallest increase +// in area to accomodate the new rectangle. This will result in the +// least total area for the covering rectangles in the current node. +// In case of a tie, pick the one which was smaller before, to get +// the best resolution when searching. +func d6pickBranch(rect *d6rectT, node *d6nodeT) int { + var firstTime bool = true + var increase float64 + var bestIncr float64 = -1 + var area float64 + var bestArea float64 + var best int + var tempRect d6rectT + + for index := 0; index < node.count; index++ { + curRect := &node.branch[index].rect + area = d6calcRectVolume(curRect) + tempRect = d6combineRect(rect, curRect) + increase = d6calcRectVolume(&tempRect) - area + if (increase < bestIncr) || firstTime { + best = index + bestArea = area + bestIncr = increase + firstTime = false + } else if (increase == bestIncr) && (area < bestArea) { + best = index + bestArea = area + bestIncr = increase + } + } + return best +} + +// Combine two rectangles into larger one containing both +func d6combineRect(rectA, rectB *d6rectT) d6rectT { + var newRect d6rectT + + for index := 0; index < d6numDims; index++ { + newRect.min[index] = d6fmin(rectA.min[index], rectB.min[index]) + newRect.max[index] = d6fmax(rectA.max[index], rectB.max[index]) + } + + return newRect +} + +// Split a node. +// Divides the nodes branches and the extra one between two nodes. +// Old node is one of the new ones, and one really new one is created. +// Tries more than one method for choosing a partition, uses best result. 
+func d6splitNode(node *d6nodeT, branch *d6branchT, newNode **d6nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d6partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d6getBranches(node, branch, parVars) + + // Find partition + d6choosePartition(parVars, d6minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d6nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d6loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d6rectVolume(rect *d6rectT) float64 { + var volume float64 = 1 + for index := 0; index < d6numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d6rectT +func d6rectSphericalVolume(rect *d6rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d6numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. 
+ if d6numDims == 5 { + return (radius * radius * radius * radius * radius * d6unitSphereVolume) + } else if d6numDims == 4 { + return (radius * radius * radius * radius * d6unitSphereVolume) + } else if d6numDims == 3 { + return (radius * radius * radius * d6unitSphereVolume) + } else if d6numDims == 2 { + return (radius * radius * d6unitSphereVolume) + } else { + return (math.Pow(radius, d6numDims) * d6unitSphereVolume) + } +} + +// Use one of the methods to calculate retangle volume +func d6calcRectVolume(rect *d6rectT) float64 { + if d6useSphericalVolume { + return d6rectSphericalVolume(rect) // Slower but helps certain merge cases + } else { // RTREE_USE_SPHERICAL_VOLUME + return d6rectVolume(rect) // Faster but can cause poor merges + } // RTREE_USE_SPHERICAL_VOLUME +} + +// Load branch buffer with branches from full node plus the extra branch. +func d6getBranches(node *d6nodeT, branch *d6branchT, parVars *d6partitionVarsT) { + // Load the branch buffer + for index := 0; index < d6maxNodes; index++ { + parVars.branchBuf[index] = node.branch[index] + } + parVars.branchBuf[d6maxNodes] = *branch + parVars.branchCount = d6maxNodes + 1 + + // Calculate rect containing all in the set + parVars.coverSplit = parVars.branchBuf[0].rect + for index := 1; index < d6maxNodes+1; index++ { + parVars.coverSplit = d6combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect) + } + parVars.coverSplitArea = d6calcRectVolume(&parVars.coverSplit) +} + +// Method #0 for choosing a partition: +// As the seeds for the two groups, pick the two rects that would waste the +// most area if covered by a single rectangle, i.e. evidently the worst pair +// to have in the same group. +// Of the remaining, one at a time is chosen to be put in one of the two groups. +// The one chosen is the one with the greatest difference in area expansion +// depending on which group - the rect most strongly attracted to one group +// and repelled from the other. 
+// If one group gets too full (more would force other group to violate min +// fill requirement) then other group gets the rest. +// These last are the ones that can go in either group most easily. +func d6choosePartition(parVars *d6partitionVarsT, minFill int) { + var biggestDiff float64 + var group, chosen, betterGroup int + + d6initParVars(parVars, parVars.branchCount, minFill) + d6pickSeeds(parVars) + + for ((parVars.count[0] + parVars.count[1]) < parVars.total) && + (parVars.count[0] < (parVars.total - parVars.minFill)) && + (parVars.count[1] < (parVars.total - parVars.minFill)) { + biggestDiff = -1 + for index := 0; index < parVars.total; index++ { + if d6notTaken == parVars.partition[index] { + curRect := &parVars.branchBuf[index].rect + rect0 := d6combineRect(curRect, &parVars.cover[0]) + rect1 := d6combineRect(curRect, &parVars.cover[1]) + growth0 := d6calcRectVolume(&rect0) - parVars.area[0] + growth1 := d6calcRectVolume(&rect1) - parVars.area[1] + diff := growth1 - growth0 + if diff >= 0 { + group = 0 + } else { + group = 1 + diff = -diff + } + + if diff > biggestDiff { + biggestDiff = diff + chosen = index + betterGroup = group + } else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) { + chosen = index + betterGroup = group + } + } + } + d6classify(chosen, betterGroup, parVars) + } + + // If one group too full, put remaining rects in the other + if (parVars.count[0] + parVars.count[1]) < parVars.total { + if parVars.count[0] >= parVars.total-parVars.minFill { + group = 1 + } else { + group = 0 + } + for index := 0; index < parVars.total; index++ { + if d6notTaken == parVars.partition[index] { + d6classify(index, group, parVars) + } + } + } +} + +// Copy branches from the buffer into two nodes according to the partition. 
+func d6loadNodes(nodeA, nodeB *d6nodeT, parVars *d6partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d6nodeT{nodeA, nodeB} + + // It is assured that d6addBranch here will not cause a node split. + d6addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d6partitionVarsT structure. +func d6initParVars(parVars *d6partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d6notTaken + } +} + +func d6pickSeeds(parVars *d6partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d6maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d6calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d6combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d6calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d6classify(seed0, 0, parVars) + d6classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. 
+func d6classify(index, group int, parVars *d6partitionVarsT) { + parVars.partition[index] = group + + // Calculate combined rect + if parVars.count[group] == 0 { + parVars.cover[group] = parVars.branchBuf[index].rect + } else { + parVars.cover[group] = d6combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group]) + } + + // Calculate volume of combined rect + parVars.area[group] = d6calcRectVolume(&parVars.cover[group]) + + parVars.count[group]++ +} + +// Delete a data rectangle from an index structure. +// Pass in a pointer to a d6rectT, the tid of the record, ptr to ptr to root node. +// Returns 1 if record not found, 0 if success. +// d6removeRect provides for eliminating the root. +func d6removeRect(rect *d6rectT, id interface{}, root **d6nodeT) bool { + var reInsertList *d6listNodeT + + if !d6removeRectRec(rect, id, *root, &reInsertList) { + // Found and deleted a data item + // Reinsert any branches from eliminated nodes + for reInsertList != nil { + tempNode := reInsertList.node + + for index := 0; index < tempNode.count; index++ { + // TODO go over this code. should I use (tempNode->m_level - 1)? + d6insertRect(&tempNode.branch[index], root, tempNode.level) + } + reInsertList = reInsertList.next + } + + // Check for redundant root (not leaf, 1 child) and eliminate TODO replace + // if with while? In case there is a whole branch of redundant roots... + if (*root).count == 1 && (*root).isInternalNode() { + tempNode := (*root).branch[0].child + *root = tempNode + } + return false + } else { + return true + } +} + +// Delete a rectangle from non-root part of an index structure. +// Called by d6removeRect. Descends tree recursively, +// merges branches on the way back up. +// Returns 1 if record not found, 0 if success. 
+func d6removeRectRec(rect *d6rectT, id interface{}, node *d6nodeT, listNode **d6listNodeT) bool { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + if d6overlap(*rect, node.branch[index].rect) { + if !d6removeRectRec(rect, id, node.branch[index].child, listNode) { + if node.branch[index].child.count >= d6minNodes { + // child removed, just resize parent rect + node.branch[index].rect = d6nodeCover(node.branch[index].child) + } else { + // child removed, not enough entries in node, eliminate node + d6reInsert(node.branch[index].child, listNode) + d6disconnectBranch(node, index) // Must return after this call as count has changed + } + return false + } + } + } + return true + } else { // A leaf node + for index := 0; index < node.count; index++ { + if node.branch[index].data == id { + d6disconnectBranch(node, index) // Must return after this call as count has changed + return false + } + } + return true + } +} + +// Decide whether two rectangles d6overlap. +func d6overlap(rectA, rectB d6rectT) bool { + for index := 0; index < d6numDims; index++ { + if rectA.min[index] > rectB.max[index] || + rectB.min[index] > rectA.max[index] { + return false + } + } + return true +} + +// Add a node to the reinsertion list. All its branches will later +// be reinserted into the index structure. +func d6reInsert(node *d6nodeT, listNode **d6listNodeT) { + newListNode := &d6listNodeT{} + newListNode.node = node + newListNode.next = *listNode + *listNode = newListNode +} + +// d6search in an index tree or subtree for all data retangles that d6overlap the argument rectangle. 
+func d6search(node *d6nodeT, rect d6rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) { + if node.isInternalNode() { + // This is an internal node in the tree + for index := 0; index < node.count; index++ { + if d6overlap(rect, node.branch[index].rect) { + var ok bool + foundCount, ok = d6search(node.branch[index].child, rect, foundCount, resultCallback) + if !ok { + // The callback indicated to stop searching + return foundCount, false + } + } + } + } else { + // This is a leaf node + for index := 0; index < node.count; index++ { + if d6overlap(rect, node.branch[index].rect) { + id := node.branch[index].data + foundCount++ + if !resultCallback(id) { + return foundCount, false // Don't continue searching + } + + } + } + } + return foundCount, true // Continue searching +} + +func d7fmin(a, b float64) float64 { + if a < b { + return a + } + return b +} +func d7fmax(a, b float64) float64 { + if a > b { + return a + } + return b +} + +const ( + d7numDims = 7 + d7maxNodes = 8 + d7minNodes = d7maxNodes / 2 + d7useSphericalVolume = true // Better split classification, may be slower on some systems +) + +var d7unitSphereVolume = []float64{ + 0.000000, 2.000000, 3.141593, // Dimension 0,1,2 + 4.188790, 4.934802, 5.263789, // Dimension 3,4,5 + 5.167713, 4.724766, 4.058712, // Dimension 6,7,8 + 3.298509, 2.550164, 1.884104, // Dimension 9,10,11 + 1.335263, 0.910629, 0.599265, // Dimension 12,13,14 + 0.381443, 0.235331, 0.140981, // Dimension 15,16,17 + 0.082146, 0.046622, 0.025807, // Dimension 18,19,20 +}[d7numDims] + +type d7RTree struct { + root *d7nodeT ///< Root of tree +} + +/// Minimal bounding rectangle (n-dimensional) +type d7rectT struct { + min [d7numDims]float64 ///< Min dimensions of bounding box + max [d7numDims]float64 ///< Max dimensions of bounding box +} + +/// May be data or may be another subtree +/// The parents level determines this. 
+/// If the parents level is 0, then this is data +type d7branchT struct { + rect d7rectT ///< Bounds + child *d7nodeT ///< Child node + data interface{} ///< Data Id or Ptr +} + +/// d7nodeT for each branch level +type d7nodeT struct { + count int ///< Count + level int ///< Leaf is zero, others positive + branch [d7maxNodes]d7branchT ///< Branch +} + +func (node *d7nodeT) isInternalNode() bool { + return (node.level > 0) // Not a leaf, but a internal node +} +func (node *d7nodeT) isLeaf() bool { + return (node.level == 0) // A leaf, contains data +} + +/// A link list of nodes for reinsertion after a delete operation +type d7listNodeT struct { + next *d7listNodeT ///< Next in list + node *d7nodeT ///< Node +} + +const d7notTaken = -1 // indicates that position + +/// Variables for finding a split partition +type d7partitionVarsT struct { + partition [d7maxNodes + 1]int + total int + minFill int + count [2]int + cover [2]d7rectT + area [2]float64 + + branchBuf [d7maxNodes + 1]d7branchT + branchCount int + coverSplit d7rectT + coverSplitArea float64 +} + +func d7New() *d7RTree { + // We only support machine word size simple data type eg. integer index or object pointer. + // Since we are storing as union with non data branch + return &d7RTree{ + root: &d7nodeT{}, + } +} + +/// Insert entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d7RTree) Insert(min, max [d7numDims]float64, dataId interface{}) { + var branch d7branchT + branch.data = dataId + for axis := 0; axis < d7numDims; axis++ { + branch.rect.min[axis] = min[axis] + branch.rect.max[axis] = max[axis] + } + d7insertRect(&branch, &tr.root, 0) +} + +/// Remove entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. 
+func (tr *d7RTree) Remove(min, max [d7numDims]float64, dataId interface{}) { + var rect d7rectT + for axis := 0; axis < d7numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + d7removeRect(&rect, dataId, &tr.root) +} + +/// Find all within d7search rectangle +/// \param a_min Min of d7search bounding rect +/// \param a_max Max of d7search bounding rect +/// \param a_searchResult d7search result array. Caller should set grow size. Function will reset, not append to array. +/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching +/// \param a_context User context to pass as parameter to a_resultCallback +/// \return Returns the number of entries found +func (tr *d7RTree) Search(min, max [d7numDims]float64, resultCallback func(data interface{}) bool) int { + var rect d7rectT + for axis := 0; axis < d7numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + foundCount, _ := d7search(tr.root, rect, 0, resultCallback) + return foundCount +} + +/// Count the data elements in this container. This is slow as no internal counter is maintained. +func (tr *d7RTree) Count() int { + var count int + d7countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d7RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d7nodeT{} +} + +func d7countRec(node *d7nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d7countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. 
+// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +func d7insertRectRec(branch *d7branchT, node *d7nodeT, newNode **d7nodeT, level int) bool { + // recurse until we reach the correct level for the new record. data records + // will always be called with a_level == 0 (leaf) + if node.level > level { + // Still above level for insertion, go down tree recursively + var otherNode *d7nodeT + //var newBranch d7branchT + + // find the optimal branch for this record + index := d7pickBranch(&branch.rect, node) + + // recursively insert this record into the picked branch + childWasSplit := d7insertRectRec(branch, node.branch[index].child, &otherNode, level) + + if !childWasSplit { + // Child was not split. Merge the bounding box of the new record with the + // existing bounding box + node.branch[index].rect = d7combineRect(&branch.rect, &(node.branch[index].rect)) + return false + } else { + // Child was split. The old branches are now re-partitioned to two nodes + // so we have to re-calculate the bounding boxes of each node + node.branch[index].rect = d7nodeCover(node.branch[index].child) + var newBranch d7branchT + newBranch.child = otherNode + newBranch.rect = d7nodeCover(otherNode) + + // The old node is already a child of a_node. Now add the newly-created + // node to a_node as well. a_node might be split because of that. + return d7addBranch(&newBranch, node, newNode) + } + } else if node.level == level { + // We have reached level for insertion. Add rect, split if necessary + return d7addBranch(branch, node, newNode) + } else { + // Should never occur + return false + } +} + +// Insert a data rectangle into an index structure. +// d7insertRect provides for splitting the root; +// returns 1 if root was split, 0 if it was not. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. 
+// InsertRect2 does the recursion. +// +func d7insertRect(branch *d7branchT, root **d7nodeT, level int) bool { + var newNode *d7nodeT + + if d7insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d7nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d7branchT + + // add old root node as a child of the new root + newBranch.rect = d7nodeCover(*root) + newBranch.child = *root + d7addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d7nodeCover(newNode) + newBranch.child = newNode + d7addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d7nodeCover(node *d7nodeT) d7rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d7combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d7addBranch(branch *d7branchT, node *d7nodeT, newNode **d7nodeT) bool { + if node.count < d7maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d7splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d7disconnectBranch(node *d7nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. 
Pick the one that will need the smallest increase +// in area to accomodate the new rectangle. This will result in the +// least total area for the covering rectangles in the current node. +// In case of a tie, pick the one which was smaller before, to get +// the best resolution when searching. +func d7pickBranch(rect *d7rectT, node *d7nodeT) int { + var firstTime bool = true + var increase float64 + var bestIncr float64 = -1 + var area float64 + var bestArea float64 + var best int + var tempRect d7rectT + + for index := 0; index < node.count; index++ { + curRect := &node.branch[index].rect + area = d7calcRectVolume(curRect) + tempRect = d7combineRect(rect, curRect) + increase = d7calcRectVolume(&tempRect) - area + if (increase < bestIncr) || firstTime { + best = index + bestArea = area + bestIncr = increase + firstTime = false + } else if (increase == bestIncr) && (area < bestArea) { + best = index + bestArea = area + bestIncr = increase + } + } + return best +} + +// Combine two rectangles into larger one containing both +func d7combineRect(rectA, rectB *d7rectT) d7rectT { + var newRect d7rectT + + for index := 0; index < d7numDims; index++ { + newRect.min[index] = d7fmin(rectA.min[index], rectB.min[index]) + newRect.max[index] = d7fmax(rectA.max[index], rectB.max[index]) + } + + return newRect +} + +// Split a node. +// Divides the nodes branches and the extra one between two nodes. +// Old node is one of the new ones, and one really new one is created. +// Tries more than one method for choosing a partition, uses best result. 
+func d7splitNode(node *d7nodeT, branch *d7branchT, newNode **d7nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d7partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d7getBranches(node, branch, parVars) + + // Find partition + d7choosePartition(parVars, d7minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d7nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d7loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d7rectVolume(rect *d7rectT) float64 { + var volume float64 = 1 + for index := 0; index < d7numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d7rectT +func d7rectSphericalVolume(rect *d7rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d7numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. 
+ if d7numDims == 5 { + return (radius * radius * radius * radius * radius * d7unitSphereVolume) + } else if d7numDims == 4 { + return (radius * radius * radius * radius * d7unitSphereVolume) + } else if d7numDims == 3 { + return (radius * radius * radius * d7unitSphereVolume) + } else if d7numDims == 2 { + return (radius * radius * d7unitSphereVolume) + } else { + return (math.Pow(radius, d7numDims) * d7unitSphereVolume) + } +} + +// Use one of the methods to calculate retangle volume +func d7calcRectVolume(rect *d7rectT) float64 { + if d7useSphericalVolume { + return d7rectSphericalVolume(rect) // Slower but helps certain merge cases + } else { // RTREE_USE_SPHERICAL_VOLUME + return d7rectVolume(rect) // Faster but can cause poor merges + } // RTREE_USE_SPHERICAL_VOLUME +} + +// Load branch buffer with branches from full node plus the extra branch. +func d7getBranches(node *d7nodeT, branch *d7branchT, parVars *d7partitionVarsT) { + // Load the branch buffer + for index := 0; index < d7maxNodes; index++ { + parVars.branchBuf[index] = node.branch[index] + } + parVars.branchBuf[d7maxNodes] = *branch + parVars.branchCount = d7maxNodes + 1 + + // Calculate rect containing all in the set + parVars.coverSplit = parVars.branchBuf[0].rect + for index := 1; index < d7maxNodes+1; index++ { + parVars.coverSplit = d7combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect) + } + parVars.coverSplitArea = d7calcRectVolume(&parVars.coverSplit) +} + +// Method #0 for choosing a partition: +// As the seeds for the two groups, pick the two rects that would waste the +// most area if covered by a single rectangle, i.e. evidently the worst pair +// to have in the same group. +// Of the remaining, one at a time is chosen to be put in one of the two groups. +// The one chosen is the one with the greatest difference in area expansion +// depending on which group - the rect most strongly attracted to one group +// and repelled from the other. 
+// If one group gets too full (more would force other group to violate min +// fill requirement) then other group gets the rest. +// These last are the ones that can go in either group most easily. +func d7choosePartition(parVars *d7partitionVarsT, minFill int) { + var biggestDiff float64 + var group, chosen, betterGroup int + + d7initParVars(parVars, parVars.branchCount, minFill) + d7pickSeeds(parVars) + + for ((parVars.count[0] + parVars.count[1]) < parVars.total) && + (parVars.count[0] < (parVars.total - parVars.minFill)) && + (parVars.count[1] < (parVars.total - parVars.minFill)) { + biggestDiff = -1 + for index := 0; index < parVars.total; index++ { + if d7notTaken == parVars.partition[index] { + curRect := &parVars.branchBuf[index].rect + rect0 := d7combineRect(curRect, &parVars.cover[0]) + rect1 := d7combineRect(curRect, &parVars.cover[1]) + growth0 := d7calcRectVolume(&rect0) - parVars.area[0] + growth1 := d7calcRectVolume(&rect1) - parVars.area[1] + diff := growth1 - growth0 + if diff >= 0 { + group = 0 + } else { + group = 1 + diff = -diff + } + + if diff > biggestDiff { + biggestDiff = diff + chosen = index + betterGroup = group + } else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) { + chosen = index + betterGroup = group + } + } + } + d7classify(chosen, betterGroup, parVars) + } + + // If one group too full, put remaining rects in the other + if (parVars.count[0] + parVars.count[1]) < parVars.total { + if parVars.count[0] >= parVars.total-parVars.minFill { + group = 1 + } else { + group = 0 + } + for index := 0; index < parVars.total; index++ { + if d7notTaken == parVars.partition[index] { + d7classify(index, group, parVars) + } + } + } +} + +// Copy branches from the buffer into two nodes according to the partition. 
+func d7loadNodes(nodeA, nodeB *d7nodeT, parVars *d7partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d7nodeT{nodeA, nodeB} + + // It is assured that d7addBranch here will not cause a node split. + d7addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d7partitionVarsT structure. +func d7initParVars(parVars *d7partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d7notTaken + } +} + +func d7pickSeeds(parVars *d7partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d7maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d7calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d7combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d7calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d7classify(seed0, 0, parVars) + d7classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. 
+func d7classify(index, group int, parVars *d7partitionVarsT) { + parVars.partition[index] = group + + // Calculate combined rect + if parVars.count[group] == 0 { + parVars.cover[group] = parVars.branchBuf[index].rect + } else { + parVars.cover[group] = d7combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group]) + } + + // Calculate volume of combined rect + parVars.area[group] = d7calcRectVolume(&parVars.cover[group]) + + parVars.count[group]++ +} + +// Delete a data rectangle from an index structure. +// Pass in a pointer to a d7rectT, the tid of the record, ptr to ptr to root node. +// Returns 1 if record not found, 0 if success. +// d7removeRect provides for eliminating the root. +func d7removeRect(rect *d7rectT, id interface{}, root **d7nodeT) bool { + var reInsertList *d7listNodeT + + if !d7removeRectRec(rect, id, *root, &reInsertList) { + // Found and deleted a data item + // Reinsert any branches from eliminated nodes + for reInsertList != nil { + tempNode := reInsertList.node + + for index := 0; index < tempNode.count; index++ { + // TODO go over this code. should I use (tempNode->m_level - 1)? + d7insertRect(&tempNode.branch[index], root, tempNode.level) + } + reInsertList = reInsertList.next + } + + // Check for redundant root (not leaf, 1 child) and eliminate TODO replace + // if with while? In case there is a whole branch of redundant roots... + if (*root).count == 1 && (*root).isInternalNode() { + tempNode := (*root).branch[0].child + *root = tempNode + } + return false + } else { + return true + } +} + +// Delete a rectangle from non-root part of an index structure. +// Called by d7removeRect. Descends tree recursively, +// merges branches on the way back up. +// Returns 1 if record not found, 0 if success. 
+func d7removeRectRec(rect *d7rectT, id interface{}, node *d7nodeT, listNode **d7listNodeT) bool { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + if d7overlap(*rect, node.branch[index].rect) { + if !d7removeRectRec(rect, id, node.branch[index].child, listNode) { + if node.branch[index].child.count >= d7minNodes { + // child removed, just resize parent rect + node.branch[index].rect = d7nodeCover(node.branch[index].child) + } else { + // child removed, not enough entries in node, eliminate node + d7reInsert(node.branch[index].child, listNode) + d7disconnectBranch(node, index) // Must return after this call as count has changed + } + return false + } + } + } + return true + } else { // A leaf node + for index := 0; index < node.count; index++ { + if node.branch[index].data == id { + d7disconnectBranch(node, index) // Must return after this call as count has changed + return false + } + } + return true + } +} + +// Decide whether two rectangles d7overlap. +func d7overlap(rectA, rectB d7rectT) bool { + for index := 0; index < d7numDims; index++ { + if rectA.min[index] > rectB.max[index] || + rectB.min[index] > rectA.max[index] { + return false + } + } + return true +} + +// Add a node to the reinsertion list. All its branches will later +// be reinserted into the index structure. +func d7reInsert(node *d7nodeT, listNode **d7listNodeT) { + newListNode := &d7listNodeT{} + newListNode.node = node + newListNode.next = *listNode + *listNode = newListNode +} + +// d7search in an index tree or subtree for all data retangles that d7overlap the argument rectangle. 
+func d7search(node *d7nodeT, rect d7rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) { + if node.isInternalNode() { + // This is an internal node in the tree + for index := 0; index < node.count; index++ { + if d7overlap(rect, node.branch[index].rect) { + var ok bool + foundCount, ok = d7search(node.branch[index].child, rect, foundCount, resultCallback) + if !ok { + // The callback indicated to stop searching + return foundCount, false + } + } + } + } else { + // This is a leaf node + for index := 0; index < node.count; index++ { + if d7overlap(rect, node.branch[index].rect) { + id := node.branch[index].data + foundCount++ + if !resultCallback(id) { + return foundCount, false // Don't continue searching + } + + } + } + } + return foundCount, true // Continue searching +} + +func d8fmin(a, b float64) float64 { + if a < b { + return a + } + return b +} +func d8fmax(a, b float64) float64 { + if a > b { + return a + } + return b +} + +const ( + d8numDims = 8 + d8maxNodes = 8 + d8minNodes = d8maxNodes / 2 + d8useSphericalVolume = true // Better split classification, may be slower on some systems +) + +var d8unitSphereVolume = []float64{ + 0.000000, 2.000000, 3.141593, // Dimension 0,1,2 + 4.188790, 4.934802, 5.263789, // Dimension 3,4,5 + 5.167713, 4.724766, 4.058712, // Dimension 6,7,8 + 3.298509, 2.550164, 1.884104, // Dimension 9,10,11 + 1.335263, 0.910629, 0.599265, // Dimension 12,13,14 + 0.381443, 0.235331, 0.140981, // Dimension 15,16,17 + 0.082146, 0.046622, 0.025807, // Dimension 18,19,20 +}[d8numDims] + +type d8RTree struct { + root *d8nodeT ///< Root of tree +} + +/// Minimal bounding rectangle (n-dimensional) +type d8rectT struct { + min [d8numDims]float64 ///< Min dimensions of bounding box + max [d8numDims]float64 ///< Max dimensions of bounding box +} + +/// May be data or may be another subtree +/// The parents level determines this. 
+/// If the parents level is 0, then this is data +type d8branchT struct { + rect d8rectT ///< Bounds + child *d8nodeT ///< Child node + data interface{} ///< Data Id or Ptr +} + +/// d8nodeT for each branch level +type d8nodeT struct { + count int ///< Count + level int ///< Leaf is zero, others positive + branch [d8maxNodes]d8branchT ///< Branch +} + +func (node *d8nodeT) isInternalNode() bool { + return (node.level > 0) // Not a leaf, but a internal node +} +func (node *d8nodeT) isLeaf() bool { + return (node.level == 0) // A leaf, contains data +} + +/// A link list of nodes for reinsertion after a delete operation +type d8listNodeT struct { + next *d8listNodeT ///< Next in list + node *d8nodeT ///< Node +} + +const d8notTaken = -1 // indicates that position + +/// Variables for finding a split partition +type d8partitionVarsT struct { + partition [d8maxNodes + 1]int + total int + minFill int + count [2]int + cover [2]d8rectT + area [2]float64 + + branchBuf [d8maxNodes + 1]d8branchT + branchCount int + coverSplit d8rectT + coverSplitArea float64 +} + +func d8New() *d8RTree { + // We only support machine word size simple data type eg. integer index or object pointer. + // Since we are storing as union with non data branch + return &d8RTree{ + root: &d8nodeT{}, + } +} + +/// Insert entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d8RTree) Insert(min, max [d8numDims]float64, dataId interface{}) { + var branch d8branchT + branch.data = dataId + for axis := 0; axis < d8numDims; axis++ { + branch.rect.min[axis] = min[axis] + branch.rect.max[axis] = max[axis] + } + d8insertRect(&branch, &tr.root, 0) +} + +/// Remove entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. 
+func (tr *d8RTree) Remove(min, max [d8numDims]float64, dataId interface{}) { + var rect d8rectT + for axis := 0; axis < d8numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + d8removeRect(&rect, dataId, &tr.root) +} + +/// Find all within d8search rectangle +/// \param a_min Min of d8search bounding rect +/// \param a_max Max of d8search bounding rect +/// \param a_searchResult d8search result array. Caller should set grow size. Function will reset, not append to array. +/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching +/// \param a_context User context to pass as parameter to a_resultCallback +/// \return Returns the number of entries found +func (tr *d8RTree) Search(min, max [d8numDims]float64, resultCallback func(data interface{}) bool) int { + var rect d8rectT + for axis := 0; axis < d8numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + foundCount, _ := d8search(tr.root, rect, 0, resultCallback) + return foundCount +} + +/// Count the data elements in this container. This is slow as no internal counter is maintained. +func (tr *d8RTree) Count() int { + var count int + d8countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d8RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d8nodeT{} +} + +func d8countRec(node *d8nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d8countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. 
+// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +func d8insertRectRec(branch *d8branchT, node *d8nodeT, newNode **d8nodeT, level int) bool { + // recurse until we reach the correct level for the new record. data records + // will always be called with a_level == 0 (leaf) + if node.level > level { + // Still above level for insertion, go down tree recursively + var otherNode *d8nodeT + //var newBranch d8branchT + + // find the optimal branch for this record + index := d8pickBranch(&branch.rect, node) + + // recursively insert this record into the picked branch + childWasSplit := d8insertRectRec(branch, node.branch[index].child, &otherNode, level) + + if !childWasSplit { + // Child was not split. Merge the bounding box of the new record with the + // existing bounding box + node.branch[index].rect = d8combineRect(&branch.rect, &(node.branch[index].rect)) + return false + } else { + // Child was split. The old branches are now re-partitioned to two nodes + // so we have to re-calculate the bounding boxes of each node + node.branch[index].rect = d8nodeCover(node.branch[index].child) + var newBranch d8branchT + newBranch.child = otherNode + newBranch.rect = d8nodeCover(otherNode) + + // The old node is already a child of a_node. Now add the newly-created + // node to a_node as well. a_node might be split because of that. + return d8addBranch(&newBranch, node, newNode) + } + } else if node.level == level { + // We have reached level for insertion. Add rect, split if necessary + return d8addBranch(branch, node, newNode) + } else { + // Should never occur + return false + } +} + +// Insert a data rectangle into an index structure. +// d8insertRect provides for splitting the root; +// returns 1 if root was split, 0 if it was not. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. 
+// InsertRect2 does the recursion. +// +func d8insertRect(branch *d8branchT, root **d8nodeT, level int) bool { + var newNode *d8nodeT + + if d8insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d8nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d8branchT + + // add old root node as a child of the new root + newBranch.rect = d8nodeCover(*root) + newBranch.child = *root + d8addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d8nodeCover(newNode) + newBranch.child = newNode + d8addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d8nodeCover(node *d8nodeT) d8rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d8combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d8addBranch(branch *d8branchT, node *d8nodeT, newNode **d8nodeT) bool { + if node.count < d8maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d8splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d8disconnectBranch(node *d8nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. 
Pick the one that will need the smallest increase
+// in area to accommodate the new rectangle. This will result in the
+// least total area for the covering rectangles in the current node.
+// In case of a tie, pick the one which was smaller before, to get
+// the best resolution when searching.
+func d8pickBranch(rect *d8rectT, node *d8nodeT) int {
+	var firstTime bool = true
+	var increase float64
+	var bestIncr float64 = -1
+	var area float64
+	var bestArea float64
+	var best int
+	var tempRect d8rectT
+
+	for index := 0; index < node.count; index++ {
+		curRect := &node.branch[index].rect
+		area = d8calcRectVolume(curRect)
+		tempRect = d8combineRect(rect, curRect)
+		increase = d8calcRectVolume(&tempRect) - area
+		if (increase < bestIncr) || firstTime {
+			best = index
+			bestArea = area
+			bestIncr = increase
+			firstTime = false
+		} else if (increase == bestIncr) && (area < bestArea) {
+			best = index
+			bestArea = area
+			bestIncr = increase
+		}
+	}
+	return best
+}
+
+// Combine two rectangles into larger one containing both
+func d8combineRect(rectA, rectB *d8rectT) d8rectT {
+	var newRect d8rectT
+
+	for index := 0; index < d8numDims; index++ {
+		newRect.min[index] = d8fmin(rectA.min[index], rectB.min[index])
+		newRect.max[index] = d8fmax(rectA.max[index], rectB.max[index])
+	}
+
+	return newRect
+}
+
+// Split a node.
+// Divides the nodes branches and the extra one between two nodes.
+// Old node is one of the new ones, and one really new one is created.
+// Tries more than one method for choosing a partition, uses best result.
+func d8splitNode(node *d8nodeT, branch *d8branchT, newNode **d8nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d8partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d8getBranches(node, branch, parVars) + + // Find partition + d8choosePartition(parVars, d8minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d8nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d8loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d8rectVolume(rect *d8rectT) float64 { + var volume float64 = 1 + for index := 0; index < d8numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d8rectT +func d8rectSphericalVolume(rect *d8rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d8numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. 
+ if d8numDims == 5 { + return (radius * radius * radius * radius * radius * d8unitSphereVolume) + } else if d8numDims == 4 { + return (radius * radius * radius * radius * d8unitSphereVolume) + } else if d8numDims == 3 { + return (radius * radius * radius * d8unitSphereVolume) + } else if d8numDims == 2 { + return (radius * radius * d8unitSphereVolume) + } else { + return (math.Pow(radius, d8numDims) * d8unitSphereVolume) + } +} + +// Use one of the methods to calculate retangle volume +func d8calcRectVolume(rect *d8rectT) float64 { + if d8useSphericalVolume { + return d8rectSphericalVolume(rect) // Slower but helps certain merge cases + } else { // RTREE_USE_SPHERICAL_VOLUME + return d8rectVolume(rect) // Faster but can cause poor merges + } // RTREE_USE_SPHERICAL_VOLUME +} + +// Load branch buffer with branches from full node plus the extra branch. +func d8getBranches(node *d8nodeT, branch *d8branchT, parVars *d8partitionVarsT) { + // Load the branch buffer + for index := 0; index < d8maxNodes; index++ { + parVars.branchBuf[index] = node.branch[index] + } + parVars.branchBuf[d8maxNodes] = *branch + parVars.branchCount = d8maxNodes + 1 + + // Calculate rect containing all in the set + parVars.coverSplit = parVars.branchBuf[0].rect + for index := 1; index < d8maxNodes+1; index++ { + parVars.coverSplit = d8combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect) + } + parVars.coverSplitArea = d8calcRectVolume(&parVars.coverSplit) +} + +// Method #0 for choosing a partition: +// As the seeds for the two groups, pick the two rects that would waste the +// most area if covered by a single rectangle, i.e. evidently the worst pair +// to have in the same group. +// Of the remaining, one at a time is chosen to be put in one of the two groups. +// The one chosen is the one with the greatest difference in area expansion +// depending on which group - the rect most strongly attracted to one group +// and repelled from the other. 
+// If one group gets too full (more would force other group to violate min +// fill requirement) then other group gets the rest. +// These last are the ones that can go in either group most easily. +func d8choosePartition(parVars *d8partitionVarsT, minFill int) { + var biggestDiff float64 + var group, chosen, betterGroup int + + d8initParVars(parVars, parVars.branchCount, minFill) + d8pickSeeds(parVars) + + for ((parVars.count[0] + parVars.count[1]) < parVars.total) && + (parVars.count[0] < (parVars.total - parVars.minFill)) && + (parVars.count[1] < (parVars.total - parVars.minFill)) { + biggestDiff = -1 + for index := 0; index < parVars.total; index++ { + if d8notTaken == parVars.partition[index] { + curRect := &parVars.branchBuf[index].rect + rect0 := d8combineRect(curRect, &parVars.cover[0]) + rect1 := d8combineRect(curRect, &parVars.cover[1]) + growth0 := d8calcRectVolume(&rect0) - parVars.area[0] + growth1 := d8calcRectVolume(&rect1) - parVars.area[1] + diff := growth1 - growth0 + if diff >= 0 { + group = 0 + } else { + group = 1 + diff = -diff + } + + if diff > biggestDiff { + biggestDiff = diff + chosen = index + betterGroup = group + } else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) { + chosen = index + betterGroup = group + } + } + } + d8classify(chosen, betterGroup, parVars) + } + + // If one group too full, put remaining rects in the other + if (parVars.count[0] + parVars.count[1]) < parVars.total { + if parVars.count[0] >= parVars.total-parVars.minFill { + group = 1 + } else { + group = 0 + } + for index := 0; index < parVars.total; index++ { + if d8notTaken == parVars.partition[index] { + d8classify(index, group, parVars) + } + } + } +} + +// Copy branches from the buffer into two nodes according to the partition. 
+func d8loadNodes(nodeA, nodeB *d8nodeT, parVars *d8partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d8nodeT{nodeA, nodeB} + + // It is assured that d8addBranch here will not cause a node split. + d8addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d8partitionVarsT structure. +func d8initParVars(parVars *d8partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d8notTaken + } +} + +func d8pickSeeds(parVars *d8partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d8maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d8calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d8combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d8calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d8classify(seed0, 0, parVars) + d8classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. 
+func d8classify(index, group int, parVars *d8partitionVarsT) { + parVars.partition[index] = group + + // Calculate combined rect + if parVars.count[group] == 0 { + parVars.cover[group] = parVars.branchBuf[index].rect + } else { + parVars.cover[group] = d8combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group]) + } + + // Calculate volume of combined rect + parVars.area[group] = d8calcRectVolume(&parVars.cover[group]) + + parVars.count[group]++ +} + +// Delete a data rectangle from an index structure. +// Pass in a pointer to a d8rectT, the tid of the record, ptr to ptr to root node. +// Returns 1 if record not found, 0 if success. +// d8removeRect provides for eliminating the root. +func d8removeRect(rect *d8rectT, id interface{}, root **d8nodeT) bool { + var reInsertList *d8listNodeT + + if !d8removeRectRec(rect, id, *root, &reInsertList) { + // Found and deleted a data item + // Reinsert any branches from eliminated nodes + for reInsertList != nil { + tempNode := reInsertList.node + + for index := 0; index < tempNode.count; index++ { + // TODO go over this code. should I use (tempNode->m_level - 1)? + d8insertRect(&tempNode.branch[index], root, tempNode.level) + } + reInsertList = reInsertList.next + } + + // Check for redundant root (not leaf, 1 child) and eliminate TODO replace + // if with while? In case there is a whole branch of redundant roots... + if (*root).count == 1 && (*root).isInternalNode() { + tempNode := (*root).branch[0].child + *root = tempNode + } + return false + } else { + return true + } +} + +// Delete a rectangle from non-root part of an index structure. +// Called by d8removeRect. Descends tree recursively, +// merges branches on the way back up. +// Returns 1 if record not found, 0 if success. 
+func d8removeRectRec(rect *d8rectT, id interface{}, node *d8nodeT, listNode **d8listNodeT) bool { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + if d8overlap(*rect, node.branch[index].rect) { + if !d8removeRectRec(rect, id, node.branch[index].child, listNode) { + if node.branch[index].child.count >= d8minNodes { + // child removed, just resize parent rect + node.branch[index].rect = d8nodeCover(node.branch[index].child) + } else { + // child removed, not enough entries in node, eliminate node + d8reInsert(node.branch[index].child, listNode) + d8disconnectBranch(node, index) // Must return after this call as count has changed + } + return false + } + } + } + return true + } else { // A leaf node + for index := 0; index < node.count; index++ { + if node.branch[index].data == id { + d8disconnectBranch(node, index) // Must return after this call as count has changed + return false + } + } + return true + } +} + +// Decide whether two rectangles d8overlap. +func d8overlap(rectA, rectB d8rectT) bool { + for index := 0; index < d8numDims; index++ { + if rectA.min[index] > rectB.max[index] || + rectB.min[index] > rectA.max[index] { + return false + } + } + return true +} + +// Add a node to the reinsertion list. All its branches will later +// be reinserted into the index structure. +func d8reInsert(node *d8nodeT, listNode **d8listNodeT) { + newListNode := &d8listNodeT{} + newListNode.node = node + newListNode.next = *listNode + *listNode = newListNode +} + +// d8search in an index tree or subtree for all data retangles that d8overlap the argument rectangle. 
+func d8search(node *d8nodeT, rect d8rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) { + if node.isInternalNode() { + // This is an internal node in the tree + for index := 0; index < node.count; index++ { + if d8overlap(rect, node.branch[index].rect) { + var ok bool + foundCount, ok = d8search(node.branch[index].child, rect, foundCount, resultCallback) + if !ok { + // The callback indicated to stop searching + return foundCount, false + } + } + } + } else { + // This is a leaf node + for index := 0; index < node.count; index++ { + if d8overlap(rect, node.branch[index].rect) { + id := node.branch[index].data + foundCount++ + if !resultCallback(id) { + return foundCount, false // Don't continue searching + } + + } + } + } + return foundCount, true // Continue searching +} + +func d9fmin(a, b float64) float64 { + if a < b { + return a + } + return b +} +func d9fmax(a, b float64) float64 { + if a > b { + return a + } + return b +} + +const ( + d9numDims = 9 + d9maxNodes = 8 + d9minNodes = d9maxNodes / 2 + d9useSphericalVolume = true // Better split classification, may be slower on some systems +) + +var d9unitSphereVolume = []float64{ + 0.000000, 2.000000, 3.141593, // Dimension 0,1,2 + 4.188790, 4.934802, 5.263789, // Dimension 3,4,5 + 5.167713, 4.724766, 4.058712, // Dimension 6,7,8 + 3.298509, 2.550164, 1.884104, // Dimension 9,10,11 + 1.335263, 0.910629, 0.599265, // Dimension 12,13,14 + 0.381443, 0.235331, 0.140981, // Dimension 15,16,17 + 0.082146, 0.046622, 0.025807, // Dimension 18,19,20 +}[d9numDims] + +type d9RTree struct { + root *d9nodeT ///< Root of tree +} + +/// Minimal bounding rectangle (n-dimensional) +type d9rectT struct { + min [d9numDims]float64 ///< Min dimensions of bounding box + max [d9numDims]float64 ///< Max dimensions of bounding box +} + +/// May be data or may be another subtree +/// The parents level determines this. 
+/// If the parents level is 0, then this is data +type d9branchT struct { + rect d9rectT ///< Bounds + child *d9nodeT ///< Child node + data interface{} ///< Data Id or Ptr +} + +/// d9nodeT for each branch level +type d9nodeT struct { + count int ///< Count + level int ///< Leaf is zero, others positive + branch [d9maxNodes]d9branchT ///< Branch +} + +func (node *d9nodeT) isInternalNode() bool { + return (node.level > 0) // Not a leaf, but a internal node +} +func (node *d9nodeT) isLeaf() bool { + return (node.level == 0) // A leaf, contains data +} + +/// A link list of nodes for reinsertion after a delete operation +type d9listNodeT struct { + next *d9listNodeT ///< Next in list + node *d9nodeT ///< Node +} + +const d9notTaken = -1 // indicates that position + +/// Variables for finding a split partition +type d9partitionVarsT struct { + partition [d9maxNodes + 1]int + total int + minFill int + count [2]int + cover [2]d9rectT + area [2]float64 + + branchBuf [d9maxNodes + 1]d9branchT + branchCount int + coverSplit d9rectT + coverSplitArea float64 +} + +func d9New() *d9RTree { + // We only support machine word size simple data type eg. integer index or object pointer. + // Since we are storing as union with non data branch + return &d9RTree{ + root: &d9nodeT{}, + } +} + +/// Insert entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d9RTree) Insert(min, max [d9numDims]float64, dataId interface{}) { + var branch d9branchT + branch.data = dataId + for axis := 0; axis < d9numDims; axis++ { + branch.rect.min[axis] = min[axis] + branch.rect.max[axis] = max[axis] + } + d9insertRect(&branch, &tr.root, 0) +} + +/// Remove entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. 
+func (tr *d9RTree) Remove(min, max [d9numDims]float64, dataId interface{}) { + var rect d9rectT + for axis := 0; axis < d9numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + d9removeRect(&rect, dataId, &tr.root) +} + +/// Find all within d9search rectangle +/// \param a_min Min of d9search bounding rect +/// \param a_max Max of d9search bounding rect +/// \param a_searchResult d9search result array. Caller should set grow size. Function will reset, not append to array. +/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching +/// \param a_context User context to pass as parameter to a_resultCallback +/// \return Returns the number of entries found +func (tr *d9RTree) Search(min, max [d9numDims]float64, resultCallback func(data interface{}) bool) int { + var rect d9rectT + for axis := 0; axis < d9numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + foundCount, _ := d9search(tr.root, rect, 0, resultCallback) + return foundCount +} + +/// Count the data elements in this container. This is slow as no internal counter is maintained. +func (tr *d9RTree) Count() int { + var count int + d9countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d9RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d9nodeT{} +} + +func d9countRec(node *d9nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d9countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. 
+// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +func d9insertRectRec(branch *d9branchT, node *d9nodeT, newNode **d9nodeT, level int) bool { + // recurse until we reach the correct level for the new record. data records + // will always be called with a_level == 0 (leaf) + if node.level > level { + // Still above level for insertion, go down tree recursively + var otherNode *d9nodeT + //var newBranch d9branchT + + // find the optimal branch for this record + index := d9pickBranch(&branch.rect, node) + + // recursively insert this record into the picked branch + childWasSplit := d9insertRectRec(branch, node.branch[index].child, &otherNode, level) + + if !childWasSplit { + // Child was not split. Merge the bounding box of the new record with the + // existing bounding box + node.branch[index].rect = d9combineRect(&branch.rect, &(node.branch[index].rect)) + return false + } else { + // Child was split. The old branches are now re-partitioned to two nodes + // so we have to re-calculate the bounding boxes of each node + node.branch[index].rect = d9nodeCover(node.branch[index].child) + var newBranch d9branchT + newBranch.child = otherNode + newBranch.rect = d9nodeCover(otherNode) + + // The old node is already a child of a_node. Now add the newly-created + // node to a_node as well. a_node might be split because of that. + return d9addBranch(&newBranch, node, newNode) + } + } else if node.level == level { + // We have reached level for insertion. Add rect, split if necessary + return d9addBranch(branch, node, newNode) + } else { + // Should never occur + return false + } +} + +// Insert a data rectangle into an index structure. +// d9insertRect provides for splitting the root; +// returns 1 if root was split, 0 if it was not. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. 
+// InsertRect2 does the recursion. +// +func d9insertRect(branch *d9branchT, root **d9nodeT, level int) bool { + var newNode *d9nodeT + + if d9insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d9nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d9branchT + + // add old root node as a child of the new root + newBranch.rect = d9nodeCover(*root) + newBranch.child = *root + d9addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d9nodeCover(newNode) + newBranch.child = newNode + d9addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d9nodeCover(node *d9nodeT) d9rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d9combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d9addBranch(branch *d9branchT, node *d9nodeT, newNode **d9nodeT) bool { + if node.count < d9maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d9splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d9disconnectBranch(node *d9nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. 
Pick the one that will need the smallest increase +// in area to accomodate the new rectangle. This will result in the +// least total area for the covering rectangles in the current node. +// In case of a tie, pick the one which was smaller before, to get +// the best resolution when searching. +func d9pickBranch(rect *d9rectT, node *d9nodeT) int { + var firstTime bool = true + var increase float64 + var bestIncr float64 = -1 + var area float64 + var bestArea float64 + var best int + var tempRect d9rectT + + for index := 0; index < node.count; index++ { + curRect := &node.branch[index].rect + area = d9calcRectVolume(curRect) + tempRect = d9combineRect(rect, curRect) + increase = d9calcRectVolume(&tempRect) - area + if (increase < bestIncr) || firstTime { + best = index + bestArea = area + bestIncr = increase + firstTime = false + } else if (increase == bestIncr) && (area < bestArea) { + best = index + bestArea = area + bestIncr = increase + } + } + return best +} + +// Combine two rectangles into larger one containing both +func d9combineRect(rectA, rectB *d9rectT) d9rectT { + var newRect d9rectT + + for index := 0; index < d9numDims; index++ { + newRect.min[index] = d9fmin(rectA.min[index], rectB.min[index]) + newRect.max[index] = d9fmax(rectA.max[index], rectB.max[index]) + } + + return newRect +} + +// Split a node. +// Divides the nodes branches and the extra one between two nodes. +// Old node is one of the new ones, and one really new one is created. +// Tries more than one method for choosing a partition, uses best result. 
+func d9splitNode(node *d9nodeT, branch *d9branchT, newNode **d9nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d9partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d9getBranches(node, branch, parVars) + + // Find partition + d9choosePartition(parVars, d9minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d9nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d9loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d9rectVolume(rect *d9rectT) float64 { + var volume float64 = 1 + for index := 0; index < d9numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d9rectT +func d9rectSphericalVolume(rect *d9rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d9numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. 
+ if d9numDims == 5 { + return (radius * radius * radius * radius * radius * d9unitSphereVolume) + } else if d9numDims == 4 { + return (radius * radius * radius * radius * d9unitSphereVolume) + } else if d9numDims == 3 { + return (radius * radius * radius * d9unitSphereVolume) + } else if d9numDims == 2 { + return (radius * radius * d9unitSphereVolume) + } else { + return (math.Pow(radius, d9numDims) * d9unitSphereVolume) + } +} + +// Use one of the methods to calculate retangle volume +func d9calcRectVolume(rect *d9rectT) float64 { + if d9useSphericalVolume { + return d9rectSphericalVolume(rect) // Slower but helps certain merge cases + } else { // RTREE_USE_SPHERICAL_VOLUME + return d9rectVolume(rect) // Faster but can cause poor merges + } // RTREE_USE_SPHERICAL_VOLUME +} + +// Load branch buffer with branches from full node plus the extra branch. +func d9getBranches(node *d9nodeT, branch *d9branchT, parVars *d9partitionVarsT) { + // Load the branch buffer + for index := 0; index < d9maxNodes; index++ { + parVars.branchBuf[index] = node.branch[index] + } + parVars.branchBuf[d9maxNodes] = *branch + parVars.branchCount = d9maxNodes + 1 + + // Calculate rect containing all in the set + parVars.coverSplit = parVars.branchBuf[0].rect + for index := 1; index < d9maxNodes+1; index++ { + parVars.coverSplit = d9combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect) + } + parVars.coverSplitArea = d9calcRectVolume(&parVars.coverSplit) +} + +// Method #0 for choosing a partition: +// As the seeds for the two groups, pick the two rects that would waste the +// most area if covered by a single rectangle, i.e. evidently the worst pair +// to have in the same group. +// Of the remaining, one at a time is chosen to be put in one of the two groups. +// The one chosen is the one with the greatest difference in area expansion +// depending on which group - the rect most strongly attracted to one group +// and repelled from the other. 
// If one group gets too full (more would force other group to violate min
// fill requirement) then other group gets the rest.
// These last are the ones that can go in either group most easily.
func d9choosePartition(parVars *d9partitionVarsT, minFill int) {
	var biggestDiff float64
	var group, chosen, betterGroup int

	d9initParVars(parVars, parVars.branchCount, minFill)
	// Seed each group with one of the two most mutually wasteful rects.
	d9pickSeeds(parVars)

	// Greedily assign branches while both groups can still accept members
	// without forcing the other below the minimum fill.
	for ((parVars.count[0] + parVars.count[1]) < parVars.total) &&
		(parVars.count[0] < (parVars.total - parVars.minFill)) &&
		(parVars.count[1] < (parVars.total - parVars.minFill)) {
		biggestDiff = -1
		for index := 0; index < parVars.total; index++ {
			if d9notTaken == parVars.partition[index] {
				curRect := &parVars.branchBuf[index].rect
				rect0 := d9combineRect(curRect, &parVars.cover[0])
				rect1 := d9combineRect(curRect, &parVars.cover[1])
				growth0 := d9calcRectVolume(&rect0) - parVars.area[0]
				growth1 := d9calcRectVolume(&rect1) - parVars.area[1]
				diff := growth1 - growth0
				// group = side whose cover grows less; diff = strength of preference.
				if diff >= 0 {
					group = 0
				} else {
					group = 1
					diff = -diff
				}

				if diff > biggestDiff {
					biggestDiff = diff
					chosen = index
					betterGroup = group
				} else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) {
					// Tie: prefer the branch whose preferred group is emptier.
					chosen = index
					betterGroup = group
				}
			}
		}
		// Commit the most strongly attracted branch to its preferred group.
		d9classify(chosen, betterGroup, parVars)
	}

	// If one group too full, put remaining rects in the other
	if (parVars.count[0] + parVars.count[1]) < parVars.total {
		if parVars.count[0] >= parVars.total-parVars.minFill {
			group = 1
		} else {
			group = 0
		}
		for index := 0; index < parVars.total; index++ {
			if d9notTaken == parVars.partition[index] {
				d9classify(index, group, parVars)
			}
		}
	}
}

// Copy branches from the buffer into two nodes according to the partition.
+func d9loadNodes(nodeA, nodeB *d9nodeT, parVars *d9partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d9nodeT{nodeA, nodeB} + + // It is assured that d9addBranch here will not cause a node split. + d9addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d9partitionVarsT structure. +func d9initParVars(parVars *d9partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d9notTaken + } +} + +func d9pickSeeds(parVars *d9partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d9maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d9calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d9combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d9calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d9classify(seed0, 0, parVars) + d9classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. 
func d9classify(index, group int, parVars *d9partitionVarsT) {
	parVars.partition[index] = group

	// Calculate combined rect
	if parVars.count[group] == 0 {
		// First member: the group's cover is exactly this branch's rect.
		parVars.cover[group] = parVars.branchBuf[index].rect
	} else {
		parVars.cover[group] = d9combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group])
	}

	// Calculate volume of combined rect
	parVars.area[group] = d9calcRectVolume(&parVars.cover[group])

	parVars.count[group]++
}

// Delete a data rectangle from an index structure.
// Pass in a pointer to a d9rectT, the tid of the record, ptr to ptr to root node.
// Returns 1 if record not found, 0 if success.
// d9removeRect provides for eliminating the root.
func d9removeRect(rect *d9rectT, id interface{}, root **d9nodeT) bool {
	var reInsertList *d9listNodeT

	if !d9removeRectRec(rect, id, *root, &reInsertList) {
		// Found and deleted a data item
		// Reinsert any branches from eliminated nodes
		for reInsertList != nil {
			tempNode := reInsertList.node

			for index := 0; index < tempNode.count; index++ {
				// NOTE(review): d9insertRectRec places a branch INTO a node whose
				// level equals the level argument, so tempNode.level re-inserts
				// these branches at their original height — looks correct, but
				// confirm against the original C implementation's TODO.
				d9insertRect(&tempNode.branch[index], root, tempNode.level)
			}
			reInsertList = reInsertList.next
		}

		// Check for redundant root (not leaf, 1 child) and eliminate. TODO replace
		// if with while? In case there is a whole branch of redundant roots...
		if (*root).count == 1 && (*root).isInternalNode() {
			tempNode := (*root).branch[0].child
			*root = tempNode
		}
		return false
	} else {
		return true
	}
}

// Delete a rectangle from non-root part of an index structure.
// Called by d9removeRect. Descends tree recursively,
// merges branches on the way back up.
// Returns 1 if record not found, 0 if success.
func d9removeRectRec(rect *d9rectT, id interface{}, node *d9nodeT, listNode **d9listNodeT) bool {
	if node.isInternalNode() { // not a leaf node
		for index := 0; index < node.count; index++ {
			if d9overlap(*rect, node.branch[index].rect) {
				if !d9removeRectRec(rect, id, node.branch[index].child, listNode) {
					if node.branch[index].child.count >= d9minNodes {
						// child removed, just resize parent rect
						node.branch[index].rect = d9nodeCover(node.branch[index].child)
					} else {
						// child removed, not enough entries in node, eliminate node:
						// queue its surviving branches for reinsertion, then unlink it.
						d9reInsert(node.branch[index].child, listNode)
						d9disconnectBranch(node, index) // Must return after this call as count has changed
					}
					// Stop at the first successful removal (false == success).
					return false
				}
			}
		}
		return true
	} else { // A leaf node
		for index := 0; index < node.count; index++ {
			if node.branch[index].data == id {
				d9disconnectBranch(node, index) // Must return after this call as count has changed
				return false
			}
		}
		return true
	}
}

// Decide whether two rectangles d9overlap.
func d9overlap(rectA, rectB d9rectT) bool {
	for index := 0; index < d9numDims; index++ {
		// Disjoint along any single axis means the rectangles cannot overlap.
		if rectA.min[index] > rectB.max[index] ||
			rectB.min[index] > rectA.max[index] {
			return false
		}
	}
	return true
}

// Add a node to the reinsertion list. All its branches will later
// be reinserted into the index structure.
func d9reInsert(node *d9nodeT, listNode **d9listNodeT) {
	// Push onto the front of the singly-linked reinsertion list.
	newListNode := &d9listNodeT{}
	newListNode.node = node
	newListNode.next = *listNode
	*listNode = newListNode
}

// d9search in an index tree or subtree for all data rectangles that d9overlap the argument rectangle.
+func d9search(node *d9nodeT, rect d9rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) { + if node.isInternalNode() { + // This is an internal node in the tree + for index := 0; index < node.count; index++ { + if d9overlap(rect, node.branch[index].rect) { + var ok bool + foundCount, ok = d9search(node.branch[index].child, rect, foundCount, resultCallback) + if !ok { + // The callback indicated to stop searching + return foundCount, false + } + } + } + } else { + // This is a leaf node + for index := 0; index < node.count; index++ { + if d9overlap(rect, node.branch[index].rect) { + id := node.branch[index].data + foundCount++ + if !resultCallback(id) { + return foundCount, false // Don't continue searching + } + + } + } + } + return foundCount, true // Continue searching +} + +func d10fmin(a, b float64) float64 { + if a < b { + return a + } + return b +} +func d10fmax(a, b float64) float64 { + if a > b { + return a + } + return b +} + +const ( + d10numDims = 10 + d10maxNodes = 8 + d10minNodes = d10maxNodes / 2 + d10useSphericalVolume = true // Better split classification, may be slower on some systems +) + +var d10unitSphereVolume = []float64{ + 0.000000, 2.000000, 3.141593, // Dimension 0,1,2 + 4.188790, 4.934802, 5.263789, // Dimension 3,4,5 + 5.167713, 4.724766, 4.058712, // Dimension 6,7,8 + 3.298509, 2.550164, 1.884104, // Dimension 9,10,11 + 1.335263, 0.910629, 0.599265, // Dimension 12,13,14 + 0.381443, 0.235331, 0.140981, // Dimension 15,16,17 + 0.082146, 0.046622, 0.025807, // Dimension 18,19,20 +}[d10numDims] + +type d10RTree struct { + root *d10nodeT ///< Root of tree +} + +/// Minimal bounding rectangle (n-dimensional) +type d10rectT struct { + min [d10numDims]float64 ///< Min dimensions of bounding box + max [d10numDims]float64 ///< Max dimensions of bounding box +} + +/// May be data or may be another subtree +/// The parents level determines this. 
+/// If the parents level is 0, then this is data +type d10branchT struct { + rect d10rectT ///< Bounds + child *d10nodeT ///< Child node + data interface{} ///< Data Id or Ptr +} + +/// d10nodeT for each branch level +type d10nodeT struct { + count int ///< Count + level int ///< Leaf is zero, others positive + branch [d10maxNodes]d10branchT ///< Branch +} + +func (node *d10nodeT) isInternalNode() bool { + return (node.level > 0) // Not a leaf, but a internal node +} +func (node *d10nodeT) isLeaf() bool { + return (node.level == 0) // A leaf, contains data +} + +/// A link list of nodes for reinsertion after a delete operation +type d10listNodeT struct { + next *d10listNodeT ///< Next in list + node *d10nodeT ///< Node +} + +const d10notTaken = -1 // indicates that position + +/// Variables for finding a split partition +type d10partitionVarsT struct { + partition [d10maxNodes + 1]int + total int + minFill int + count [2]int + cover [2]d10rectT + area [2]float64 + + branchBuf [d10maxNodes + 1]d10branchT + branchCount int + coverSplit d10rectT + coverSplitArea float64 +} + +func d10New() *d10RTree { + // We only support machine word size simple data type eg. integer index or object pointer. + // Since we are storing as union with non data branch + return &d10RTree{ + root: &d10nodeT{}, + } +} + +/// Insert entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d10RTree) Insert(min, max [d10numDims]float64, dataId interface{}) { + var branch d10branchT + branch.data = dataId + for axis := 0; axis < d10numDims; axis++ { + branch.rect.min[axis] = min[axis] + branch.rect.max[axis] = max[axis] + } + d10insertRect(&branch, &tr.root, 0) +} + +/// Remove entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. 
+func (tr *d10RTree) Remove(min, max [d10numDims]float64, dataId interface{}) { + var rect d10rectT + for axis := 0; axis < d10numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + d10removeRect(&rect, dataId, &tr.root) +} + +/// Find all within d10search rectangle +/// \param a_min Min of d10search bounding rect +/// \param a_max Max of d10search bounding rect +/// \param a_searchResult d10search result array. Caller should set grow size. Function will reset, not append to array. +/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching +/// \param a_context User context to pass as parameter to a_resultCallback +/// \return Returns the number of entries found +func (tr *d10RTree) Search(min, max [d10numDims]float64, resultCallback func(data interface{}) bool) int { + var rect d10rectT + for axis := 0; axis < d10numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + foundCount, _ := d10search(tr.root, rect, 0, resultCallback) + return foundCount +} + +/// Count the data elements in this container. This is slow as no internal counter is maintained. +func (tr *d10RTree) Count() int { + var count int + d10countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d10RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d10nodeT{} +} + +func d10countRec(node *d10nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d10countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. 
// The level argument specifies the number of steps up from the leaf
// level to insert; e.g. a data rectangle goes in at level = 0.
func d10insertRectRec(branch *d10branchT, node *d10nodeT, newNode **d10nodeT, level int) bool {
	// recurse until we reach the correct level for the new record. data records
	// will always be called with level == 0 (leaf)
	if node.level > level {
		// Still above level for insertion, go down tree recursively
		var otherNode *d10nodeT

		// find the optimal branch for this record (least volume increase)
		index := d10pickBranch(&branch.rect, node)

		// recursively insert this record into the picked branch
		childWasSplit := d10insertRectRec(branch, node.branch[index].child, &otherNode, level)

		if !childWasSplit {
			// Child was not split. Merge the bounding box of the new record with the
			// existing bounding box
			node.branch[index].rect = d10combineRect(&branch.rect, &(node.branch[index].rect))
			return false
		} else {
			// Child was split. The old branches are now re-partitioned to two nodes
			// so we have to re-calculate the bounding boxes of each node
			node.branch[index].rect = d10nodeCover(node.branch[index].child)
			var newBranch d10branchT
			newBranch.child = otherNode
			newBranch.rect = d10nodeCover(otherNode)

			// The old node is already a child of node. Now add the newly-created
			// split sibling to node as well. node might be split because of that.
			return d10addBranch(&newBranch, node, newNode)
		}
	} else if node.level == level {
		// We have reached level for insertion. Add rect, split if necessary
		return d10addBranch(branch, node, newNode)
	} else {
		// Should never occur: requested level is below this node's level.
		return false
	}
}

// Insert a data rectangle into an index structure.
// d10insertRect provides for splitting the root;
// returns 1 if root was split, 0 if it was not.
// The level argument specifies the number of steps up from the leaf
// level to insert; e.g. a data rectangle goes in at level = 0.
+// InsertRect2 does the recursion. +// +func d10insertRect(branch *d10branchT, root **d10nodeT, level int) bool { + var newNode *d10nodeT + + if d10insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d10nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d10branchT + + // add old root node as a child of the new root + newBranch.rect = d10nodeCover(*root) + newBranch.child = *root + d10addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d10nodeCover(newNode) + newBranch.child = newNode + d10addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d10nodeCover(node *d10nodeT) d10rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d10combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d10addBranch(branch *d10branchT, node *d10nodeT, newNode **d10nodeT) bool { + if node.count < d10maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d10splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d10disconnectBranch(node *d10nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. 
Pick the one that will need the smallest increase +// in area to accomodate the new rectangle. This will result in the +// least total area for the covering rectangles in the current node. +// In case of a tie, pick the one which was smaller before, to get +// the best resolution when searching. +func d10pickBranch(rect *d10rectT, node *d10nodeT) int { + var firstTime bool = true + var increase float64 + var bestIncr float64 = -1 + var area float64 + var bestArea float64 + var best int + var tempRect d10rectT + + for index := 0; index < node.count; index++ { + curRect := &node.branch[index].rect + area = d10calcRectVolume(curRect) + tempRect = d10combineRect(rect, curRect) + increase = d10calcRectVolume(&tempRect) - area + if (increase < bestIncr) || firstTime { + best = index + bestArea = area + bestIncr = increase + firstTime = false + } else if (increase == bestIncr) && (area < bestArea) { + best = index + bestArea = area + bestIncr = increase + } + } + return best +} + +// Combine two rectangles into larger one containing both +func d10combineRect(rectA, rectB *d10rectT) d10rectT { + var newRect d10rectT + + for index := 0; index < d10numDims; index++ { + newRect.min[index] = d10fmin(rectA.min[index], rectB.min[index]) + newRect.max[index] = d10fmax(rectA.max[index], rectB.max[index]) + } + + return newRect +} + +// Split a node. +// Divides the nodes branches and the extra one between two nodes. +// Old node is one of the new ones, and one really new one is created. +// Tries more than one method for choosing a partition, uses best result. 
+func d10splitNode(node *d10nodeT, branch *d10branchT, newNode **d10nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d10partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d10getBranches(node, branch, parVars) + + // Find partition + d10choosePartition(parVars, d10minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d10nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d10loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d10rectVolume(rect *d10rectT) float64 { + var volume float64 = 1 + for index := 0; index < d10numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d10rectT +func d10rectSphericalVolume(rect *d10rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d10numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. 
+ if d10numDims == 5 { + return (radius * radius * radius * radius * radius * d10unitSphereVolume) + } else if d10numDims == 4 { + return (radius * radius * radius * radius * d10unitSphereVolume) + } else if d10numDims == 3 { + return (radius * radius * radius * d10unitSphereVolume) + } else if d10numDims == 2 { + return (radius * radius * d10unitSphereVolume) + } else { + return (math.Pow(radius, d10numDims) * d10unitSphereVolume) + } +} + +// Use one of the methods to calculate retangle volume +func d10calcRectVolume(rect *d10rectT) float64 { + if d10useSphericalVolume { + return d10rectSphericalVolume(rect) // Slower but helps certain merge cases + } else { // RTREE_USE_SPHERICAL_VOLUME + return d10rectVolume(rect) // Faster but can cause poor merges + } // RTREE_USE_SPHERICAL_VOLUME +} + +// Load branch buffer with branches from full node plus the extra branch. +func d10getBranches(node *d10nodeT, branch *d10branchT, parVars *d10partitionVarsT) { + // Load the branch buffer + for index := 0; index < d10maxNodes; index++ { + parVars.branchBuf[index] = node.branch[index] + } + parVars.branchBuf[d10maxNodes] = *branch + parVars.branchCount = d10maxNodes + 1 + + // Calculate rect containing all in the set + parVars.coverSplit = parVars.branchBuf[0].rect + for index := 1; index < d10maxNodes+1; index++ { + parVars.coverSplit = d10combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect) + } + parVars.coverSplitArea = d10calcRectVolume(&parVars.coverSplit) +} + +// Method #0 for choosing a partition: +// As the seeds for the two groups, pick the two rects that would waste the +// most area if covered by a single rectangle, i.e. evidently the worst pair +// to have in the same group. +// Of the remaining, one at a time is chosen to be put in one of the two groups. +// The one chosen is the one with the greatest difference in area expansion +// depending on which group - the rect most strongly attracted to one group +// and repelled from the other. 
// If one group gets too full (more would force other group to violate min
// fill requirement) then other group gets the rest.
// These last are the ones that can go in either group most easily.
func d10choosePartition(parVars *d10partitionVarsT, minFill int) {
	var biggestDiff float64
	var group, chosen, betterGroup int

	d10initParVars(parVars, parVars.branchCount, minFill)
	// Seed each group with one of the two most mutually wasteful rects.
	d10pickSeeds(parVars)

	// Greedily assign branches while both groups can still accept members
	// without forcing the other below the minimum fill.
	for ((parVars.count[0] + parVars.count[1]) < parVars.total) &&
		(parVars.count[0] < (parVars.total - parVars.minFill)) &&
		(parVars.count[1] < (parVars.total - parVars.minFill)) {
		biggestDiff = -1
		for index := 0; index < parVars.total; index++ {
			if d10notTaken == parVars.partition[index] {
				curRect := &parVars.branchBuf[index].rect
				rect0 := d10combineRect(curRect, &parVars.cover[0])
				rect1 := d10combineRect(curRect, &parVars.cover[1])
				growth0 := d10calcRectVolume(&rect0) - parVars.area[0]
				growth1 := d10calcRectVolume(&rect1) - parVars.area[1]
				diff := growth1 - growth0
				// group = side whose cover grows less; diff = strength of preference.
				if diff >= 0 {
					group = 0
				} else {
					group = 1
					diff = -diff
				}

				if diff > biggestDiff {
					biggestDiff = diff
					chosen = index
					betterGroup = group
				} else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) {
					// Tie: prefer the branch whose preferred group is emptier.
					chosen = index
					betterGroup = group
				}
			}
		}
		// Commit the most strongly attracted branch to its preferred group.
		d10classify(chosen, betterGroup, parVars)
	}

	// If one group too full, put remaining rects in the other
	if (parVars.count[0] + parVars.count[1]) < parVars.total {
		if parVars.count[0] >= parVars.total-parVars.minFill {
			group = 1
		} else {
			group = 0
		}
		for index := 0; index < parVars.total; index++ {
			if d10notTaken == parVars.partition[index] {
				d10classify(index, group, parVars)
			}
		}
	}
}

// Copy branches from the buffer into two nodes according to the partition.
+func d10loadNodes(nodeA, nodeB *d10nodeT, parVars *d10partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d10nodeT{nodeA, nodeB} + + // It is assured that d10addBranch here will not cause a node split. + d10addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d10partitionVarsT structure. +func d10initParVars(parVars *d10partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d10notTaken + } +} + +func d10pickSeeds(parVars *d10partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d10maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d10calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d10combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d10calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d10classify(seed0, 0, parVars) + d10classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. 
func d10classify(index, group int, parVars *d10partitionVarsT) {
	parVars.partition[index] = group

	// Calculate combined rect
	if parVars.count[group] == 0 {
		// First member: the group's cover is exactly this branch's rect.
		parVars.cover[group] = parVars.branchBuf[index].rect
	} else {
		parVars.cover[group] = d10combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group])
	}

	// Calculate volume of combined rect
	parVars.area[group] = d10calcRectVolume(&parVars.cover[group])

	parVars.count[group]++
}

// Delete a data rectangle from an index structure.
// Pass in a pointer to a d10rectT, the tid of the record, ptr to ptr to root node.
// Returns 1 if record not found, 0 if success.
// d10removeRect provides for eliminating the root.
func d10removeRect(rect *d10rectT, id interface{}, root **d10nodeT) bool {
	var reInsertList *d10listNodeT

	if !d10removeRectRec(rect, id, *root, &reInsertList) {
		// Found and deleted a data item
		// Reinsert any branches from eliminated nodes
		for reInsertList != nil {
			tempNode := reInsertList.node

			for index := 0; index < tempNode.count; index++ {
				// NOTE(review): d10insertRectRec places a branch INTO a node whose
				// level equals the level argument, so tempNode.level re-inserts
				// these branches at their original height — looks correct, but
				// confirm against the original C implementation's TODO.
				d10insertRect(&tempNode.branch[index], root, tempNode.level)
			}
			reInsertList = reInsertList.next
		}

		// Check for redundant root (not leaf, 1 child) and eliminate. TODO replace
		// if with while? In case there is a whole branch of redundant roots...
		if (*root).count == 1 && (*root).isInternalNode() {
			tempNode := (*root).branch[0].child
			*root = tempNode
		}
		return false
	} else {
		return true
	}
}

// Delete a rectangle from non-root part of an index structure.
// Called by d10removeRect. Descends tree recursively,
// merges branches on the way back up.
// Returns 1 if record not found, 0 if success.
+func d10removeRectRec(rect *d10rectT, id interface{}, node *d10nodeT, listNode **d10listNodeT) bool { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + if d10overlap(*rect, node.branch[index].rect) { + if !d10removeRectRec(rect, id, node.branch[index].child, listNode) { + if node.branch[index].child.count >= d10minNodes { + // child removed, just resize parent rect + node.branch[index].rect = d10nodeCover(node.branch[index].child) + } else { + // child removed, not enough entries in node, eliminate node + d10reInsert(node.branch[index].child, listNode) + d10disconnectBranch(node, index) // Must return after this call as count has changed + } + return false + } + } + } + return true + } else { // A leaf node + for index := 0; index < node.count; index++ { + if node.branch[index].data == id { + d10disconnectBranch(node, index) // Must return after this call as count has changed + return false + } + } + return true + } +} + +// Decide whether two rectangles d10overlap. +func d10overlap(rectA, rectB d10rectT) bool { + for index := 0; index < d10numDims; index++ { + if rectA.min[index] > rectB.max[index] || + rectB.min[index] > rectA.max[index] { + return false + } + } + return true +} + +// Add a node to the reinsertion list. All its branches will later +// be reinserted into the index structure. +func d10reInsert(node *d10nodeT, listNode **d10listNodeT) { + newListNode := &d10listNodeT{} + newListNode.node = node + newListNode.next = *listNode + *listNode = newListNode +} + +// d10search in an index tree or subtree for all data retangles that d10overlap the argument rectangle. 
+func d10search(node *d10nodeT, rect d10rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) { + if node.isInternalNode() { + // This is an internal node in the tree + for index := 0; index < node.count; index++ { + if d10overlap(rect, node.branch[index].rect) { + var ok bool + foundCount, ok = d10search(node.branch[index].child, rect, foundCount, resultCallback) + if !ok { + // The callback indicated to stop searching + return foundCount, false + } + } + } + } else { + // This is a leaf node + for index := 0; index < node.count; index++ { + if d10overlap(rect, node.branch[index].rect) { + id := node.branch[index].data + foundCount++ + if !resultCallback(id) { + return foundCount, false // Don't continue searching + } + + } + } + } + return foundCount, true // Continue searching +} + +func d11fmin(a, b float64) float64 { + if a < b { + return a + } + return b +} +func d11fmax(a, b float64) float64 { + if a > b { + return a + } + return b +} + +const ( + d11numDims = 11 + d11maxNodes = 8 + d11minNodes = d11maxNodes / 2 + d11useSphericalVolume = true // Better split classification, may be slower on some systems +) + +var d11unitSphereVolume = []float64{ + 0.000000, 2.000000, 3.141593, // Dimension 0,1,2 + 4.188790, 4.934802, 5.263789, // Dimension 3,4,5 + 5.167713, 4.724766, 4.058712, // Dimension 6,7,8 + 3.298509, 2.550164, 1.884104, // Dimension 9,10,11 + 1.335263, 0.910629, 0.599265, // Dimension 12,13,14 + 0.381443, 0.235331, 0.140981, // Dimension 15,16,17 + 0.082146, 0.046622, 0.025807, // Dimension 18,19,20 +}[d11numDims] + +type d11RTree struct { + root *d11nodeT ///< Root of tree +} + +/// Minimal bounding rectangle (n-dimensional) +type d11rectT struct { + min [d11numDims]float64 ///< Min dimensions of bounding box + max [d11numDims]float64 ///< Max dimensions of bounding box +} + +/// May be data or may be another subtree +/// The parents level determines this. 
+/// If the parents level is 0, then this is data +type d11branchT struct { + rect d11rectT ///< Bounds + child *d11nodeT ///< Child node + data interface{} ///< Data Id or Ptr +} + +/// d11nodeT for each branch level +type d11nodeT struct { + count int ///< Count + level int ///< Leaf is zero, others positive + branch [d11maxNodes]d11branchT ///< Branch +} + +func (node *d11nodeT) isInternalNode() bool { + return (node.level > 0) // Not a leaf, but a internal node +} +func (node *d11nodeT) isLeaf() bool { + return (node.level == 0) // A leaf, contains data +} + +/// A link list of nodes for reinsertion after a delete operation +type d11listNodeT struct { + next *d11listNodeT ///< Next in list + node *d11nodeT ///< Node +} + +const d11notTaken = -1 // indicates that position + +/// Variables for finding a split partition +type d11partitionVarsT struct { + partition [d11maxNodes + 1]int + total int + minFill int + count [2]int + cover [2]d11rectT + area [2]float64 + + branchBuf [d11maxNodes + 1]d11branchT + branchCount int + coverSplit d11rectT + coverSplitArea float64 +} + +func d11New() *d11RTree { + // We only support machine word size simple data type eg. integer index or object pointer. + // Since we are storing as union with non data branch + return &d11RTree{ + root: &d11nodeT{}, + } +} + +/// Insert entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d11RTree) Insert(min, max [d11numDims]float64, dataId interface{}) { + var branch d11branchT + branch.data = dataId + for axis := 0; axis < d11numDims; axis++ { + branch.rect.min[axis] = min[axis] + branch.rect.max[axis] = max[axis] + } + d11insertRect(&branch, &tr.root, 0) +} + +/// Remove entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. 
+func (tr *d11RTree) Remove(min, max [d11numDims]float64, dataId interface{}) { + var rect d11rectT + for axis := 0; axis < d11numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + d11removeRect(&rect, dataId, &tr.root) +} + +/// Find all within d11search rectangle +/// \param a_min Min of d11search bounding rect +/// \param a_max Max of d11search bounding rect +/// \param a_searchResult d11search result array. Caller should set grow size. Function will reset, not append to array. +/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching +/// \param a_context User context to pass as parameter to a_resultCallback +/// \return Returns the number of entries found +func (tr *d11RTree) Search(min, max [d11numDims]float64, resultCallback func(data interface{}) bool) int { + var rect d11rectT + for axis := 0; axis < d11numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + foundCount, _ := d11search(tr.root, rect, 0, resultCallback) + return foundCount +} + +/// Count the data elements in this container. This is slow as no internal counter is maintained. +func (tr *d11RTree) Count() int { + var count int + d11countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d11RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d11nodeT{} +} + +func d11countRec(node *d11nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d11countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. 
+// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +func d11insertRectRec(branch *d11branchT, node *d11nodeT, newNode **d11nodeT, level int) bool { + // recurse until we reach the correct level for the new record. data records + // will always be called with a_level == 0 (leaf) + if node.level > level { + // Still above level for insertion, go down tree recursively + var otherNode *d11nodeT + //var newBranch d11branchT + + // find the optimal branch for this record + index := d11pickBranch(&branch.rect, node) + + // recursively insert this record into the picked branch + childWasSplit := d11insertRectRec(branch, node.branch[index].child, &otherNode, level) + + if !childWasSplit { + // Child was not split. Merge the bounding box of the new record with the + // existing bounding box + node.branch[index].rect = d11combineRect(&branch.rect, &(node.branch[index].rect)) + return false + } else { + // Child was split. The old branches are now re-partitioned to two nodes + // so we have to re-calculate the bounding boxes of each node + node.branch[index].rect = d11nodeCover(node.branch[index].child) + var newBranch d11branchT + newBranch.child = otherNode + newBranch.rect = d11nodeCover(otherNode) + + // The old node is already a child of a_node. Now add the newly-created + // node to a_node as well. a_node might be split because of that. + return d11addBranch(&newBranch, node, newNode) + } + } else if node.level == level { + // We have reached level for insertion. Add rect, split if necessary + return d11addBranch(branch, node, newNode) + } else { + // Should never occur + return false + } +} + +// Insert a data rectangle into an index structure. +// d11insertRect provides for splitting the root; +// returns 1 if root was split, 0 if it was not. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. 
+// InsertRect2 does the recursion. +// +func d11insertRect(branch *d11branchT, root **d11nodeT, level int) bool { + var newNode *d11nodeT + + if d11insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d11nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d11branchT + + // add old root node as a child of the new root + newBranch.rect = d11nodeCover(*root) + newBranch.child = *root + d11addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d11nodeCover(newNode) + newBranch.child = newNode + d11addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d11nodeCover(node *d11nodeT) d11rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d11combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d11addBranch(branch *d11branchT, node *d11nodeT, newNode **d11nodeT) bool { + if node.count < d11maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d11splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d11disconnectBranch(node *d11nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. 
Pick the one that will need the smallest increase +// in area to accomodate the new rectangle. This will result in the +// least total area for the covering rectangles in the current node. +// In case of a tie, pick the one which was smaller before, to get +// the best resolution when searching. +func d11pickBranch(rect *d11rectT, node *d11nodeT) int { + var firstTime bool = true + var increase float64 + var bestIncr float64 = -1 + var area float64 + var bestArea float64 + var best int + var tempRect d11rectT + + for index := 0; index < node.count; index++ { + curRect := &node.branch[index].rect + area = d11calcRectVolume(curRect) + tempRect = d11combineRect(rect, curRect) + increase = d11calcRectVolume(&tempRect) - area + if (increase < bestIncr) || firstTime { + best = index + bestArea = area + bestIncr = increase + firstTime = false + } else if (increase == bestIncr) && (area < bestArea) { + best = index + bestArea = area + bestIncr = increase + } + } + return best +} + +// Combine two rectangles into larger one containing both +func d11combineRect(rectA, rectB *d11rectT) d11rectT { + var newRect d11rectT + + for index := 0; index < d11numDims; index++ { + newRect.min[index] = d11fmin(rectA.min[index], rectB.min[index]) + newRect.max[index] = d11fmax(rectA.max[index], rectB.max[index]) + } + + return newRect +} + +// Split a node. +// Divides the nodes branches and the extra one between two nodes. +// Old node is one of the new ones, and one really new one is created. +// Tries more than one method for choosing a partition, uses best result. 
+func d11splitNode(node *d11nodeT, branch *d11branchT, newNode **d11nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d11partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d11getBranches(node, branch, parVars) + + // Find partition + d11choosePartition(parVars, d11minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d11nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d11loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d11rectVolume(rect *d11rectT) float64 { + var volume float64 = 1 + for index := 0; index < d11numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d11rectT +func d11rectSphericalVolume(rect *d11rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d11numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. 
+ if d11numDims == 5 { + return (radius * radius * radius * radius * radius * d11unitSphereVolume) + } else if d11numDims == 4 { + return (radius * radius * radius * radius * d11unitSphereVolume) + } else if d11numDims == 3 { + return (radius * radius * radius * d11unitSphereVolume) + } else if d11numDims == 2 { + return (radius * radius * d11unitSphereVolume) + } else { + return (math.Pow(radius, d11numDims) * d11unitSphereVolume) + } +} + +// Use one of the methods to calculate retangle volume +func d11calcRectVolume(rect *d11rectT) float64 { + if d11useSphericalVolume { + return d11rectSphericalVolume(rect) // Slower but helps certain merge cases + } else { // RTREE_USE_SPHERICAL_VOLUME + return d11rectVolume(rect) // Faster but can cause poor merges + } // RTREE_USE_SPHERICAL_VOLUME +} + +// Load branch buffer with branches from full node plus the extra branch. +func d11getBranches(node *d11nodeT, branch *d11branchT, parVars *d11partitionVarsT) { + // Load the branch buffer + for index := 0; index < d11maxNodes; index++ { + parVars.branchBuf[index] = node.branch[index] + } + parVars.branchBuf[d11maxNodes] = *branch + parVars.branchCount = d11maxNodes + 1 + + // Calculate rect containing all in the set + parVars.coverSplit = parVars.branchBuf[0].rect + for index := 1; index < d11maxNodes+1; index++ { + parVars.coverSplit = d11combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect) + } + parVars.coverSplitArea = d11calcRectVolume(&parVars.coverSplit) +} + +// Method #0 for choosing a partition: +// As the seeds for the two groups, pick the two rects that would waste the +// most area if covered by a single rectangle, i.e. evidently the worst pair +// to have in the same group. +// Of the remaining, one at a time is chosen to be put in one of the two groups. +// The one chosen is the one with the greatest difference in area expansion +// depending on which group - the rect most strongly attracted to one group +// and repelled from the other. 
+// If one group gets too full (more would force other group to violate min +// fill requirement) then other group gets the rest. +// These last are the ones that can go in either group most easily. +func d11choosePartition(parVars *d11partitionVarsT, minFill int) { + var biggestDiff float64 + var group, chosen, betterGroup int + + d11initParVars(parVars, parVars.branchCount, minFill) + d11pickSeeds(parVars) + + for ((parVars.count[0] + parVars.count[1]) < parVars.total) && + (parVars.count[0] < (parVars.total - parVars.minFill)) && + (parVars.count[1] < (parVars.total - parVars.minFill)) { + biggestDiff = -1 + for index := 0; index < parVars.total; index++ { + if d11notTaken == parVars.partition[index] { + curRect := &parVars.branchBuf[index].rect + rect0 := d11combineRect(curRect, &parVars.cover[0]) + rect1 := d11combineRect(curRect, &parVars.cover[1]) + growth0 := d11calcRectVolume(&rect0) - parVars.area[0] + growth1 := d11calcRectVolume(&rect1) - parVars.area[1] + diff := growth1 - growth0 + if diff >= 0 { + group = 0 + } else { + group = 1 + diff = -diff + } + + if diff > biggestDiff { + biggestDiff = diff + chosen = index + betterGroup = group + } else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) { + chosen = index + betterGroup = group + } + } + } + d11classify(chosen, betterGroup, parVars) + } + + // If one group too full, put remaining rects in the other + if (parVars.count[0] + parVars.count[1]) < parVars.total { + if parVars.count[0] >= parVars.total-parVars.minFill { + group = 1 + } else { + group = 0 + } + for index := 0; index < parVars.total; index++ { + if d11notTaken == parVars.partition[index] { + d11classify(index, group, parVars) + } + } + } +} + +// Copy branches from the buffer into two nodes according to the partition. 
+func d11loadNodes(nodeA, nodeB *d11nodeT, parVars *d11partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d11nodeT{nodeA, nodeB} + + // It is assured that d11addBranch here will not cause a node split. + d11addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d11partitionVarsT structure. +func d11initParVars(parVars *d11partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d11notTaken + } +} + +func d11pickSeeds(parVars *d11partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d11maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d11calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d11combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d11calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d11classify(seed0, 0, parVars) + d11classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. 
+func d11classify(index, group int, parVars *d11partitionVarsT) { + parVars.partition[index] = group + + // Calculate combined rect + if parVars.count[group] == 0 { + parVars.cover[group] = parVars.branchBuf[index].rect + } else { + parVars.cover[group] = d11combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group]) + } + + // Calculate volume of combined rect + parVars.area[group] = d11calcRectVolume(&parVars.cover[group]) + + parVars.count[group]++ +} + +// Delete a data rectangle from an index structure. +// Pass in a pointer to a d11rectT, the tid of the record, ptr to ptr to root node. +// Returns 1 if record not found, 0 if success. +// d11removeRect provides for eliminating the root. +func d11removeRect(rect *d11rectT, id interface{}, root **d11nodeT) bool { + var reInsertList *d11listNodeT + + if !d11removeRectRec(rect, id, *root, &reInsertList) { + // Found and deleted a data item + // Reinsert any branches from eliminated nodes + for reInsertList != nil { + tempNode := reInsertList.node + + for index := 0; index < tempNode.count; index++ { + // TODO go over this code. should I use (tempNode->m_level - 1)? + d11insertRect(&tempNode.branch[index], root, tempNode.level) + } + reInsertList = reInsertList.next + } + + // Check for redundant root (not leaf, 1 child) and eliminate TODO replace + // if with while? In case there is a whole branch of redundant roots... + if (*root).count == 1 && (*root).isInternalNode() { + tempNode := (*root).branch[0].child + *root = tempNode + } + return false + } else { + return true + } +} + +// Delete a rectangle from non-root part of an index structure. +// Called by d11removeRect. Descends tree recursively, +// merges branches on the way back up. +// Returns 1 if record not found, 0 if success. 
+func d11removeRectRec(rect *d11rectT, id interface{}, node *d11nodeT, listNode **d11listNodeT) bool { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + if d11overlap(*rect, node.branch[index].rect) { + if !d11removeRectRec(rect, id, node.branch[index].child, listNode) { + if node.branch[index].child.count >= d11minNodes { + // child removed, just resize parent rect + node.branch[index].rect = d11nodeCover(node.branch[index].child) + } else { + // child removed, not enough entries in node, eliminate node + d11reInsert(node.branch[index].child, listNode) + d11disconnectBranch(node, index) // Must return after this call as count has changed + } + return false + } + } + } + return true + } else { // A leaf node + for index := 0; index < node.count; index++ { + if node.branch[index].data == id { + d11disconnectBranch(node, index) // Must return after this call as count has changed + return false + } + } + return true + } +} + +// Decide whether two rectangles d11overlap. +func d11overlap(rectA, rectB d11rectT) bool { + for index := 0; index < d11numDims; index++ { + if rectA.min[index] > rectB.max[index] || + rectB.min[index] > rectA.max[index] { + return false + } + } + return true +} + +// Add a node to the reinsertion list. All its branches will later +// be reinserted into the index structure. +func d11reInsert(node *d11nodeT, listNode **d11listNodeT) { + newListNode := &d11listNodeT{} + newListNode.node = node + newListNode.next = *listNode + *listNode = newListNode +} + +// d11search in an index tree or subtree for all data retangles that d11overlap the argument rectangle. 
+func d11search(node *d11nodeT, rect d11rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) { + if node.isInternalNode() { + // This is an internal node in the tree + for index := 0; index < node.count; index++ { + if d11overlap(rect, node.branch[index].rect) { + var ok bool + foundCount, ok = d11search(node.branch[index].child, rect, foundCount, resultCallback) + if !ok { + // The callback indicated to stop searching + return foundCount, false + } + } + } + } else { + // This is a leaf node + for index := 0; index < node.count; index++ { + if d11overlap(rect, node.branch[index].rect) { + id := node.branch[index].data + foundCount++ + if !resultCallback(id) { + return foundCount, false // Don't continue searching + } + + } + } + } + return foundCount, true // Continue searching +} + +func d12fmin(a, b float64) float64 { + if a < b { + return a + } + return b +} +func d12fmax(a, b float64) float64 { + if a > b { + return a + } + return b +} + +const ( + d12numDims = 12 + d12maxNodes = 8 + d12minNodes = d12maxNodes / 2 + d12useSphericalVolume = true // Better split classification, may be slower on some systems +) + +var d12unitSphereVolume = []float64{ + 0.000000, 2.000000, 3.141593, // Dimension 0,1,2 + 4.188790, 4.934802, 5.263789, // Dimension 3,4,5 + 5.167713, 4.724766, 4.058712, // Dimension 6,7,8 + 3.298509, 2.550164, 1.884104, // Dimension 9,10,11 + 1.335263, 0.910629, 0.599265, // Dimension 12,13,14 + 0.381443, 0.235331, 0.140981, // Dimension 15,16,17 + 0.082146, 0.046622, 0.025807, // Dimension 18,19,20 +}[d12numDims] + +type d12RTree struct { + root *d12nodeT ///< Root of tree +} + +/// Minimal bounding rectangle (n-dimensional) +type d12rectT struct { + min [d12numDims]float64 ///< Min dimensions of bounding box + max [d12numDims]float64 ///< Max dimensions of bounding box +} + +/// May be data or may be another subtree +/// The parents level determines this. 
+/// If the parents level is 0, then this is data +type d12branchT struct { + rect d12rectT ///< Bounds + child *d12nodeT ///< Child node + data interface{} ///< Data Id or Ptr +} + +/// d12nodeT for each branch level +type d12nodeT struct { + count int ///< Count + level int ///< Leaf is zero, others positive + branch [d12maxNodes]d12branchT ///< Branch +} + +func (node *d12nodeT) isInternalNode() bool { + return (node.level > 0) // Not a leaf, but a internal node +} +func (node *d12nodeT) isLeaf() bool { + return (node.level == 0) // A leaf, contains data +} + +/// A link list of nodes for reinsertion after a delete operation +type d12listNodeT struct { + next *d12listNodeT ///< Next in list + node *d12nodeT ///< Node +} + +const d12notTaken = -1 // indicates that position + +/// Variables for finding a split partition +type d12partitionVarsT struct { + partition [d12maxNodes + 1]int + total int + minFill int + count [2]int + cover [2]d12rectT + area [2]float64 + + branchBuf [d12maxNodes + 1]d12branchT + branchCount int + coverSplit d12rectT + coverSplitArea float64 +} + +func d12New() *d12RTree { + // We only support machine word size simple data type eg. integer index or object pointer. + // Since we are storing as union with non data branch + return &d12RTree{ + root: &d12nodeT{}, + } +} + +/// Insert entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d12RTree) Insert(min, max [d12numDims]float64, dataId interface{}) { + var branch d12branchT + branch.data = dataId + for axis := 0; axis < d12numDims; axis++ { + branch.rect.min[axis] = min[axis] + branch.rect.max[axis] = max[axis] + } + d12insertRect(&branch, &tr.root, 0) +} + +/// Remove entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. 
+func (tr *d12RTree) Remove(min, max [d12numDims]float64, dataId interface{}) { + var rect d12rectT + for axis := 0; axis < d12numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + d12removeRect(&rect, dataId, &tr.root) +} + +/// Find all within d12search rectangle +/// \param a_min Min of d12search bounding rect +/// \param a_max Max of d12search bounding rect +/// \param a_searchResult d12search result array. Caller should set grow size. Function will reset, not append to array. +/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching +/// \param a_context User context to pass as parameter to a_resultCallback +/// \return Returns the number of entries found +func (tr *d12RTree) Search(min, max [d12numDims]float64, resultCallback func(data interface{}) bool) int { + var rect d12rectT + for axis := 0; axis < d12numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + foundCount, _ := d12search(tr.root, rect, 0, resultCallback) + return foundCount +} + +/// Count the data elements in this container. This is slow as no internal counter is maintained. +func (tr *d12RTree) Count() int { + var count int + d12countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d12RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d12nodeT{} +} + +func d12countRec(node *d12nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d12countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. 
+// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +func d12insertRectRec(branch *d12branchT, node *d12nodeT, newNode **d12nodeT, level int) bool { + // recurse until we reach the correct level for the new record. data records + // will always be called with a_level == 0 (leaf) + if node.level > level { + // Still above level for insertion, go down tree recursively + var otherNode *d12nodeT + //var newBranch d12branchT + + // find the optimal branch for this record + index := d12pickBranch(&branch.rect, node) + + // recursively insert this record into the picked branch + childWasSplit := d12insertRectRec(branch, node.branch[index].child, &otherNode, level) + + if !childWasSplit { + // Child was not split. Merge the bounding box of the new record with the + // existing bounding box + node.branch[index].rect = d12combineRect(&branch.rect, &(node.branch[index].rect)) + return false + } else { + // Child was split. The old branches are now re-partitioned to two nodes + // so we have to re-calculate the bounding boxes of each node + node.branch[index].rect = d12nodeCover(node.branch[index].child) + var newBranch d12branchT + newBranch.child = otherNode + newBranch.rect = d12nodeCover(otherNode) + + // The old node is already a child of a_node. Now add the newly-created + // node to a_node as well. a_node might be split because of that. + return d12addBranch(&newBranch, node, newNode) + } + } else if node.level == level { + // We have reached level for insertion. Add rect, split if necessary + return d12addBranch(branch, node, newNode) + } else { + // Should never occur + return false + } +} + +// Insert a data rectangle into an index structure. +// d12insertRect provides for splitting the root; +// returns 1 if root was split, 0 if it was not. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. 
+// InsertRect2 does the recursion. +// +func d12insertRect(branch *d12branchT, root **d12nodeT, level int) bool { + var newNode *d12nodeT + + if d12insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d12nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d12branchT + + // add old root node as a child of the new root + newBranch.rect = d12nodeCover(*root) + newBranch.child = *root + d12addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d12nodeCover(newNode) + newBranch.child = newNode + d12addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d12nodeCover(node *d12nodeT) d12rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d12combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d12addBranch(branch *d12branchT, node *d12nodeT, newNode **d12nodeT) bool { + if node.count < d12maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d12splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d12disconnectBranch(node *d12nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. 
Pick the one that will need the smallest increase
+// in area to accommodate the new rectangle. This will result in the
+// least total area for the covering rectangles in the current node.
+// In case of a tie, pick the one which was smaller before, to get
+// the best resolution when searching.
+func d12pickBranch(rect *d12rectT, node *d12nodeT) int {
+ var firstTime bool = true
+ var increase float64
+ var bestIncr float64 = -1
+ var area float64
+ var bestArea float64
+ var best int
+ var tempRect d12rectT
+
+ for index := 0; index < node.count; index++ {
+ curRect := &node.branch[index].rect
+ area = d12calcRectVolume(curRect)
+ tempRect = d12combineRect(rect, curRect)
+ increase = d12calcRectVolume(&tempRect) - area
+ if (increase < bestIncr) || firstTime {
+ best = index
+ bestArea = area
+ bestIncr = increase
+ firstTime = false
+ } else if (increase == bestIncr) && (area < bestArea) {
+ best = index
+ bestArea = area
+ bestIncr = increase
+ }
+ }
+ return best
+}
+
+// Combine two rectangles into larger one containing both
+func d12combineRect(rectA, rectB *d12rectT) d12rectT {
+ var newRect d12rectT
+
+ for index := 0; index < d12numDims; index++ {
+ newRect.min[index] = d12fmin(rectA.min[index], rectB.min[index])
+ newRect.max[index] = d12fmax(rectA.max[index], rectB.max[index])
+ }
+
+ return newRect
+}
+
+// Split a node.
+// Divides the node's branches and the extra one between two nodes.
+// Old node is one of the new ones, and one really new one is created.
+// Tries more than one method for choosing a partition, uses best result.
+func d12splitNode(node *d12nodeT, branch *d12branchT, newNode **d12nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d12partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d12getBranches(node, branch, parVars) + + // Find partition + d12choosePartition(parVars, d12minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d12nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d12loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d12rectVolume(rect *d12rectT) float64 { + var volume float64 = 1 + for index := 0; index < d12numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d12rectT +func d12rectSphericalVolume(rect *d12rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d12numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. 
+ if d12numDims == 5 {
+ return (radius * radius * radius * radius * radius * d12unitSphereVolume)
+ } else if d12numDims == 4 {
+ return (radius * radius * radius * radius * d12unitSphereVolume)
+ } else if d12numDims == 3 {
+ return (radius * radius * radius * d12unitSphereVolume)
+ } else if d12numDims == 2 {
+ return (radius * radius * d12unitSphereVolume)
+ } else {
+ return (math.Pow(radius, d12numDims) * d12unitSphereVolume)
+ }
+}
+
+// Use one of the methods to calculate rectangle volume
+func d12calcRectVolume(rect *d12rectT) float64 {
+ if d12useSphericalVolume {
+ return d12rectSphericalVolume(rect) // Slower but helps certain merge cases
+ } else { // RTREE_USE_SPHERICAL_VOLUME
+ return d12rectVolume(rect) // Faster but can cause poor merges
+ } // RTREE_USE_SPHERICAL_VOLUME
+}
+
+// Load branch buffer with branches from full node plus the extra branch.
+func d12getBranches(node *d12nodeT, branch *d12branchT, parVars *d12partitionVarsT) {
+ // Load the branch buffer
+ for index := 0; index < d12maxNodes; index++ {
+ parVars.branchBuf[index] = node.branch[index]
+ }
+ parVars.branchBuf[d12maxNodes] = *branch
+ parVars.branchCount = d12maxNodes + 1
+
+ // Calculate rect containing all in the set
+ parVars.coverSplit = parVars.branchBuf[0].rect
+ for index := 1; index < d12maxNodes+1; index++ {
+ parVars.coverSplit = d12combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect)
+ }
+ parVars.coverSplitArea = d12calcRectVolume(&parVars.coverSplit)
+}
+
+// Method #0 for choosing a partition:
+// As the seeds for the two groups, pick the two rects that would waste the
+// most area if covered by a single rectangle, i.e. evidently the worst pair
+// to have in the same group.
+// Of the remaining, one at a time is chosen to be put in one of the two groups.
+// The one chosen is the one with the greatest difference in area expansion
+// depending on which group - the rect most strongly attracted to one group
+// and repelled from the other.
+// If one group gets too full (more would force other group to violate min +// fill requirement) then other group gets the rest. +// These last are the ones that can go in either group most easily. +func d12choosePartition(parVars *d12partitionVarsT, minFill int) { + var biggestDiff float64 + var group, chosen, betterGroup int + + d12initParVars(parVars, parVars.branchCount, minFill) + d12pickSeeds(parVars) + + for ((parVars.count[0] + parVars.count[1]) < parVars.total) && + (parVars.count[0] < (parVars.total - parVars.minFill)) && + (parVars.count[1] < (parVars.total - parVars.minFill)) { + biggestDiff = -1 + for index := 0; index < parVars.total; index++ { + if d12notTaken == parVars.partition[index] { + curRect := &parVars.branchBuf[index].rect + rect0 := d12combineRect(curRect, &parVars.cover[0]) + rect1 := d12combineRect(curRect, &parVars.cover[1]) + growth0 := d12calcRectVolume(&rect0) - parVars.area[0] + growth1 := d12calcRectVolume(&rect1) - parVars.area[1] + diff := growth1 - growth0 + if diff >= 0 { + group = 0 + } else { + group = 1 + diff = -diff + } + + if diff > biggestDiff { + biggestDiff = diff + chosen = index + betterGroup = group + } else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) { + chosen = index + betterGroup = group + } + } + } + d12classify(chosen, betterGroup, parVars) + } + + // If one group too full, put remaining rects in the other + if (parVars.count[0] + parVars.count[1]) < parVars.total { + if parVars.count[0] >= parVars.total-parVars.minFill { + group = 1 + } else { + group = 0 + } + for index := 0; index < parVars.total; index++ { + if d12notTaken == parVars.partition[index] { + d12classify(index, group, parVars) + } + } + } +} + +// Copy branches from the buffer into two nodes according to the partition. 
+func d12loadNodes(nodeA, nodeB *d12nodeT, parVars *d12partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d12nodeT{nodeA, nodeB} + + // It is assured that d12addBranch here will not cause a node split. + d12addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d12partitionVarsT structure. +func d12initParVars(parVars *d12partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d12notTaken + } +} + +func d12pickSeeds(parVars *d12partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d12maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d12calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d12combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d12calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d12classify(seed0, 0, parVars) + d12classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. 
+func d12classify(index, group int, parVars *d12partitionVarsT) { + parVars.partition[index] = group + + // Calculate combined rect + if parVars.count[group] == 0 { + parVars.cover[group] = parVars.branchBuf[index].rect + } else { + parVars.cover[group] = d12combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group]) + } + + // Calculate volume of combined rect + parVars.area[group] = d12calcRectVolume(&parVars.cover[group]) + + parVars.count[group]++ +} + +// Delete a data rectangle from an index structure. +// Pass in a pointer to a d12rectT, the tid of the record, ptr to ptr to root node. +// Returns 1 if record not found, 0 if success. +// d12removeRect provides for eliminating the root. +func d12removeRect(rect *d12rectT, id interface{}, root **d12nodeT) bool { + var reInsertList *d12listNodeT + + if !d12removeRectRec(rect, id, *root, &reInsertList) { + // Found and deleted a data item + // Reinsert any branches from eliminated nodes + for reInsertList != nil { + tempNode := reInsertList.node + + for index := 0; index < tempNode.count; index++ { + // TODO go over this code. should I use (tempNode->m_level - 1)? + d12insertRect(&tempNode.branch[index], root, tempNode.level) + } + reInsertList = reInsertList.next + } + + // Check for redundant root (not leaf, 1 child) and eliminate TODO replace + // if with while? In case there is a whole branch of redundant roots... + if (*root).count == 1 && (*root).isInternalNode() { + tempNode := (*root).branch[0].child + *root = tempNode + } + return false + } else { + return true + } +} + +// Delete a rectangle from non-root part of an index structure. +// Called by d12removeRect. Descends tree recursively, +// merges branches on the way back up. +// Returns 1 if record not found, 0 if success. 
+func d12removeRectRec(rect *d12rectT, id interface{}, node *d12nodeT, listNode **d12listNodeT) bool { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + if d12overlap(*rect, node.branch[index].rect) { + if !d12removeRectRec(rect, id, node.branch[index].child, listNode) { + if node.branch[index].child.count >= d12minNodes { + // child removed, just resize parent rect + node.branch[index].rect = d12nodeCover(node.branch[index].child) + } else { + // child removed, not enough entries in node, eliminate node + d12reInsert(node.branch[index].child, listNode) + d12disconnectBranch(node, index) // Must return after this call as count has changed + } + return false + } + } + } + return true + } else { // A leaf node + for index := 0; index < node.count; index++ { + if node.branch[index].data == id { + d12disconnectBranch(node, index) // Must return after this call as count has changed + return false + } + } + return true + } +} + +// Decide whether two rectangles d12overlap. +func d12overlap(rectA, rectB d12rectT) bool { + for index := 0; index < d12numDims; index++ { + if rectA.min[index] > rectB.max[index] || + rectB.min[index] > rectA.max[index] { + return false + } + } + return true +} + +// Add a node to the reinsertion list. All its branches will later +// be reinserted into the index structure. +func d12reInsert(node *d12nodeT, listNode **d12listNodeT) { + newListNode := &d12listNodeT{} + newListNode.node = node + newListNode.next = *listNode + *listNode = newListNode +} + +// d12search in an index tree or subtree for all data retangles that d12overlap the argument rectangle. 
+func d12search(node *d12nodeT, rect d12rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) { + if node.isInternalNode() { + // This is an internal node in the tree + for index := 0; index < node.count; index++ { + if d12overlap(rect, node.branch[index].rect) { + var ok bool + foundCount, ok = d12search(node.branch[index].child, rect, foundCount, resultCallback) + if !ok { + // The callback indicated to stop searching + return foundCount, false + } + } + } + } else { + // This is a leaf node + for index := 0; index < node.count; index++ { + if d12overlap(rect, node.branch[index].rect) { + id := node.branch[index].data + foundCount++ + if !resultCallback(id) { + return foundCount, false // Don't continue searching + } + + } + } + } + return foundCount, true // Continue searching +} + +func d13fmin(a, b float64) float64 { + if a < b { + return a + } + return b +} +func d13fmax(a, b float64) float64 { + if a > b { + return a + } + return b +} + +const ( + d13numDims = 13 + d13maxNodes = 8 + d13minNodes = d13maxNodes / 2 + d13useSphericalVolume = true // Better split classification, may be slower on some systems +) + +var d13unitSphereVolume = []float64{ + 0.000000, 2.000000, 3.141593, // Dimension 0,1,2 + 4.188790, 4.934802, 5.263789, // Dimension 3,4,5 + 5.167713, 4.724766, 4.058712, // Dimension 6,7,8 + 3.298509, 2.550164, 1.884104, // Dimension 9,10,11 + 1.335263, 0.910629, 0.599265, // Dimension 12,13,14 + 0.381443, 0.235331, 0.140981, // Dimension 15,16,17 + 0.082146, 0.046622, 0.025807, // Dimension 18,19,20 +}[d13numDims] + +type d13RTree struct { + root *d13nodeT ///< Root of tree +} + +/// Minimal bounding rectangle (n-dimensional) +type d13rectT struct { + min [d13numDims]float64 ///< Min dimensions of bounding box + max [d13numDims]float64 ///< Max dimensions of bounding box +} + +/// May be data or may be another subtree +/// The parents level determines this. 
+/// If the parents level is 0, then this is data +type d13branchT struct { + rect d13rectT ///< Bounds + child *d13nodeT ///< Child node + data interface{} ///< Data Id or Ptr +} + +/// d13nodeT for each branch level +type d13nodeT struct { + count int ///< Count + level int ///< Leaf is zero, others positive + branch [d13maxNodes]d13branchT ///< Branch +} + +func (node *d13nodeT) isInternalNode() bool { + return (node.level > 0) // Not a leaf, but a internal node +} +func (node *d13nodeT) isLeaf() bool { + return (node.level == 0) // A leaf, contains data +} + +/// A link list of nodes for reinsertion after a delete operation +type d13listNodeT struct { + next *d13listNodeT ///< Next in list + node *d13nodeT ///< Node +} + +const d13notTaken = -1 // indicates that position + +/// Variables for finding a split partition +type d13partitionVarsT struct { + partition [d13maxNodes + 1]int + total int + minFill int + count [2]int + cover [2]d13rectT + area [2]float64 + + branchBuf [d13maxNodes + 1]d13branchT + branchCount int + coverSplit d13rectT + coverSplitArea float64 +} + +func d13New() *d13RTree { + // We only support machine word size simple data type eg. integer index or object pointer. + // Since we are storing as union with non data branch + return &d13RTree{ + root: &d13nodeT{}, + } +} + +/// Insert entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d13RTree) Insert(min, max [d13numDims]float64, dataId interface{}) { + var branch d13branchT + branch.data = dataId + for axis := 0; axis < d13numDims; axis++ { + branch.rect.min[axis] = min[axis] + branch.rect.max[axis] = max[axis] + } + d13insertRect(&branch, &tr.root, 0) +} + +/// Remove entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. 
+func (tr *d13RTree) Remove(min, max [d13numDims]float64, dataId interface{}) { + var rect d13rectT + for axis := 0; axis < d13numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + d13removeRect(&rect, dataId, &tr.root) +} + +/// Find all within d13search rectangle +/// \param a_min Min of d13search bounding rect +/// \param a_max Max of d13search bounding rect +/// \param a_searchResult d13search result array. Caller should set grow size. Function will reset, not append to array. +/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching +/// \param a_context User context to pass as parameter to a_resultCallback +/// \return Returns the number of entries found +func (tr *d13RTree) Search(min, max [d13numDims]float64, resultCallback func(data interface{}) bool) int { + var rect d13rectT + for axis := 0; axis < d13numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + foundCount, _ := d13search(tr.root, rect, 0, resultCallback) + return foundCount +} + +/// Count the data elements in this container. This is slow as no internal counter is maintained. +func (tr *d13RTree) Count() int { + var count int + d13countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d13RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d13nodeT{} +} + +func d13countRec(node *d13nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d13countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. 
+// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +func d13insertRectRec(branch *d13branchT, node *d13nodeT, newNode **d13nodeT, level int) bool { + // recurse until we reach the correct level for the new record. data records + // will always be called with a_level == 0 (leaf) + if node.level > level { + // Still above level for insertion, go down tree recursively + var otherNode *d13nodeT + //var newBranch d13branchT + + // find the optimal branch for this record + index := d13pickBranch(&branch.rect, node) + + // recursively insert this record into the picked branch + childWasSplit := d13insertRectRec(branch, node.branch[index].child, &otherNode, level) + + if !childWasSplit { + // Child was not split. Merge the bounding box of the new record with the + // existing bounding box + node.branch[index].rect = d13combineRect(&branch.rect, &(node.branch[index].rect)) + return false + } else { + // Child was split. The old branches are now re-partitioned to two nodes + // so we have to re-calculate the bounding boxes of each node + node.branch[index].rect = d13nodeCover(node.branch[index].child) + var newBranch d13branchT + newBranch.child = otherNode + newBranch.rect = d13nodeCover(otherNode) + + // The old node is already a child of a_node. Now add the newly-created + // node to a_node as well. a_node might be split because of that. + return d13addBranch(&newBranch, node, newNode) + } + } else if node.level == level { + // We have reached level for insertion. Add rect, split if necessary + return d13addBranch(branch, node, newNode) + } else { + // Should never occur + return false + } +} + +// Insert a data rectangle into an index structure. +// d13insertRect provides for splitting the root; +// returns 1 if root was split, 0 if it was not. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. 
+// InsertRect2 does the recursion. +// +func d13insertRect(branch *d13branchT, root **d13nodeT, level int) bool { + var newNode *d13nodeT + + if d13insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d13nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d13branchT + + // add old root node as a child of the new root + newBranch.rect = d13nodeCover(*root) + newBranch.child = *root + d13addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d13nodeCover(newNode) + newBranch.child = newNode + d13addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d13nodeCover(node *d13nodeT) d13rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d13combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d13addBranch(branch *d13branchT, node *d13nodeT, newNode **d13nodeT) bool { + if node.count < d13maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d13splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d13disconnectBranch(node *d13nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. 
Pick the one that will need the smallest increase
+// in area to accommodate the new rectangle. This will result in the
+// least total area for the covering rectangles in the current node.
+// In case of a tie, pick the one which was smaller before, to get
+// the best resolution when searching.
+func d13pickBranch(rect *d13rectT, node *d13nodeT) int {
+ var firstTime bool = true
+ var increase float64
+ var bestIncr float64 = -1
+ var area float64
+ var bestArea float64
+ var best int
+ var tempRect d13rectT
+
+ for index := 0; index < node.count; index++ {
+ curRect := &node.branch[index].rect
+ area = d13calcRectVolume(curRect)
+ tempRect = d13combineRect(rect, curRect)
+ increase = d13calcRectVolume(&tempRect) - area
+ if (increase < bestIncr) || firstTime {
+ best = index
+ bestArea = area
+ bestIncr = increase
+ firstTime = false
+ } else if (increase == bestIncr) && (area < bestArea) {
+ best = index
+ bestArea = area
+ bestIncr = increase
+ }
+ }
+ return best
+}
+
+// Combine two rectangles into larger one containing both
+func d13combineRect(rectA, rectB *d13rectT) d13rectT {
+ var newRect d13rectT
+
+ for index := 0; index < d13numDims; index++ {
+ newRect.min[index] = d13fmin(rectA.min[index], rectB.min[index])
+ newRect.max[index] = d13fmax(rectA.max[index], rectB.max[index])
+ }
+
+ return newRect
+}
+
+// Split a node.
+// Divides the node's branches and the extra one between two nodes.
+// Old node is one of the new ones, and one really new one is created.
+// Tries more than one method for choosing a partition, uses best result.
+func d13splitNode(node *d13nodeT, branch *d13branchT, newNode **d13nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d13partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d13getBranches(node, branch, parVars) + + // Find partition + d13choosePartition(parVars, d13minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d13nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d13loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d13rectVolume(rect *d13rectT) float64 { + var volume float64 = 1 + for index := 0; index < d13numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d13rectT +func d13rectSphericalVolume(rect *d13rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d13numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. 
+ if d13numDims == 5 {
+ return (radius * radius * radius * radius * radius * d13unitSphereVolume)
+ } else if d13numDims == 4 {
+ return (radius * radius * radius * radius * d13unitSphereVolume)
+ } else if d13numDims == 3 {
+ return (radius * radius * radius * d13unitSphereVolume)
+ } else if d13numDims == 2 {
+ return (radius * radius * d13unitSphereVolume)
+ } else {
+ return (math.Pow(radius, d13numDims) * d13unitSphereVolume)
+ }
+}
+
+// Use one of the methods to calculate rectangle volume
+func d13calcRectVolume(rect *d13rectT) float64 {
+ if d13useSphericalVolume {
+ return d13rectSphericalVolume(rect) // Slower but helps certain merge cases
+ } else { // RTREE_USE_SPHERICAL_VOLUME
+ return d13rectVolume(rect) // Faster but can cause poor merges
+ } // RTREE_USE_SPHERICAL_VOLUME
+}
+
+// Load branch buffer with branches from full node plus the extra branch.
+func d13getBranches(node *d13nodeT, branch *d13branchT, parVars *d13partitionVarsT) {
+ // Load the branch buffer
+ for index := 0; index < d13maxNodes; index++ {
+ parVars.branchBuf[index] = node.branch[index]
+ }
+ parVars.branchBuf[d13maxNodes] = *branch
+ parVars.branchCount = d13maxNodes + 1
+
+ // Calculate rect containing all in the set
+ parVars.coverSplit = parVars.branchBuf[0].rect
+ for index := 1; index < d13maxNodes+1; index++ {
+ parVars.coverSplit = d13combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect)
+ }
+ parVars.coverSplitArea = d13calcRectVolume(&parVars.coverSplit)
+}
+
+// Method #0 for choosing a partition:
+// As the seeds for the two groups, pick the two rects that would waste the
+// most area if covered by a single rectangle, i.e. evidently the worst pair
+// to have in the same group.
+// Of the remaining, one at a time is chosen to be put in one of the two groups.
+// The one chosen is the one with the greatest difference in area expansion
+// depending on which group - the rect most strongly attracted to one group
+// and repelled from the other.
+// If one group gets too full (more would force other group to violate min +// fill requirement) then other group gets the rest. +// These last are the ones that can go in either group most easily. +func d13choosePartition(parVars *d13partitionVarsT, minFill int) { + var biggestDiff float64 + var group, chosen, betterGroup int + + d13initParVars(parVars, parVars.branchCount, minFill) + d13pickSeeds(parVars) + + for ((parVars.count[0] + parVars.count[1]) < parVars.total) && + (parVars.count[0] < (parVars.total - parVars.minFill)) && + (parVars.count[1] < (parVars.total - parVars.minFill)) { + biggestDiff = -1 + for index := 0; index < parVars.total; index++ { + if d13notTaken == parVars.partition[index] { + curRect := &parVars.branchBuf[index].rect + rect0 := d13combineRect(curRect, &parVars.cover[0]) + rect1 := d13combineRect(curRect, &parVars.cover[1]) + growth0 := d13calcRectVolume(&rect0) - parVars.area[0] + growth1 := d13calcRectVolume(&rect1) - parVars.area[1] + diff := growth1 - growth0 + if diff >= 0 { + group = 0 + } else { + group = 1 + diff = -diff + } + + if diff > biggestDiff { + biggestDiff = diff + chosen = index + betterGroup = group + } else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) { + chosen = index + betterGroup = group + } + } + } + d13classify(chosen, betterGroup, parVars) + } + + // If one group too full, put remaining rects in the other + if (parVars.count[0] + parVars.count[1]) < parVars.total { + if parVars.count[0] >= parVars.total-parVars.minFill { + group = 1 + } else { + group = 0 + } + for index := 0; index < parVars.total; index++ { + if d13notTaken == parVars.partition[index] { + d13classify(index, group, parVars) + } + } + } +} + +// Copy branches from the buffer into two nodes according to the partition. 
+func d13loadNodes(nodeA, nodeB *d13nodeT, parVars *d13partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d13nodeT{nodeA, nodeB} + + // It is assured that d13addBranch here will not cause a node split. + d13addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d13partitionVarsT structure. +func d13initParVars(parVars *d13partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d13notTaken + } +} + +func d13pickSeeds(parVars *d13partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d13maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d13calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d13combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d13calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d13classify(seed0, 0, parVars) + d13classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. 
+func d13classify(index, group int, parVars *d13partitionVarsT) { + parVars.partition[index] = group + + // Calculate combined rect + if parVars.count[group] == 0 { + parVars.cover[group] = parVars.branchBuf[index].rect + } else { + parVars.cover[group] = d13combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group]) + } + + // Calculate volume of combined rect + parVars.area[group] = d13calcRectVolume(&parVars.cover[group]) + + parVars.count[group]++ +} + +// Delete a data rectangle from an index structure. +// Pass in a pointer to a d13rectT, the tid of the record, ptr to ptr to root node. +// Returns 1 if record not found, 0 if success. +// d13removeRect provides for eliminating the root. +func d13removeRect(rect *d13rectT, id interface{}, root **d13nodeT) bool { + var reInsertList *d13listNodeT + + if !d13removeRectRec(rect, id, *root, &reInsertList) { + // Found and deleted a data item + // Reinsert any branches from eliminated nodes + for reInsertList != nil { + tempNode := reInsertList.node + + for index := 0; index < tempNode.count; index++ { + // TODO go over this code. should I use (tempNode->m_level - 1)? + d13insertRect(&tempNode.branch[index], root, tempNode.level) + } + reInsertList = reInsertList.next + } + + // Check for redundant root (not leaf, 1 child) and eliminate TODO replace + // if with while? In case there is a whole branch of redundant roots... + if (*root).count == 1 && (*root).isInternalNode() { + tempNode := (*root).branch[0].child + *root = tempNode + } + return false + } else { + return true + } +} + +// Delete a rectangle from non-root part of an index structure. +// Called by d13removeRect. Descends tree recursively, +// merges branches on the way back up. +// Returns 1 if record not found, 0 if success. 
+func d13removeRectRec(rect *d13rectT, id interface{}, node *d13nodeT, listNode **d13listNodeT) bool { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + if d13overlap(*rect, node.branch[index].rect) { + if !d13removeRectRec(rect, id, node.branch[index].child, listNode) { + if node.branch[index].child.count >= d13minNodes { + // child removed, just resize parent rect + node.branch[index].rect = d13nodeCover(node.branch[index].child) + } else { + // child removed, not enough entries in node, eliminate node + d13reInsert(node.branch[index].child, listNode) + d13disconnectBranch(node, index) // Must return after this call as count has changed + } + return false + } + } + } + return true + } else { // A leaf node + for index := 0; index < node.count; index++ { + if node.branch[index].data == id { + d13disconnectBranch(node, index) // Must return after this call as count has changed + return false + } + } + return true + } +} + +// Decide whether two rectangles d13overlap. +func d13overlap(rectA, rectB d13rectT) bool { + for index := 0; index < d13numDims; index++ { + if rectA.min[index] > rectB.max[index] || + rectB.min[index] > rectA.max[index] { + return false + } + } + return true +} + +// Add a node to the reinsertion list. All its branches will later +// be reinserted into the index structure. +func d13reInsert(node *d13nodeT, listNode **d13listNodeT) { + newListNode := &d13listNodeT{} + newListNode.node = node + newListNode.next = *listNode + *listNode = newListNode +} + +// d13search in an index tree or subtree for all data retangles that d13overlap the argument rectangle. 
// Returns the running found count and false as soon as the callback asks to stop.
func d13search(node *d13nodeT, rect d13rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) {
	if node.isInternalNode() {
		// This is an internal node in the tree; recurse into overlapping children.
		for index := 0; index < node.count; index++ {
			if d13overlap(rect, node.branch[index].rect) {
				var ok bool
				foundCount, ok = d13search(node.branch[index].child, rect, foundCount, resultCallback)
				if !ok {
					// The callback indicated to stop searching
					return foundCount, false
				}
			}
		}
	} else {
		// This is a leaf node; report overlapping data items.
		for index := 0; index < node.count; index++ {
			if d13overlap(rect, node.branch[index].rect) {
				id := node.branch[index].data
				foundCount++
				if !resultCallback(id) {
					return foundCount, false // Don't continue searching
				}
			}
		}
	}
	return foundCount, true // Continue searching
}

// d14fmin returns the smaller of two float64 values.
func d14fmin(a, b float64) float64 {
	if a < b {
		return a
	}
	return b
}

// d14fmax returns the larger of two float64 values.
func d14fmax(a, b float64) float64 {
	if a > b {
		return a
	}
	return b
}

const (
	d14numDims            = 14
	d14maxNodes           = 8
	d14minNodes           = d14maxNodes / 2
	d14useSphericalVolume = true // Better split classification, may be slower on some systems
)

// Precomputed unit-hypersphere volumes for dimensions 0..20; the literal is
// indexed by d14numDims so only this build's dimension is kept.
var d14unitSphereVolume = []float64{
	0.000000, 2.000000, 3.141593, // Dimension 0,1,2
	4.188790, 4.934802, 5.263789, // Dimension 3,4,5
	5.167713, 4.724766, 4.058712, // Dimension 6,7,8
	3.298509, 2.550164, 1.884104, // Dimension 9,10,11
	1.335263, 0.910629, 0.599265, // Dimension 12,13,14
	0.381443, 0.235331, 0.140981, // Dimension 15,16,17
	0.082146, 0.046622, 0.025807, // Dimension 18,19,20
}[d14numDims]

type d14RTree struct {
	root *d14nodeT ///< Root of tree
}

/// Minimal bounding rectangle (n-dimensional)
type d14rectT struct {
	min [d14numDims]float64 ///< Min dimensions of bounding box
	max [d14numDims]float64 ///< Max dimensions of bounding box
}

/// May be data or may be another subtree
/// The parents level determines this.
/// If the parents level is 0, then this is data
type d14branchT struct {
	rect  d14rectT    ///< Bounds
	child *d14nodeT   ///< Child node (internal levels only)
	data  interface{} ///< Data Id or Ptr (leaf level only)
}

/// d14nodeT for each branch level
type d14nodeT struct {
	count  int                      ///< Number of branches in use
	level  int                      ///< Leaf is zero, others positive
	branch [d14maxNodes]d14branchT ///< Branch storage
}

func (node *d14nodeT) isInternalNode() bool {
	return (node.level > 0) // Not a leaf, but a internal node
}
func (node *d14nodeT) isLeaf() bool {
	return (node.level == 0) // A leaf, contains data
}

/// A link list of nodes for reinsertion after a delete operation
type d14listNodeT struct {
	next *d14listNodeT ///< Next in list
	node *d14nodeT     ///< Node
}

// d14notTaken marks a branchBuf slot not yet assigned to a partition group.
const d14notTaken = -1

/// Variables for finding a split partition
type d14partitionVarsT struct {
	partition [d14maxNodes + 1]int // group (0/1) per buffered branch, or d14notTaken
	total     int                  // number of branches to partition
	minFill   int                  // minimum branches per group
	count     [2]int               // members assigned to each group
	cover     [2]d14rectT          // covering rect of each group
	area      [2]float64           // cached volume of each cover

	branchBuf      [d14maxNodes + 1]d14branchT // all branches of the overflowing node + the new one
	branchCount    int
	coverSplit     d14rectT // rect covering the whole buffer
	coverSplitArea float64  // cached volume of coverSplit
}

func d14New() *d14RTree {
	// We only support machine word size simple data type eg. integer index or object pointer.
	// Since we are storing as union with non data branch
	return &d14RTree{
		root: &d14nodeT{},
	}
}

/// Insert entry
/// \param a_min Min of bounding rect
/// \param a_max Max of bounding rect
/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed.
func (tr *d14RTree) Insert(min, max [d14numDims]float64, dataId interface{}) {
	var branch d14branchT
	branch.data = dataId
	for axis := 0; axis < d14numDims; axis++ {
		branch.rect.min[axis] = min[axis]
		branch.rect.max[axis] = max[axis]
	}
	d14insertRect(&branch, &tr.root, 0)
}

/// Remove entry
/// \param a_min Min of bounding rect
/// \param a_max Max of bounding rect
/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed.
func (tr *d14RTree) Remove(min, max [d14numDims]float64, dataId interface{}) {
	var rect d14rectT
	for axis := 0; axis < d14numDims; axis++ {
		rect.min[axis] = min[axis]
		rect.max[axis] = max[axis]
	}
	d14removeRect(&rect, dataId, &tr.root)
}

/// Find all within d14search rectangle
/// \param a_min Min of d14search bounding rect
/// \param a_max Max of d14search bounding rect
/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching
/// \return Returns the number of entries found
func (tr *d14RTree) Search(min, max [d14numDims]float64, resultCallback func(data interface{}) bool) int {
	var rect d14rectT
	for axis := 0; axis < d14numDims; axis++ {
		rect.min[axis] = min[axis]
		rect.max[axis] = max[axis]
	}
	foundCount, _ := d14search(tr.root, rect, 0, resultCallback)
	return foundCount
}

/// Count the data elements in this container. This is slow as no internal counter is maintained.
func (tr *d14RTree) Count() int {
	var count int
	d14countRec(tr.root, &count)
	return count
}

/// Remove all entries from tree
func (tr *d14RTree) RemoveAll() {
	// Delete all existing nodes; the garbage collector reclaims them.
	tr.root = &d14nodeT{}
}

// d14countRec accumulates the number of data items under node into count.
func d14countRec(node *d14nodeT, count *int) {
	if node.isInternalNode() { // not a leaf node
		for index := 0; index < node.count; index++ {
			d14countRec(node.branch[index].child, count)
		}
	} else { // A leaf node
		*count += node.count
	}
}

// Inserts a new data rectangle into the index structure.
// Recursively descends tree, propagates splits back up.
// Returns false if node was not split. Old node updated.
// If node was split, returns true and sets the pointer pointed to by
// new_node to point to the new node. Old node updated to become one of two.
// The level argument specifies the number of steps up from the leaf
// level to insert; e.g. a data rectangle goes in at level = 0.
func d14insertRectRec(branch *d14branchT, node *d14nodeT, newNode **d14nodeT, level int) bool {
	// recurse until we reach the correct level for the new record. data records
	// will always be called with a_level == 0 (leaf)
	if node.level > level {
		// Still above level for insertion, go down tree recursively
		var otherNode *d14nodeT

		// find the optimal branch for this record
		index := d14pickBranch(&branch.rect, node)

		// recursively insert this record into the picked branch
		childWasSplit := d14insertRectRec(branch, node.branch[index].child, &otherNode, level)

		if !childWasSplit {
			// Child was not split. Merge the bounding box of the new record with the
			// existing bounding box
			node.branch[index].rect = d14combineRect(&branch.rect, &(node.branch[index].rect))
			return false
		} else {
			// Child was split. The old branches are now re-partitioned to two nodes
			// so we have to re-calculate the bounding boxes of each node
			node.branch[index].rect = d14nodeCover(node.branch[index].child)
			var newBranch d14branchT
			newBranch.child = otherNode
			newBranch.rect = d14nodeCover(otherNode)

			// The old node is already a child of a_node. Now add the newly-created
			// node to a_node as well. a_node might be split because of that.
			return d14addBranch(&newBranch, node, newNode)
		}
	} else if node.level == level {
		// We have reached level for insertion. Add rect, split if necessary
		return d14addBranch(branch, node, newNode)
	} else {
		// Should never occur
		return false
	}
}

// Insert a data rectangle into an index structure.
// d14insertRect provides for splitting the root;
// returns true if root was split, false if it was not.
// The level argument specifies the number of steps up from the leaf
// level to insert; e.g. a data rectangle goes in at level = 0.
// d14insertRectRec does the recursion.
func d14insertRect(branch *d14branchT, root **d14nodeT, level int) bool {
	var newNode *d14nodeT

	if d14insertRectRec(branch, *root, &newNode, level) { // Root split

		// Grow tree taller and new root
		newRoot := &d14nodeT{}
		newRoot.level = (*root).level + 1

		var newBranch d14branchT

		// add old root node as a child of the new root
		newBranch.rect = d14nodeCover(*root)
		newBranch.child = *root
		d14addBranch(&newBranch, newRoot, nil)

		// add the split node as a child of the new root
		newBranch.rect = d14nodeCover(newNode)
		newBranch.child = newNode
		d14addBranch(&newBranch, newRoot, nil)

		// set the new root as the root node
		*root = newRoot

		return true
	}
	return false
}

// Find the smallest rectangle that includes all rectangles in branches of a node.
// NOTE(review): assumes node.count >= 1; branch[0] is read unconditionally.
func d14nodeCover(node *d14nodeT) d14rectT {
	rect := node.branch[0].rect
	for index := 1; index < node.count; index++ {
		rect = d14combineRect(&rect, &(node.branch[index].rect))
	}
	return rect
}

// Add a branch to a node. Split the node if necessary.
// Returns false if node not split. Old node updated.
// Returns true if node split, sets *newNode to address of new node.
// Old node updated, becomes one of two.
func d14addBranch(branch *d14branchT, node *d14nodeT, newNode **d14nodeT) bool {
	if node.count < d14maxNodes { // Split won't be necessary
		node.branch[node.count] = *branch
		node.count++
		return false
	} else {
		d14splitNode(node, branch, newNode)
		return true
	}
}

// Disconnect a dependent node.
// Caller must return (or stop using iteration index) after this as count has changed
func d14disconnectBranch(node *d14nodeT, index int) {
	// Remove element by swapping with the last element to prevent gaps in array
	node.branch[index] = node.branch[node.count-1]
	// Clear references so the garbage collector can reclaim them.
	node.branch[node.count-1].data = nil
	node.branch[node.count-1].child = nil
	node.count--
}

// Pick a branch.
Pick the one that will need the smallest increase +// in area to accomodate the new rectangle. This will result in the +// least total area for the covering rectangles in the current node. +// In case of a tie, pick the one which was smaller before, to get +// the best resolution when searching. +func d14pickBranch(rect *d14rectT, node *d14nodeT) int { + var firstTime bool = true + var increase float64 + var bestIncr float64 = -1 + var area float64 + var bestArea float64 + var best int + var tempRect d14rectT + + for index := 0; index < node.count; index++ { + curRect := &node.branch[index].rect + area = d14calcRectVolume(curRect) + tempRect = d14combineRect(rect, curRect) + increase = d14calcRectVolume(&tempRect) - area + if (increase < bestIncr) || firstTime { + best = index + bestArea = area + bestIncr = increase + firstTime = false + } else if (increase == bestIncr) && (area < bestArea) { + best = index + bestArea = area + bestIncr = increase + } + } + return best +} + +// Combine two rectangles into larger one containing both +func d14combineRect(rectA, rectB *d14rectT) d14rectT { + var newRect d14rectT + + for index := 0; index < d14numDims; index++ { + newRect.min[index] = d14fmin(rectA.min[index], rectB.min[index]) + newRect.max[index] = d14fmax(rectA.max[index], rectB.max[index]) + } + + return newRect +} + +// Split a node. +// Divides the nodes branches and the extra one between two nodes. +// Old node is one of the new ones, and one really new one is created. +// Tries more than one method for choosing a partition, uses best result. 
func d14splitNode(node *d14nodeT, branch *d14branchT, newNode **d14nodeT) {
	// Could just use local here, but member or external is faster since it is reused
	var localVars d14partitionVarsT
	parVars := &localVars

	// Load all the branches into a buffer, initialize old node
	d14getBranches(node, branch, parVars)

	// Find partition
	d14choosePartition(parVars, d14minNodes)

	// Create a new node to hold (about) half of the branches
	*newNode = &d14nodeT{}
	(*newNode).level = node.level

	// Put branches from buffer into 2 nodes according to the chosen partition
	node.count = 0
	d14loadNodes(node, *newNode, parVars)
}

// Calculate the n-dimensional volume of a rectangle
func d14rectVolume(rect *d14rectT) float64 {
	var volume float64 = 1
	for index := 0; index < d14numDims; index++ {
		volume *= rect.max[index] - rect.min[index]
	}
	return volume
}

// The exact volume of the bounding sphere for the given d14rectT
func d14rectSphericalVolume(rect *d14rectT) float64 {
	var sumOfSquares float64 = 0
	var radius float64

	for index := 0; index < d14numDims; index++ {
		halfExtent := (rect.max[index] - rect.min[index]) * 0.5
		sumOfSquares += halfExtent * halfExtent
	}

	radius = math.Sqrt(sumOfSquares)

	// Pow maybe slow, so test for common dims just use x*x, x*x*x.
	// (With d14numDims == 14 only the final math.Pow branch is reachable;
	// the chain is kept for symmetry with the generated variants.)
	if d14numDims == 5 {
		return (radius * radius * radius * radius * radius * d14unitSphereVolume)
	} else if d14numDims == 4 {
		return (radius * radius * radius * radius * d14unitSphereVolume)
	} else if d14numDims == 3 {
		return (radius * radius * radius * d14unitSphereVolume)
	} else if d14numDims == 2 {
		return (radius * radius * d14unitSphereVolume)
	} else {
		return (math.Pow(radius, d14numDims) * d14unitSphereVolume)
	}
}

// Use one of the methods to calculate rectangle volume
func d14calcRectVolume(rect *d14rectT) float64 {
	if d14useSphericalVolume {
		return d14rectSphericalVolume(rect) // Slower but helps certain merge cases
	} else {
		return d14rectVolume(rect) // Faster but can cause poor merges
	}
}

// Load branch buffer with branches from full node plus the extra branch.
func d14getBranches(node *d14nodeT, branch *d14branchT, parVars *d14partitionVarsT) {
	// Load the branch buffer
	for index := 0; index < d14maxNodes; index++ {
		parVars.branchBuf[index] = node.branch[index]
	}
	parVars.branchBuf[d14maxNodes] = *branch
	parVars.branchCount = d14maxNodes + 1

	// Calculate rect containing all in the set
	parVars.coverSplit = parVars.branchBuf[0].rect
	for index := 1; index < d14maxNodes+1; index++ {
		parVars.coverSplit = d14combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect)
	}
	parVars.coverSplitArea = d14calcRectVolume(&parVars.coverSplit)
}

// Method #0 for choosing a partition:
// As the seeds for the two groups, pick the two rects that would waste the
// most area if covered by a single rectangle, i.e. evidently the worst pair
// to have in the same group.
// Of the remaining, one at a time is chosen to be put in one of the two groups.
// The one chosen is the one with the greatest difference in area expansion
// depending on which group - the rect most strongly attracted to one group
// and repelled from the other.
// If one group gets too full (more would force other group to violate min
// fill requirement) then other group gets the rest.
// These last are the ones that can go in either group most easily.
func d14choosePartition(parVars *d14partitionVarsT, minFill int) {
	var biggestDiff float64
	var group, chosen, betterGroup int

	d14initParVars(parVars, parVars.branchCount, minFill)
	d14pickSeeds(parVars)

	// Greedily assign branches while both groups can still reach minFill.
	for ((parVars.count[0] + parVars.count[1]) < parVars.total) &&
		(parVars.count[0] < (parVars.total - parVars.minFill)) &&
		(parVars.count[1] < (parVars.total - parVars.minFill)) {
		biggestDiff = -1
		for index := 0; index < parVars.total; index++ {
			if d14notTaken == parVars.partition[index] {
				curRect := &parVars.branchBuf[index].rect
				rect0 := d14combineRect(curRect, &parVars.cover[0])
				rect1 := d14combineRect(curRect, &parVars.cover[1])
				growth0 := d14calcRectVolume(&rect0) - parVars.area[0]
				growth1 := d14calcRectVolume(&rect1) - parVars.area[1]
				diff := growth1 - growth0
				if diff >= 0 {
					group = 0
				} else {
					group = 1
					diff = -diff
				}

				if diff > biggestDiff {
					biggestDiff = diff
					chosen = index
					betterGroup = group
				} else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) {
					// Tie on preference strength: favor the smaller group.
					chosen = index
					betterGroup = group
				}
			}
		}
		d14classify(chosen, betterGroup, parVars)
	}

	// If one group too full, put remaining rects in the other
	if (parVars.count[0] + parVars.count[1]) < parVars.total {
		if parVars.count[0] >= parVars.total-parVars.minFill {
			group = 1
		} else {
			group = 0
		}
		for index := 0; index < parVars.total; index++ {
			if d14notTaken == parVars.partition[index] {
				d14classify(index, group, parVars)
			}
		}
	}
}

// Copy branches from the buffer into two nodes according to the partition.
+func d14loadNodes(nodeA, nodeB *d14nodeT, parVars *d14partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d14nodeT{nodeA, nodeB} + + // It is assured that d14addBranch here will not cause a node split. + d14addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d14partitionVarsT structure. +func d14initParVars(parVars *d14partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d14notTaken + } +} + +func d14pickSeeds(parVars *d14partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d14maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d14calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d14combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d14calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d14classify(seed0, 0, parVars) + d14classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. 
+func d14classify(index, group int, parVars *d14partitionVarsT) { + parVars.partition[index] = group + + // Calculate combined rect + if parVars.count[group] == 0 { + parVars.cover[group] = parVars.branchBuf[index].rect + } else { + parVars.cover[group] = d14combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group]) + } + + // Calculate volume of combined rect + parVars.area[group] = d14calcRectVolume(&parVars.cover[group]) + + parVars.count[group]++ +} + +// Delete a data rectangle from an index structure. +// Pass in a pointer to a d14rectT, the tid of the record, ptr to ptr to root node. +// Returns 1 if record not found, 0 if success. +// d14removeRect provides for eliminating the root. +func d14removeRect(rect *d14rectT, id interface{}, root **d14nodeT) bool { + var reInsertList *d14listNodeT + + if !d14removeRectRec(rect, id, *root, &reInsertList) { + // Found and deleted a data item + // Reinsert any branches from eliminated nodes + for reInsertList != nil { + tempNode := reInsertList.node + + for index := 0; index < tempNode.count; index++ { + // TODO go over this code. should I use (tempNode->m_level - 1)? + d14insertRect(&tempNode.branch[index], root, tempNode.level) + } + reInsertList = reInsertList.next + } + + // Check for redundant root (not leaf, 1 child) and eliminate TODO replace + // if with while? In case there is a whole branch of redundant roots... + if (*root).count == 1 && (*root).isInternalNode() { + tempNode := (*root).branch[0].child + *root = tempNode + } + return false + } else { + return true + } +} + +// Delete a rectangle from non-root part of an index structure. +// Called by d14removeRect. Descends tree recursively, +// merges branches on the way back up. +// Returns 1 if record not found, 0 if success. 
func d14removeRectRec(rect *d14rectT, id interface{}, node *d14nodeT, listNode **d14listNodeT) bool {
	if node.isInternalNode() { // not a leaf node
		for index := 0; index < node.count; index++ {
			if d14overlap(*rect, node.branch[index].rect) {
				if !d14removeRectRec(rect, id, node.branch[index].child, listNode) {
					if node.branch[index].child.count >= d14minNodes {
						// child removed, just resize parent rect
						node.branch[index].rect = d14nodeCover(node.branch[index].child)
					} else {
						// child removed, not enough entries in node, eliminate node;
						// its surviving branches are queued for reinsertion
						d14reInsert(node.branch[index].child, listNode)
						d14disconnectBranch(node, index) // Must return after this call as count has changed
					}
					return false
				}
			}
		}
		return true
	} else { // A leaf node
		for index := 0; index < node.count; index++ {
			if node.branch[index].data == id {
				d14disconnectBranch(node, index) // Must return after this call as count has changed
				return false
			}
		}
		return true
	}
}

// Decide whether two rectangles overlap.
// They overlap only if their extents intersect on every axis.
func d14overlap(rectA, rectB d14rectT) bool {
	for index := 0; index < d14numDims; index++ {
		if rectA.min[index] > rectB.max[index] ||
			rectB.min[index] > rectA.max[index] {
			return false
		}
	}
	return true
}

// Add a node to the reinsertion list (prepended). All its branches will later
// be reinserted into the index structure.
func d14reInsert(node *d14nodeT, listNode **d14listNodeT) {
	newListNode := &d14listNodeT{}
	newListNode.node = node
	newListNode.next = *listNode
	*listNode = newListNode
}

// d14search in an index tree or subtree for all data rectangles that overlap the argument rectangle.
// Returns the running found count and false as soon as the callback asks to stop.
func d14search(node *d14nodeT, rect d14rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) {
	if node.isInternalNode() {
		// This is an internal node in the tree; recurse into overlapping children.
		for index := 0; index < node.count; index++ {
			if d14overlap(rect, node.branch[index].rect) {
				var ok bool
				foundCount, ok = d14search(node.branch[index].child, rect, foundCount, resultCallback)
				if !ok {
					// The callback indicated to stop searching
					return foundCount, false
				}
			}
		}
	} else {
		// This is a leaf node; report overlapping data items.
		for index := 0; index < node.count; index++ {
			if d14overlap(rect, node.branch[index].rect) {
				id := node.branch[index].data
				foundCount++
				if !resultCallback(id) {
					return foundCount, false // Don't continue searching
				}
			}
		}
	}
	return foundCount, true // Continue searching
}

// d15fmin returns the smaller of two float64 values.
func d15fmin(a, b float64) float64 {
	if a < b {
		return a
	}
	return b
}

// d15fmax returns the larger of two float64 values.
func d15fmax(a, b float64) float64 {
	if a > b {
		return a
	}
	return b
}

const (
	d15numDims            = 15
	d15maxNodes           = 8
	d15minNodes           = d15maxNodes / 2
	d15useSphericalVolume = true // Better split classification, may be slower on some systems
)

// Precomputed unit-hypersphere volumes for dimensions 0..20; the literal is
// indexed by d15numDims so only this build's dimension is kept.
var d15unitSphereVolume = []float64{
	0.000000, 2.000000, 3.141593, // Dimension 0,1,2
	4.188790, 4.934802, 5.263789, // Dimension 3,4,5
	5.167713, 4.724766, 4.058712, // Dimension 6,7,8
	3.298509, 2.550164, 1.884104, // Dimension 9,10,11
	1.335263, 0.910629, 0.599265, // Dimension 12,13,14
	0.381443, 0.235331, 0.140981, // Dimension 15,16,17
	0.082146, 0.046622, 0.025807, // Dimension 18,19,20
}[d15numDims]

type d15RTree struct {
	root *d15nodeT ///< Root of tree
}

/// Minimal bounding rectangle (n-dimensional)
type d15rectT struct {
	min [d15numDims]float64 ///< Min dimensions of bounding box
	max [d15numDims]float64 ///< Max dimensions of bounding box
}

/// May be data or may be another subtree
/// The parents level determines this.
/// If the parents level is 0, then this is data
type d15branchT struct {
	rect  d15rectT    ///< Bounds
	child *d15nodeT   ///< Child node (internal levels only)
	data  interface{} ///< Data Id or Ptr (leaf level only)
}

/// d15nodeT for each branch level
type d15nodeT struct {
	count  int                      ///< Number of branches in use
	level  int                      ///< Leaf is zero, others positive
	branch [d15maxNodes]d15branchT ///< Branch storage
}

func (node *d15nodeT) isInternalNode() bool {
	return (node.level > 0) // Not a leaf, but a internal node
}
func (node *d15nodeT) isLeaf() bool {
	return (node.level == 0) // A leaf, contains data
}

/// A link list of nodes for reinsertion after a delete operation
type d15listNodeT struct {
	next *d15listNodeT ///< Next in list
	node *d15nodeT     ///< Node
}

// d15notTaken marks a branchBuf slot not yet assigned to a partition group.
const d15notTaken = -1

/// Variables for finding a split partition
type d15partitionVarsT struct {
	partition [d15maxNodes + 1]int // group (0/1) per buffered branch, or d15notTaken
	total     int                  // number of branches to partition
	minFill   int                  // minimum branches per group
	count     [2]int               // members assigned to each group
	cover     [2]d15rectT          // covering rect of each group
	area      [2]float64           // cached volume of each cover

	branchBuf      [d15maxNodes + 1]d15branchT // all branches of the overflowing node + the new one
	branchCount    int
	coverSplit     d15rectT // rect covering the whole buffer
	coverSplitArea float64  // cached volume of coverSplit
}

func d15New() *d15RTree {
	// We only support machine word size simple data type eg. integer index or object pointer.
	// Since we are storing as union with non data branch
	return &d15RTree{
		root: &d15nodeT{},
	}
}

/// Insert entry
/// \param a_min Min of bounding rect
/// \param a_max Max of bounding rect
/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed.
func (tr *d15RTree) Insert(min, max [d15numDims]float64, dataId interface{}) {
	var branch d15branchT
	branch.data = dataId
	for axis := 0; axis < d15numDims; axis++ {
		branch.rect.min[axis] = min[axis]
		branch.rect.max[axis] = max[axis]
	}
	d15insertRect(&branch, &tr.root, 0)
}

/// Remove entry
/// \param a_min Min of bounding rect
/// \param a_max Max of bounding rect
/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed.
func (tr *d15RTree) Remove(min, max [d15numDims]float64, dataId interface{}) {
	var rect d15rectT
	for axis := 0; axis < d15numDims; axis++ {
		rect.min[axis] = min[axis]
		rect.max[axis] = max[axis]
	}
	d15removeRect(&rect, dataId, &tr.root)
}

/// Find all within d15search rectangle
/// \param a_min Min of d15search bounding rect
/// \param a_max Max of d15search bounding rect
/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching
/// \return Returns the number of entries found
func (tr *d15RTree) Search(min, max [d15numDims]float64, resultCallback func(data interface{}) bool) int {
	var rect d15rectT
	for axis := 0; axis < d15numDims; axis++ {
		rect.min[axis] = min[axis]
		rect.max[axis] = max[axis]
	}
	foundCount, _ := d15search(tr.root, rect, 0, resultCallback)
	return foundCount
}

/// Count the data elements in this container. This is slow as no internal counter is maintained.
func (tr *d15RTree) Count() int {
	var count int
	d15countRec(tr.root, &count)
	return count
}

/// Remove all entries from tree
func (tr *d15RTree) RemoveAll() {
	// Delete all existing nodes; the garbage collector reclaims them.
	tr.root = &d15nodeT{}
}

// d15countRec accumulates the number of data items under node into count.
func d15countRec(node *d15nodeT, count *int) {
	if node.isInternalNode() { // not a leaf node
		for index := 0; index < node.count; index++ {
			d15countRec(node.branch[index].child, count)
		}
	} else { // A leaf node
		*count += node.count
	}
}

// Inserts a new data rectangle into the index structure.
// Recursively descends tree, propagates splits back up.
// Returns false if node was not split. Old node updated.
// If node was split, returns true and sets the pointer pointed to by
// new_node to point to the new node. Old node updated to become one of two.
// The level argument specifies the number of steps up from the leaf
// level to insert; e.g. a data rectangle goes in at level = 0.
func d15insertRectRec(branch *d15branchT, node *d15nodeT, newNode **d15nodeT, level int) bool {
	// recurse until we reach the correct level for the new record. data records
	// will always be called with a_level == 0 (leaf)
	if node.level > level {
		// Still above level for insertion, go down tree recursively
		var otherNode *d15nodeT

		// find the optimal branch for this record
		index := d15pickBranch(&branch.rect, node)

		// recursively insert this record into the picked branch
		childWasSplit := d15insertRectRec(branch, node.branch[index].child, &otherNode, level)

		if !childWasSplit {
			// Child was not split. Merge the bounding box of the new record with the
			// existing bounding box
			node.branch[index].rect = d15combineRect(&branch.rect, &(node.branch[index].rect))
			return false
		} else {
			// Child was split. The old branches are now re-partitioned to two nodes
			// so we have to re-calculate the bounding boxes of each node
			node.branch[index].rect = d15nodeCover(node.branch[index].child)
			var newBranch d15branchT
			newBranch.child = otherNode
			newBranch.rect = d15nodeCover(otherNode)

			// The old node is already a child of a_node. Now add the newly-created
			// node to a_node as well. a_node might be split because of that.
			return d15addBranch(&newBranch, node, newNode)
		}
	} else if node.level == level {
		// We have reached level for insertion. Add rect, split if necessary
		return d15addBranch(branch, node, newNode)
	} else {
		// Should never occur
		return false
	}
}

// Insert a data rectangle into an index structure.
// d15insertRect provides for splitting the root;
// returns true if root was split, false if it was not.
// The level argument specifies the number of steps up from the leaf
// level to insert; e.g. a data rectangle goes in at level = 0.
// d15insertRectRec does the recursion.
func d15insertRect(branch *d15branchT, root **d15nodeT, level int) bool {
	var newNode *d15nodeT

	if d15insertRectRec(branch, *root, &newNode, level) { // Root split

		// Grow tree taller and new root
		newRoot := &d15nodeT{}
		newRoot.level = (*root).level + 1

		var newBranch d15branchT

		// add old root node as a child of the new root
		newBranch.rect = d15nodeCover(*root)
		newBranch.child = *root
		d15addBranch(&newBranch, newRoot, nil)

		// add the split node as a child of the new root
		newBranch.rect = d15nodeCover(newNode)
		newBranch.child = newNode
		d15addBranch(&newBranch, newRoot, nil)

		// set the new root as the root node
		*root = newRoot

		return true
	}
	return false
}

// Find the smallest rectangle that includes all rectangles in branches of a node.
// NOTE(review): assumes node.count >= 1; branch[0] is read unconditionally.
func d15nodeCover(node *d15nodeT) d15rectT {
	rect := node.branch[0].rect
	for index := 1; index < node.count; index++ {
		rect = d15combineRect(&rect, &(node.branch[index].rect))
	}
	return rect
}

// Add a branch to a node. Split the node if necessary.
// Returns false if node not split. Old node updated.
// Returns true if node split, sets *newNode to address of new node.
// Old node updated, becomes one of two.
func d15addBranch(branch *d15branchT, node *d15nodeT, newNode **d15nodeT) bool {
	if node.count < d15maxNodes { // Split won't be necessary
		node.branch[node.count] = *branch
		node.count++
		return false
	} else {
		d15splitNode(node, branch, newNode)
		return true
	}
}

// Disconnect a dependent node.
// Caller must return (or stop using iteration index) after this as count has changed
func d15disconnectBranch(node *d15nodeT, index int) {
	// Remove element by swapping with the last element to prevent gaps in array
	node.branch[index] = node.branch[node.count-1]
	// Clear references so the garbage collector can reclaim them.
	node.branch[node.count-1].data = nil
	node.branch[node.count-1].child = nil
	node.count--
}

// Pick a branch.
Pick the one that will need the smallest increase +// in area to accomodate the new rectangle. This will result in the +// least total area for the covering rectangles in the current node. +// In case of a tie, pick the one which was smaller before, to get +// the best resolution when searching. +func d15pickBranch(rect *d15rectT, node *d15nodeT) int { + var firstTime bool = true + var increase float64 + var bestIncr float64 = -1 + var area float64 + var bestArea float64 + var best int + var tempRect d15rectT + + for index := 0; index < node.count; index++ { + curRect := &node.branch[index].rect + area = d15calcRectVolume(curRect) + tempRect = d15combineRect(rect, curRect) + increase = d15calcRectVolume(&tempRect) - area + if (increase < bestIncr) || firstTime { + best = index + bestArea = area + bestIncr = increase + firstTime = false + } else if (increase == bestIncr) && (area < bestArea) { + best = index + bestArea = area + bestIncr = increase + } + } + return best +} + +// Combine two rectangles into larger one containing both +func d15combineRect(rectA, rectB *d15rectT) d15rectT { + var newRect d15rectT + + for index := 0; index < d15numDims; index++ { + newRect.min[index] = d15fmin(rectA.min[index], rectB.min[index]) + newRect.max[index] = d15fmax(rectA.max[index], rectB.max[index]) + } + + return newRect +} + +// Split a node. +// Divides the nodes branches and the extra one between two nodes. +// Old node is one of the new ones, and one really new one is created. +// Tries more than one method for choosing a partition, uses best result. 
+func d15splitNode(node *d15nodeT, branch *d15branchT, newNode **d15nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d15partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d15getBranches(node, branch, parVars) + + // Find partition + d15choosePartition(parVars, d15minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d15nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d15loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d15rectVolume(rect *d15rectT) float64 { + var volume float64 = 1 + for index := 0; index < d15numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d15rectT +func d15rectSphericalVolume(rect *d15rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d15numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. 
+ if d15numDims == 5 { + return (radius * radius * radius * radius * radius * d15unitSphereVolume) + } else if d15numDims == 4 { + return (radius * radius * radius * radius * d15unitSphereVolume) + } else if d15numDims == 3 { + return (radius * radius * radius * d15unitSphereVolume) + } else if d15numDims == 2 { + return (radius * radius * d15unitSphereVolume) + } else { + return (math.Pow(radius, d15numDims) * d15unitSphereVolume) + } +} + +// Use one of the methods to calculate retangle volume +func d15calcRectVolume(rect *d15rectT) float64 { + if d15useSphericalVolume { + return d15rectSphericalVolume(rect) // Slower but helps certain merge cases + } else { // RTREE_USE_SPHERICAL_VOLUME + return d15rectVolume(rect) // Faster but can cause poor merges + } // RTREE_USE_SPHERICAL_VOLUME +} + +// Load branch buffer with branches from full node plus the extra branch. +func d15getBranches(node *d15nodeT, branch *d15branchT, parVars *d15partitionVarsT) { + // Load the branch buffer + for index := 0; index < d15maxNodes; index++ { + parVars.branchBuf[index] = node.branch[index] + } + parVars.branchBuf[d15maxNodes] = *branch + parVars.branchCount = d15maxNodes + 1 + + // Calculate rect containing all in the set + parVars.coverSplit = parVars.branchBuf[0].rect + for index := 1; index < d15maxNodes+1; index++ { + parVars.coverSplit = d15combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect) + } + parVars.coverSplitArea = d15calcRectVolume(&parVars.coverSplit) +} + +// Method #0 for choosing a partition: +// As the seeds for the two groups, pick the two rects that would waste the +// most area if covered by a single rectangle, i.e. evidently the worst pair +// to have in the same group. +// Of the remaining, one at a time is chosen to be put in one of the two groups. +// The one chosen is the one with the greatest difference in area expansion +// depending on which group - the rect most strongly attracted to one group +// and repelled from the other. 
+// If one group gets too full (more would force other group to violate min +// fill requirement) then other group gets the rest. +// These last are the ones that can go in either group most easily. +func d15choosePartition(parVars *d15partitionVarsT, minFill int) { + var biggestDiff float64 + var group, chosen, betterGroup int + + d15initParVars(parVars, parVars.branchCount, minFill) + d15pickSeeds(parVars) + + for ((parVars.count[0] + parVars.count[1]) < parVars.total) && + (parVars.count[0] < (parVars.total - parVars.minFill)) && + (parVars.count[1] < (parVars.total - parVars.minFill)) { + biggestDiff = -1 + for index := 0; index < parVars.total; index++ { + if d15notTaken == parVars.partition[index] { + curRect := &parVars.branchBuf[index].rect + rect0 := d15combineRect(curRect, &parVars.cover[0]) + rect1 := d15combineRect(curRect, &parVars.cover[1]) + growth0 := d15calcRectVolume(&rect0) - parVars.area[0] + growth1 := d15calcRectVolume(&rect1) - parVars.area[1] + diff := growth1 - growth0 + if diff >= 0 { + group = 0 + } else { + group = 1 + diff = -diff + } + + if diff > biggestDiff { + biggestDiff = diff + chosen = index + betterGroup = group + } else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) { + chosen = index + betterGroup = group + } + } + } + d15classify(chosen, betterGroup, parVars) + } + + // If one group too full, put remaining rects in the other + if (parVars.count[0] + parVars.count[1]) < parVars.total { + if parVars.count[0] >= parVars.total-parVars.minFill { + group = 1 + } else { + group = 0 + } + for index := 0; index < parVars.total; index++ { + if d15notTaken == parVars.partition[index] { + d15classify(index, group, parVars) + } + } + } +} + +// Copy branches from the buffer into two nodes according to the partition. 
+func d15loadNodes(nodeA, nodeB *d15nodeT, parVars *d15partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d15nodeT{nodeA, nodeB} + + // It is assured that d15addBranch here will not cause a node split. + d15addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d15partitionVarsT structure. +func d15initParVars(parVars *d15partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d15notTaken + } +} + +func d15pickSeeds(parVars *d15partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d15maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d15calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d15combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d15calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d15classify(seed0, 0, parVars) + d15classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. 
+func d15classify(index, group int, parVars *d15partitionVarsT) { + parVars.partition[index] = group + + // Calculate combined rect + if parVars.count[group] == 0 { + parVars.cover[group] = parVars.branchBuf[index].rect + } else { + parVars.cover[group] = d15combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group]) + } + + // Calculate volume of combined rect + parVars.area[group] = d15calcRectVolume(&parVars.cover[group]) + + parVars.count[group]++ +} + +// Delete a data rectangle from an index structure. +// Pass in a pointer to a d15rectT, the tid of the record, ptr to ptr to root node. +// Returns 1 if record not found, 0 if success. +// d15removeRect provides for eliminating the root. +func d15removeRect(rect *d15rectT, id interface{}, root **d15nodeT) bool { + var reInsertList *d15listNodeT + + if !d15removeRectRec(rect, id, *root, &reInsertList) { + // Found and deleted a data item + // Reinsert any branches from eliminated nodes + for reInsertList != nil { + tempNode := reInsertList.node + + for index := 0; index < tempNode.count; index++ { + // TODO go over this code. should I use (tempNode->m_level - 1)? + d15insertRect(&tempNode.branch[index], root, tempNode.level) + } + reInsertList = reInsertList.next + } + + // Check for redundant root (not leaf, 1 child) and eliminate TODO replace + // if with while? In case there is a whole branch of redundant roots... + if (*root).count == 1 && (*root).isInternalNode() { + tempNode := (*root).branch[0].child + *root = tempNode + } + return false + } else { + return true + } +} + +// Delete a rectangle from non-root part of an index structure. +// Called by d15removeRect. Descends tree recursively, +// merges branches on the way back up. +// Returns 1 if record not found, 0 if success. 
+func d15removeRectRec(rect *d15rectT, id interface{}, node *d15nodeT, listNode **d15listNodeT) bool { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + if d15overlap(*rect, node.branch[index].rect) { + if !d15removeRectRec(rect, id, node.branch[index].child, listNode) { + if node.branch[index].child.count >= d15minNodes { + // child removed, just resize parent rect + node.branch[index].rect = d15nodeCover(node.branch[index].child) + } else { + // child removed, not enough entries in node, eliminate node + d15reInsert(node.branch[index].child, listNode) + d15disconnectBranch(node, index) // Must return after this call as count has changed + } + return false + } + } + } + return true + } else { // A leaf node + for index := 0; index < node.count; index++ { + if node.branch[index].data == id { + d15disconnectBranch(node, index) // Must return after this call as count has changed + return false + } + } + return true + } +} + +// Decide whether two rectangles d15overlap. +func d15overlap(rectA, rectB d15rectT) bool { + for index := 0; index < d15numDims; index++ { + if rectA.min[index] > rectB.max[index] || + rectB.min[index] > rectA.max[index] { + return false + } + } + return true +} + +// Add a node to the reinsertion list. All its branches will later +// be reinserted into the index structure. +func d15reInsert(node *d15nodeT, listNode **d15listNodeT) { + newListNode := &d15listNodeT{} + newListNode.node = node + newListNode.next = *listNode + *listNode = newListNode +} + +// d15search in an index tree or subtree for all data retangles that d15overlap the argument rectangle. 
+func d15search(node *d15nodeT, rect d15rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) { + if node.isInternalNode() { + // This is an internal node in the tree + for index := 0; index < node.count; index++ { + if d15overlap(rect, node.branch[index].rect) { + var ok bool + foundCount, ok = d15search(node.branch[index].child, rect, foundCount, resultCallback) + if !ok { + // The callback indicated to stop searching + return foundCount, false + } + } + } + } else { + // This is a leaf node + for index := 0; index < node.count; index++ { + if d15overlap(rect, node.branch[index].rect) { + id := node.branch[index].data + foundCount++ + if !resultCallback(id) { + return foundCount, false // Don't continue searching + } + + } + } + } + return foundCount, true // Continue searching +} + +func d16fmin(a, b float64) float64 { + if a < b { + return a + } + return b +} +func d16fmax(a, b float64) float64 { + if a > b { + return a + } + return b +} + +const ( + d16numDims = 16 + d16maxNodes = 8 + d16minNodes = d16maxNodes / 2 + d16useSphericalVolume = true // Better split classification, may be slower on some systems +) + +var d16unitSphereVolume = []float64{ + 0.000000, 2.000000, 3.141593, // Dimension 0,1,2 + 4.188790, 4.934802, 5.263789, // Dimension 3,4,5 + 5.167713, 4.724766, 4.058712, // Dimension 6,7,8 + 3.298509, 2.550164, 1.884104, // Dimension 9,10,11 + 1.335263, 0.910629, 0.599265, // Dimension 12,13,14 + 0.381443, 0.235331, 0.140981, // Dimension 15,16,17 + 0.082146, 0.046622, 0.025807, // Dimension 18,19,20 +}[d16numDims] + +type d16RTree struct { + root *d16nodeT ///< Root of tree +} + +/// Minimal bounding rectangle (n-dimensional) +type d16rectT struct { + min [d16numDims]float64 ///< Min dimensions of bounding box + max [d16numDims]float64 ///< Max dimensions of bounding box +} + +/// May be data or may be another subtree +/// The parents level determines this. 
+/// If the parents level is 0, then this is data +type d16branchT struct { + rect d16rectT ///< Bounds + child *d16nodeT ///< Child node + data interface{} ///< Data Id or Ptr +} + +/// d16nodeT for each branch level +type d16nodeT struct { + count int ///< Count + level int ///< Leaf is zero, others positive + branch [d16maxNodes]d16branchT ///< Branch +} + +func (node *d16nodeT) isInternalNode() bool { + return (node.level > 0) // Not a leaf, but a internal node +} +func (node *d16nodeT) isLeaf() bool { + return (node.level == 0) // A leaf, contains data +} + +/// A link list of nodes for reinsertion after a delete operation +type d16listNodeT struct { + next *d16listNodeT ///< Next in list + node *d16nodeT ///< Node +} + +const d16notTaken = -1 // indicates that position + +/// Variables for finding a split partition +type d16partitionVarsT struct { + partition [d16maxNodes + 1]int + total int + minFill int + count [2]int + cover [2]d16rectT + area [2]float64 + + branchBuf [d16maxNodes + 1]d16branchT + branchCount int + coverSplit d16rectT + coverSplitArea float64 +} + +func d16New() *d16RTree { + // We only support machine word size simple data type eg. integer index or object pointer. + // Since we are storing as union with non data branch + return &d16RTree{ + root: &d16nodeT{}, + } +} + +/// Insert entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d16RTree) Insert(min, max [d16numDims]float64, dataId interface{}) { + var branch d16branchT + branch.data = dataId + for axis := 0; axis < d16numDims; axis++ { + branch.rect.min[axis] = min[axis] + branch.rect.max[axis] = max[axis] + } + d16insertRect(&branch, &tr.root, 0) +} + +/// Remove entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. 
+func (tr *d16RTree) Remove(min, max [d16numDims]float64, dataId interface{}) { + var rect d16rectT + for axis := 0; axis < d16numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + d16removeRect(&rect, dataId, &tr.root) +} + +/// Find all within d16search rectangle +/// \param a_min Min of d16search bounding rect +/// \param a_max Max of d16search bounding rect +/// \param a_searchResult d16search result array. Caller should set grow size. Function will reset, not append to array. +/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching +/// \param a_context User context to pass as parameter to a_resultCallback +/// \return Returns the number of entries found +func (tr *d16RTree) Search(min, max [d16numDims]float64, resultCallback func(data interface{}) bool) int { + var rect d16rectT + for axis := 0; axis < d16numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + foundCount, _ := d16search(tr.root, rect, 0, resultCallback) + return foundCount +} + +/// Count the data elements in this container. This is slow as no internal counter is maintained. +func (tr *d16RTree) Count() int { + var count int + d16countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d16RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d16nodeT{} +} + +func d16countRec(node *d16nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d16countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. 
+// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +func d16insertRectRec(branch *d16branchT, node *d16nodeT, newNode **d16nodeT, level int) bool { + // recurse until we reach the correct level for the new record. data records + // will always be called with a_level == 0 (leaf) + if node.level > level { + // Still above level for insertion, go down tree recursively + var otherNode *d16nodeT + //var newBranch d16branchT + + // find the optimal branch for this record + index := d16pickBranch(&branch.rect, node) + + // recursively insert this record into the picked branch + childWasSplit := d16insertRectRec(branch, node.branch[index].child, &otherNode, level) + + if !childWasSplit { + // Child was not split. Merge the bounding box of the new record with the + // existing bounding box + node.branch[index].rect = d16combineRect(&branch.rect, &(node.branch[index].rect)) + return false + } else { + // Child was split. The old branches are now re-partitioned to two nodes + // so we have to re-calculate the bounding boxes of each node + node.branch[index].rect = d16nodeCover(node.branch[index].child) + var newBranch d16branchT + newBranch.child = otherNode + newBranch.rect = d16nodeCover(otherNode) + + // The old node is already a child of a_node. Now add the newly-created + // node to a_node as well. a_node might be split because of that. + return d16addBranch(&newBranch, node, newNode) + } + } else if node.level == level { + // We have reached level for insertion. Add rect, split if necessary + return d16addBranch(branch, node, newNode) + } else { + // Should never occur + return false + } +} + +// Insert a data rectangle into an index structure. +// d16insertRect provides for splitting the root; +// returns 1 if root was split, 0 if it was not. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. 
+// InsertRect2 does the recursion. +// +func d16insertRect(branch *d16branchT, root **d16nodeT, level int) bool { + var newNode *d16nodeT + + if d16insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d16nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d16branchT + + // add old root node as a child of the new root + newBranch.rect = d16nodeCover(*root) + newBranch.child = *root + d16addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d16nodeCover(newNode) + newBranch.child = newNode + d16addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d16nodeCover(node *d16nodeT) d16rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d16combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d16addBranch(branch *d16branchT, node *d16nodeT, newNode **d16nodeT) bool { + if node.count < d16maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d16splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d16disconnectBranch(node *d16nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. 
Pick the one that will need the smallest increase +// in area to accomodate the new rectangle. This will result in the +// least total area for the covering rectangles in the current node. +// In case of a tie, pick the one which was smaller before, to get +// the best resolution when searching. +func d16pickBranch(rect *d16rectT, node *d16nodeT) int { + var firstTime bool = true + var increase float64 + var bestIncr float64 = -1 + var area float64 + var bestArea float64 + var best int + var tempRect d16rectT + + for index := 0; index < node.count; index++ { + curRect := &node.branch[index].rect + area = d16calcRectVolume(curRect) + tempRect = d16combineRect(rect, curRect) + increase = d16calcRectVolume(&tempRect) - area + if (increase < bestIncr) || firstTime { + best = index + bestArea = area + bestIncr = increase + firstTime = false + } else if (increase == bestIncr) && (area < bestArea) { + best = index + bestArea = area + bestIncr = increase + } + } + return best +} + +// Combine two rectangles into larger one containing both +func d16combineRect(rectA, rectB *d16rectT) d16rectT { + var newRect d16rectT + + for index := 0; index < d16numDims; index++ { + newRect.min[index] = d16fmin(rectA.min[index], rectB.min[index]) + newRect.max[index] = d16fmax(rectA.max[index], rectB.max[index]) + } + + return newRect +} + +// Split a node. +// Divides the nodes branches and the extra one between two nodes. +// Old node is one of the new ones, and one really new one is created. +// Tries more than one method for choosing a partition, uses best result. 
+func d16splitNode(node *d16nodeT, branch *d16branchT, newNode **d16nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d16partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d16getBranches(node, branch, parVars) + + // Find partition + d16choosePartition(parVars, d16minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d16nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d16loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d16rectVolume(rect *d16rectT) float64 { + var volume float64 = 1 + for index := 0; index < d16numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d16rectT +func d16rectSphericalVolume(rect *d16rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d16numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. 
+ if d16numDims == 5 { + return (radius * radius * radius * radius * radius * d16unitSphereVolume) + } else if d16numDims == 4 { + return (radius * radius * radius * radius * d16unitSphereVolume) + } else if d16numDims == 3 { + return (radius * radius * radius * d16unitSphereVolume) + } else if d16numDims == 2 { + return (radius * radius * d16unitSphereVolume) + } else { + return (math.Pow(radius, d16numDims) * d16unitSphereVolume) + } +} + +// Use one of the methods to calculate retangle volume +func d16calcRectVolume(rect *d16rectT) float64 { + if d16useSphericalVolume { + return d16rectSphericalVolume(rect) // Slower but helps certain merge cases + } else { // RTREE_USE_SPHERICAL_VOLUME + return d16rectVolume(rect) // Faster but can cause poor merges + } // RTREE_USE_SPHERICAL_VOLUME +} + +// Load branch buffer with branches from full node plus the extra branch. +func d16getBranches(node *d16nodeT, branch *d16branchT, parVars *d16partitionVarsT) { + // Load the branch buffer + for index := 0; index < d16maxNodes; index++ { + parVars.branchBuf[index] = node.branch[index] + } + parVars.branchBuf[d16maxNodes] = *branch + parVars.branchCount = d16maxNodes + 1 + + // Calculate rect containing all in the set + parVars.coverSplit = parVars.branchBuf[0].rect + for index := 1; index < d16maxNodes+1; index++ { + parVars.coverSplit = d16combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect) + } + parVars.coverSplitArea = d16calcRectVolume(&parVars.coverSplit) +} + +// Method #0 for choosing a partition: +// As the seeds for the two groups, pick the two rects that would waste the +// most area if covered by a single rectangle, i.e. evidently the worst pair +// to have in the same group. +// Of the remaining, one at a time is chosen to be put in one of the two groups. +// The one chosen is the one with the greatest difference in area expansion +// depending on which group - the rect most strongly attracted to one group +// and repelled from the other. 
+// If one group gets too full (more would force other group to violate min +// fill requirement) then other group gets the rest. +// These last are the ones that can go in either group most easily. +func d16choosePartition(parVars *d16partitionVarsT, minFill int) { + var biggestDiff float64 + var group, chosen, betterGroup int + + d16initParVars(parVars, parVars.branchCount, minFill) + d16pickSeeds(parVars) + + for ((parVars.count[0] + parVars.count[1]) < parVars.total) && + (parVars.count[0] < (parVars.total - parVars.minFill)) && + (parVars.count[1] < (parVars.total - parVars.minFill)) { + biggestDiff = -1 + for index := 0; index < parVars.total; index++ { + if d16notTaken == parVars.partition[index] { + curRect := &parVars.branchBuf[index].rect + rect0 := d16combineRect(curRect, &parVars.cover[0]) + rect1 := d16combineRect(curRect, &parVars.cover[1]) + growth0 := d16calcRectVolume(&rect0) - parVars.area[0] + growth1 := d16calcRectVolume(&rect1) - parVars.area[1] + diff := growth1 - growth0 + if diff >= 0 { + group = 0 + } else { + group = 1 + diff = -diff + } + + if diff > biggestDiff { + biggestDiff = diff + chosen = index + betterGroup = group + } else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) { + chosen = index + betterGroup = group + } + } + } + d16classify(chosen, betterGroup, parVars) + } + + // If one group too full, put remaining rects in the other + if (parVars.count[0] + parVars.count[1]) < parVars.total { + if parVars.count[0] >= parVars.total-parVars.minFill { + group = 1 + } else { + group = 0 + } + for index := 0; index < parVars.total; index++ { + if d16notTaken == parVars.partition[index] { + d16classify(index, group, parVars) + } + } + } +} + +// Copy branches from the buffer into two nodes according to the partition. 
+func d16loadNodes(nodeA, nodeB *d16nodeT, parVars *d16partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d16nodeT{nodeA, nodeB} + + // It is assured that d16addBranch here will not cause a node split. + d16addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d16partitionVarsT structure. +func d16initParVars(parVars *d16partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d16notTaken + } +} + +func d16pickSeeds(parVars *d16partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d16maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d16calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d16combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d16calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d16classify(seed0, 0, parVars) + d16classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. 
+func d16classify(index, group int, parVars *d16partitionVarsT) { + parVars.partition[index] = group + + // Calculate combined rect + if parVars.count[group] == 0 { + parVars.cover[group] = parVars.branchBuf[index].rect + } else { + parVars.cover[group] = d16combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group]) + } + + // Calculate volume of combined rect + parVars.area[group] = d16calcRectVolume(&parVars.cover[group]) + + parVars.count[group]++ +} + +// Delete a data rectangle from an index structure. +// Pass in a pointer to a d16rectT, the tid of the record, ptr to ptr to root node. +// Returns 1 if record not found, 0 if success. +// d16removeRect provides for eliminating the root. +func d16removeRect(rect *d16rectT, id interface{}, root **d16nodeT) bool { + var reInsertList *d16listNodeT + + if !d16removeRectRec(rect, id, *root, &reInsertList) { + // Found and deleted a data item + // Reinsert any branches from eliminated nodes + for reInsertList != nil { + tempNode := reInsertList.node + + for index := 0; index < tempNode.count; index++ { + // TODO go over this code. should I use (tempNode->m_level - 1)? + d16insertRect(&tempNode.branch[index], root, tempNode.level) + } + reInsertList = reInsertList.next + } + + // Check for redundant root (not leaf, 1 child) and eliminate TODO replace + // if with while? In case there is a whole branch of redundant roots... + if (*root).count == 1 && (*root).isInternalNode() { + tempNode := (*root).branch[0].child + *root = tempNode + } + return false + } else { + return true + } +} + +// Delete a rectangle from non-root part of an index structure. +// Called by d16removeRect. Descends tree recursively, +// merges branches on the way back up. +// Returns 1 if record not found, 0 if success. 
+func d16removeRectRec(rect *d16rectT, id interface{}, node *d16nodeT, listNode **d16listNodeT) bool { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + if d16overlap(*rect, node.branch[index].rect) { + if !d16removeRectRec(rect, id, node.branch[index].child, listNode) { + if node.branch[index].child.count >= d16minNodes { + // child removed, just resize parent rect + node.branch[index].rect = d16nodeCover(node.branch[index].child) + } else { + // child removed, not enough entries in node, eliminate node + d16reInsert(node.branch[index].child, listNode) + d16disconnectBranch(node, index) // Must return after this call as count has changed + } + return false + } + } + } + return true + } else { // A leaf node + for index := 0; index < node.count; index++ { + if node.branch[index].data == id { + d16disconnectBranch(node, index) // Must return after this call as count has changed + return false + } + } + return true + } +} + +// Decide whether two rectangles d16overlap. +func d16overlap(rectA, rectB d16rectT) bool { + for index := 0; index < d16numDims; index++ { + if rectA.min[index] > rectB.max[index] || + rectB.min[index] > rectA.max[index] { + return false + } + } + return true +} + +// Add a node to the reinsertion list. All its branches will later +// be reinserted into the index structure. +func d16reInsert(node *d16nodeT, listNode **d16listNodeT) { + newListNode := &d16listNodeT{} + newListNode.node = node + newListNode.next = *listNode + *listNode = newListNode +} + +// d16search in an index tree or subtree for all data retangles that d16overlap the argument rectangle. 
+func d16search(node *d16nodeT, rect d16rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) { + if node.isInternalNode() { + // This is an internal node in the tree + for index := 0; index < node.count; index++ { + if d16overlap(rect, node.branch[index].rect) { + var ok bool + foundCount, ok = d16search(node.branch[index].child, rect, foundCount, resultCallback) + if !ok { + // The callback indicated to stop searching + return foundCount, false + } + } + } + } else { + // This is a leaf node + for index := 0; index < node.count; index++ { + if d16overlap(rect, node.branch[index].rect) { + id := node.branch[index].data + foundCount++ + if !resultCallback(id) { + return foundCount, false // Don't continue searching + } + + } + } + } + return foundCount, true // Continue searching +} + +func d17fmin(a, b float64) float64 { + if a < b { + return a + } + return b +} +func d17fmax(a, b float64) float64 { + if a > b { + return a + } + return b +} + +const ( + d17numDims = 17 + d17maxNodes = 8 + d17minNodes = d17maxNodes / 2 + d17useSphericalVolume = true // Better split classification, may be slower on some systems +) + +var d17unitSphereVolume = []float64{ + 0.000000, 2.000000, 3.141593, // Dimension 0,1,2 + 4.188790, 4.934802, 5.263789, // Dimension 3,4,5 + 5.167713, 4.724766, 4.058712, // Dimension 6,7,8 + 3.298509, 2.550164, 1.884104, // Dimension 9,10,11 + 1.335263, 0.910629, 0.599265, // Dimension 12,13,14 + 0.381443, 0.235331, 0.140981, // Dimension 15,16,17 + 0.082146, 0.046622, 0.025807, // Dimension 18,19,20 +}[d17numDims] + +type d17RTree struct { + root *d17nodeT ///< Root of tree +} + +/// Minimal bounding rectangle (n-dimensional) +type d17rectT struct { + min [d17numDims]float64 ///< Min dimensions of bounding box + max [d17numDims]float64 ///< Max dimensions of bounding box +} + +/// May be data or may be another subtree +/// The parents level determines this. 
+/// If the parents level is 0, then this is data +type d17branchT struct { + rect d17rectT ///< Bounds + child *d17nodeT ///< Child node + data interface{} ///< Data Id or Ptr +} + +/// d17nodeT for each branch level +type d17nodeT struct { + count int ///< Count + level int ///< Leaf is zero, others positive + branch [d17maxNodes]d17branchT ///< Branch +} + +func (node *d17nodeT) isInternalNode() bool { + return (node.level > 0) // Not a leaf, but a internal node +} +func (node *d17nodeT) isLeaf() bool { + return (node.level == 0) // A leaf, contains data +} + +/// A link list of nodes for reinsertion after a delete operation +type d17listNodeT struct { + next *d17listNodeT ///< Next in list + node *d17nodeT ///< Node +} + +const d17notTaken = -1 // indicates that position + +/// Variables for finding a split partition +type d17partitionVarsT struct { + partition [d17maxNodes + 1]int + total int + minFill int + count [2]int + cover [2]d17rectT + area [2]float64 + + branchBuf [d17maxNodes + 1]d17branchT + branchCount int + coverSplit d17rectT + coverSplitArea float64 +} + +func d17New() *d17RTree { + // We only support machine word size simple data type eg. integer index or object pointer. + // Since we are storing as union with non data branch + return &d17RTree{ + root: &d17nodeT{}, + } +} + +/// Insert entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d17RTree) Insert(min, max [d17numDims]float64, dataId interface{}) { + var branch d17branchT + branch.data = dataId + for axis := 0; axis < d17numDims; axis++ { + branch.rect.min[axis] = min[axis] + branch.rect.max[axis] = max[axis] + } + d17insertRect(&branch, &tr.root, 0) +} + +/// Remove entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. 
+func (tr *d17RTree) Remove(min, max [d17numDims]float64, dataId interface{}) { + var rect d17rectT + for axis := 0; axis < d17numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + d17removeRect(&rect, dataId, &tr.root) +} + +/// Find all within d17search rectangle +/// \param a_min Min of d17search bounding rect +/// \param a_max Max of d17search bounding rect +/// \param a_searchResult d17search result array. Caller should set grow size. Function will reset, not append to array. +/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching +/// \param a_context User context to pass as parameter to a_resultCallback +/// \return Returns the number of entries found +func (tr *d17RTree) Search(min, max [d17numDims]float64, resultCallback func(data interface{}) bool) int { + var rect d17rectT + for axis := 0; axis < d17numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + foundCount, _ := d17search(tr.root, rect, 0, resultCallback) + return foundCount +} + +/// Count the data elements in this container. This is slow as no internal counter is maintained. +func (tr *d17RTree) Count() int { + var count int + d17countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d17RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d17nodeT{} +} + +func d17countRec(node *d17nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d17countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. 
+// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +func d17insertRectRec(branch *d17branchT, node *d17nodeT, newNode **d17nodeT, level int) bool { + // recurse until we reach the correct level for the new record. data records + // will always be called with a_level == 0 (leaf) + if node.level > level { + // Still above level for insertion, go down tree recursively + var otherNode *d17nodeT + //var newBranch d17branchT + + // find the optimal branch for this record + index := d17pickBranch(&branch.rect, node) + + // recursively insert this record into the picked branch + childWasSplit := d17insertRectRec(branch, node.branch[index].child, &otherNode, level) + + if !childWasSplit { + // Child was not split. Merge the bounding box of the new record with the + // existing bounding box + node.branch[index].rect = d17combineRect(&branch.rect, &(node.branch[index].rect)) + return false + } else { + // Child was split. The old branches are now re-partitioned to two nodes + // so we have to re-calculate the bounding boxes of each node + node.branch[index].rect = d17nodeCover(node.branch[index].child) + var newBranch d17branchT + newBranch.child = otherNode + newBranch.rect = d17nodeCover(otherNode) + + // The old node is already a child of a_node. Now add the newly-created + // node to a_node as well. a_node might be split because of that. + return d17addBranch(&newBranch, node, newNode) + } + } else if node.level == level { + // We have reached level for insertion. Add rect, split if necessary + return d17addBranch(branch, node, newNode) + } else { + // Should never occur + return false + } +} + +// Insert a data rectangle into an index structure. +// d17insertRect provides for splitting the root; +// returns 1 if root was split, 0 if it was not. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. 
+// InsertRect2 does the recursion. +// +func d17insertRect(branch *d17branchT, root **d17nodeT, level int) bool { + var newNode *d17nodeT + + if d17insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d17nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d17branchT + + // add old root node as a child of the new root + newBranch.rect = d17nodeCover(*root) + newBranch.child = *root + d17addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d17nodeCover(newNode) + newBranch.child = newNode + d17addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d17nodeCover(node *d17nodeT) d17rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d17combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d17addBranch(branch *d17branchT, node *d17nodeT, newNode **d17nodeT) bool { + if node.count < d17maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d17splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d17disconnectBranch(node *d17nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. 
Pick the one that will need the smallest increase +// in area to accomodate the new rectangle. This will result in the +// least total area for the covering rectangles in the current node. +// In case of a tie, pick the one which was smaller before, to get +// the best resolution when searching. +func d17pickBranch(rect *d17rectT, node *d17nodeT) int { + var firstTime bool = true + var increase float64 + var bestIncr float64 = -1 + var area float64 + var bestArea float64 + var best int + var tempRect d17rectT + + for index := 0; index < node.count; index++ { + curRect := &node.branch[index].rect + area = d17calcRectVolume(curRect) + tempRect = d17combineRect(rect, curRect) + increase = d17calcRectVolume(&tempRect) - area + if (increase < bestIncr) || firstTime { + best = index + bestArea = area + bestIncr = increase + firstTime = false + } else if (increase == bestIncr) && (area < bestArea) { + best = index + bestArea = area + bestIncr = increase + } + } + return best +} + +// Combine two rectangles into larger one containing both +func d17combineRect(rectA, rectB *d17rectT) d17rectT { + var newRect d17rectT + + for index := 0; index < d17numDims; index++ { + newRect.min[index] = d17fmin(rectA.min[index], rectB.min[index]) + newRect.max[index] = d17fmax(rectA.max[index], rectB.max[index]) + } + + return newRect +} + +// Split a node. +// Divides the nodes branches and the extra one between two nodes. +// Old node is one of the new ones, and one really new one is created. +// Tries more than one method for choosing a partition, uses best result. 
+func d17splitNode(node *d17nodeT, branch *d17branchT, newNode **d17nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d17partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d17getBranches(node, branch, parVars) + + // Find partition + d17choosePartition(parVars, d17minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d17nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d17loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d17rectVolume(rect *d17rectT) float64 { + var volume float64 = 1 + for index := 0; index < d17numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d17rectT +func d17rectSphericalVolume(rect *d17rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d17numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. 
+ if d17numDims == 5 { + return (radius * radius * radius * radius * radius * d17unitSphereVolume) + } else if d17numDims == 4 { + return (radius * radius * radius * radius * d17unitSphereVolume) + } else if d17numDims == 3 { + return (radius * radius * radius * d17unitSphereVolume) + } else if d17numDims == 2 { + return (radius * radius * d17unitSphereVolume) + } else { + return (math.Pow(radius, d17numDims) * d17unitSphereVolume) + } +} + +// Use one of the methods to calculate retangle volume +func d17calcRectVolume(rect *d17rectT) float64 { + if d17useSphericalVolume { + return d17rectSphericalVolume(rect) // Slower but helps certain merge cases + } else { // RTREE_USE_SPHERICAL_VOLUME + return d17rectVolume(rect) // Faster but can cause poor merges + } // RTREE_USE_SPHERICAL_VOLUME +} + +// Load branch buffer with branches from full node plus the extra branch. +func d17getBranches(node *d17nodeT, branch *d17branchT, parVars *d17partitionVarsT) { + // Load the branch buffer + for index := 0; index < d17maxNodes; index++ { + parVars.branchBuf[index] = node.branch[index] + } + parVars.branchBuf[d17maxNodes] = *branch + parVars.branchCount = d17maxNodes + 1 + + // Calculate rect containing all in the set + parVars.coverSplit = parVars.branchBuf[0].rect + for index := 1; index < d17maxNodes+1; index++ { + parVars.coverSplit = d17combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect) + } + parVars.coverSplitArea = d17calcRectVolume(&parVars.coverSplit) +} + +// Method #0 for choosing a partition: +// As the seeds for the two groups, pick the two rects that would waste the +// most area if covered by a single rectangle, i.e. evidently the worst pair +// to have in the same group. +// Of the remaining, one at a time is chosen to be put in one of the two groups. +// The one chosen is the one with the greatest difference in area expansion +// depending on which group - the rect most strongly attracted to one group +// and repelled from the other. 
+// If one group gets too full (more would force other group to violate min +// fill requirement) then other group gets the rest. +// These last are the ones that can go in either group most easily. +func d17choosePartition(parVars *d17partitionVarsT, minFill int) { + var biggestDiff float64 + var group, chosen, betterGroup int + + d17initParVars(parVars, parVars.branchCount, minFill) + d17pickSeeds(parVars) + + for ((parVars.count[0] + parVars.count[1]) < parVars.total) && + (parVars.count[0] < (parVars.total - parVars.minFill)) && + (parVars.count[1] < (parVars.total - parVars.minFill)) { + biggestDiff = -1 + for index := 0; index < parVars.total; index++ { + if d17notTaken == parVars.partition[index] { + curRect := &parVars.branchBuf[index].rect + rect0 := d17combineRect(curRect, &parVars.cover[0]) + rect1 := d17combineRect(curRect, &parVars.cover[1]) + growth0 := d17calcRectVolume(&rect0) - parVars.area[0] + growth1 := d17calcRectVolume(&rect1) - parVars.area[1] + diff := growth1 - growth0 + if diff >= 0 { + group = 0 + } else { + group = 1 + diff = -diff + } + + if diff > biggestDiff { + biggestDiff = diff + chosen = index + betterGroup = group + } else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) { + chosen = index + betterGroup = group + } + } + } + d17classify(chosen, betterGroup, parVars) + } + + // If one group too full, put remaining rects in the other + if (parVars.count[0] + parVars.count[1]) < parVars.total { + if parVars.count[0] >= parVars.total-parVars.minFill { + group = 1 + } else { + group = 0 + } + for index := 0; index < parVars.total; index++ { + if d17notTaken == parVars.partition[index] { + d17classify(index, group, parVars) + } + } + } +} + +// Copy branches from the buffer into two nodes according to the partition. 
+func d17loadNodes(nodeA, nodeB *d17nodeT, parVars *d17partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d17nodeT{nodeA, nodeB} + + // It is assured that d17addBranch here will not cause a node split. + d17addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d17partitionVarsT structure. +func d17initParVars(parVars *d17partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d17notTaken + } +} + +func d17pickSeeds(parVars *d17partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d17maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d17calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d17combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d17calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d17classify(seed0, 0, parVars) + d17classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. 
+func d17classify(index, group int, parVars *d17partitionVarsT) { + parVars.partition[index] = group + + // Calculate combined rect + if parVars.count[group] == 0 { + parVars.cover[group] = parVars.branchBuf[index].rect + } else { + parVars.cover[group] = d17combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group]) + } + + // Calculate volume of combined rect + parVars.area[group] = d17calcRectVolume(&parVars.cover[group]) + + parVars.count[group]++ +} + +// Delete a data rectangle from an index structure. +// Pass in a pointer to a d17rectT, the tid of the record, ptr to ptr to root node. +// Returns 1 if record not found, 0 if success. +// d17removeRect provides for eliminating the root. +func d17removeRect(rect *d17rectT, id interface{}, root **d17nodeT) bool { + var reInsertList *d17listNodeT + + if !d17removeRectRec(rect, id, *root, &reInsertList) { + // Found and deleted a data item + // Reinsert any branches from eliminated nodes + for reInsertList != nil { + tempNode := reInsertList.node + + for index := 0; index < tempNode.count; index++ { + // TODO go over this code. should I use (tempNode->m_level - 1)? + d17insertRect(&tempNode.branch[index], root, tempNode.level) + } + reInsertList = reInsertList.next + } + + // Check for redundant root (not leaf, 1 child) and eliminate TODO replace + // if with while? In case there is a whole branch of redundant roots... + if (*root).count == 1 && (*root).isInternalNode() { + tempNode := (*root).branch[0].child + *root = tempNode + } + return false + } else { + return true + } +} + +// Delete a rectangle from non-root part of an index structure. +// Called by d17removeRect. Descends tree recursively, +// merges branches on the way back up. +// Returns 1 if record not found, 0 if success. 
+func d17removeRectRec(rect *d17rectT, id interface{}, node *d17nodeT, listNode **d17listNodeT) bool { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + if d17overlap(*rect, node.branch[index].rect) { + if !d17removeRectRec(rect, id, node.branch[index].child, listNode) { + if node.branch[index].child.count >= d17minNodes { + // child removed, just resize parent rect + node.branch[index].rect = d17nodeCover(node.branch[index].child) + } else { + // child removed, not enough entries in node, eliminate node + d17reInsert(node.branch[index].child, listNode) + d17disconnectBranch(node, index) // Must return after this call as count has changed + } + return false + } + } + } + return true + } else { // A leaf node + for index := 0; index < node.count; index++ { + if node.branch[index].data == id { + d17disconnectBranch(node, index) // Must return after this call as count has changed + return false + } + } + return true + } +} + +// Decide whether two rectangles d17overlap. +func d17overlap(rectA, rectB d17rectT) bool { + for index := 0; index < d17numDims; index++ { + if rectA.min[index] > rectB.max[index] || + rectB.min[index] > rectA.max[index] { + return false + } + } + return true +} + +// Add a node to the reinsertion list. All its branches will later +// be reinserted into the index structure. +func d17reInsert(node *d17nodeT, listNode **d17listNodeT) { + newListNode := &d17listNodeT{} + newListNode.node = node + newListNode.next = *listNode + *listNode = newListNode +} + +// d17search in an index tree or subtree for all data retangles that d17overlap the argument rectangle. 
+func d17search(node *d17nodeT, rect d17rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) { + if node.isInternalNode() { + // This is an internal node in the tree + for index := 0; index < node.count; index++ { + if d17overlap(rect, node.branch[index].rect) { + var ok bool + foundCount, ok = d17search(node.branch[index].child, rect, foundCount, resultCallback) + if !ok { + // The callback indicated to stop searching + return foundCount, false + } + } + } + } else { + // This is a leaf node + for index := 0; index < node.count; index++ { + if d17overlap(rect, node.branch[index].rect) { + id := node.branch[index].data + foundCount++ + if !resultCallback(id) { + return foundCount, false // Don't continue searching + } + + } + } + } + return foundCount, true // Continue searching +} + +func d18fmin(a, b float64) float64 { + if a < b { + return a + } + return b +} +func d18fmax(a, b float64) float64 { + if a > b { + return a + } + return b +} + +const ( + d18numDims = 18 + d18maxNodes = 8 + d18minNodes = d18maxNodes / 2 + d18useSphericalVolume = true // Better split classification, may be slower on some systems +) + +var d18unitSphereVolume = []float64{ + 0.000000, 2.000000, 3.141593, // Dimension 0,1,2 + 4.188790, 4.934802, 5.263789, // Dimension 3,4,5 + 5.167713, 4.724766, 4.058712, // Dimension 6,7,8 + 3.298509, 2.550164, 1.884104, // Dimension 9,10,11 + 1.335263, 0.910629, 0.599265, // Dimension 12,13,14 + 0.381443, 0.235331, 0.140981, // Dimension 15,16,17 + 0.082146, 0.046622, 0.025807, // Dimension 18,19,20 +}[d18numDims] + +type d18RTree struct { + root *d18nodeT ///< Root of tree +} + +/// Minimal bounding rectangle (n-dimensional) +type d18rectT struct { + min [d18numDims]float64 ///< Min dimensions of bounding box + max [d18numDims]float64 ///< Max dimensions of bounding box +} + +/// May be data or may be another subtree +/// The parents level determines this. 
+/// If the parents level is 0, then this is data +type d18branchT struct { + rect d18rectT ///< Bounds + child *d18nodeT ///< Child node + data interface{} ///< Data Id or Ptr +} + +/// d18nodeT for each branch level +type d18nodeT struct { + count int ///< Count + level int ///< Leaf is zero, others positive + branch [d18maxNodes]d18branchT ///< Branch +} + +func (node *d18nodeT) isInternalNode() bool { + return (node.level > 0) // Not a leaf, but a internal node +} +func (node *d18nodeT) isLeaf() bool { + return (node.level == 0) // A leaf, contains data +} + +/// A link list of nodes for reinsertion after a delete operation +type d18listNodeT struct { + next *d18listNodeT ///< Next in list + node *d18nodeT ///< Node +} + +const d18notTaken = -1 // indicates that position + +/// Variables for finding a split partition +type d18partitionVarsT struct { + partition [d18maxNodes + 1]int + total int + minFill int + count [2]int + cover [2]d18rectT + area [2]float64 + + branchBuf [d18maxNodes + 1]d18branchT + branchCount int + coverSplit d18rectT + coverSplitArea float64 +} + +func d18New() *d18RTree { + // We only support machine word size simple data type eg. integer index or object pointer. + // Since we are storing as union with non data branch + return &d18RTree{ + root: &d18nodeT{}, + } +} + +/// Insert entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d18RTree) Insert(min, max [d18numDims]float64, dataId interface{}) { + var branch d18branchT + branch.data = dataId + for axis := 0; axis < d18numDims; axis++ { + branch.rect.min[axis] = min[axis] + branch.rect.max[axis] = max[axis] + } + d18insertRect(&branch, &tr.root, 0) +} + +/// Remove entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. 
+func (tr *d18RTree) Remove(min, max [d18numDims]float64, dataId interface{}) { + var rect d18rectT + for axis := 0; axis < d18numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + d18removeRect(&rect, dataId, &tr.root) +} + +/// Find all within d18search rectangle +/// \param a_min Min of d18search bounding rect +/// \param a_max Max of d18search bounding rect +/// \param a_searchResult d18search result array. Caller should set grow size. Function will reset, not append to array. +/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching +/// \param a_context User context to pass as parameter to a_resultCallback +/// \return Returns the number of entries found +func (tr *d18RTree) Search(min, max [d18numDims]float64, resultCallback func(data interface{}) bool) int { + var rect d18rectT + for axis := 0; axis < d18numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + foundCount, _ := d18search(tr.root, rect, 0, resultCallback) + return foundCount +} + +/// Count the data elements in this container. This is slow as no internal counter is maintained. +func (tr *d18RTree) Count() int { + var count int + d18countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d18RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d18nodeT{} +} + +func d18countRec(node *d18nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d18countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. 
+// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +func d18insertRectRec(branch *d18branchT, node *d18nodeT, newNode **d18nodeT, level int) bool { + // recurse until we reach the correct level for the new record. data records + // will always be called with a_level == 0 (leaf) + if node.level > level { + // Still above level for insertion, go down tree recursively + var otherNode *d18nodeT + //var newBranch d18branchT + + // find the optimal branch for this record + index := d18pickBranch(&branch.rect, node) + + // recursively insert this record into the picked branch + childWasSplit := d18insertRectRec(branch, node.branch[index].child, &otherNode, level) + + if !childWasSplit { + // Child was not split. Merge the bounding box of the new record with the + // existing bounding box + node.branch[index].rect = d18combineRect(&branch.rect, &(node.branch[index].rect)) + return false + } else { + // Child was split. The old branches are now re-partitioned to two nodes + // so we have to re-calculate the bounding boxes of each node + node.branch[index].rect = d18nodeCover(node.branch[index].child) + var newBranch d18branchT + newBranch.child = otherNode + newBranch.rect = d18nodeCover(otherNode) + + // The old node is already a child of a_node. Now add the newly-created + // node to a_node as well. a_node might be split because of that. + return d18addBranch(&newBranch, node, newNode) + } + } else if node.level == level { + // We have reached level for insertion. Add rect, split if necessary + return d18addBranch(branch, node, newNode) + } else { + // Should never occur + return false + } +} + +// Insert a data rectangle into an index structure. +// d18insertRect provides for splitting the root; +// returns 1 if root was split, 0 if it was not. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. 
+// InsertRect2 does the recursion. +// +func d18insertRect(branch *d18branchT, root **d18nodeT, level int) bool { + var newNode *d18nodeT + + if d18insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d18nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d18branchT + + // add old root node as a child of the new root + newBranch.rect = d18nodeCover(*root) + newBranch.child = *root + d18addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d18nodeCover(newNode) + newBranch.child = newNode + d18addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d18nodeCover(node *d18nodeT) d18rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d18combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d18addBranch(branch *d18branchT, node *d18nodeT, newNode **d18nodeT) bool { + if node.count < d18maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d18splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d18disconnectBranch(node *d18nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. 
Pick the one that will need the smallest increase +// in area to accomodate the new rectangle. This will result in the +// least total area for the covering rectangles in the current node. +// In case of a tie, pick the one which was smaller before, to get +// the best resolution when searching. +func d18pickBranch(rect *d18rectT, node *d18nodeT) int { + var firstTime bool = true + var increase float64 + var bestIncr float64 = -1 + var area float64 + var bestArea float64 + var best int + var tempRect d18rectT + + for index := 0; index < node.count; index++ { + curRect := &node.branch[index].rect + area = d18calcRectVolume(curRect) + tempRect = d18combineRect(rect, curRect) + increase = d18calcRectVolume(&tempRect) - area + if (increase < bestIncr) || firstTime { + best = index + bestArea = area + bestIncr = increase + firstTime = false + } else if (increase == bestIncr) && (area < bestArea) { + best = index + bestArea = area + bestIncr = increase + } + } + return best +} + +// Combine two rectangles into larger one containing both +func d18combineRect(rectA, rectB *d18rectT) d18rectT { + var newRect d18rectT + + for index := 0; index < d18numDims; index++ { + newRect.min[index] = d18fmin(rectA.min[index], rectB.min[index]) + newRect.max[index] = d18fmax(rectA.max[index], rectB.max[index]) + } + + return newRect +} + +// Split a node. +// Divides the nodes branches and the extra one between two nodes. +// Old node is one of the new ones, and one really new one is created. +// Tries more than one method for choosing a partition, uses best result. 
+func d18splitNode(node *d18nodeT, branch *d18branchT, newNode **d18nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d18partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d18getBranches(node, branch, parVars) + + // Find partition + d18choosePartition(parVars, d18minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d18nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d18loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d18rectVolume(rect *d18rectT) float64 { + var volume float64 = 1 + for index := 0; index < d18numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d18rectT +func d18rectSphericalVolume(rect *d18rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d18numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. 
+	// Dispatch on the (compile-time constant) dimension count: plain
+	// multiplication for the common low dimensions, math.Pow otherwise.
+	switch d18numDims {
+	case 5:
+		return radius * radius * radius * radius * radius * d18unitSphereVolume
+	case 4:
+		return radius * radius * radius * radius * d18unitSphereVolume
+	case 3:
+		return radius * radius * radius * d18unitSphereVolume
+	case 2:
+		return radius * radius * d18unitSphereVolume
+	default:
+		return math.Pow(radius, d18numDims) * d18unitSphereVolume
+	}
+}
+
+// Use one of the methods to calculate rectangle volume
+func d18calcRectVolume(rect *d18rectT) float64 {
+	if d18useSphericalVolume {
+		// Slower but helps certain merge cases (RTREE_USE_SPHERICAL_VOLUME)
+		return d18rectSphericalVolume(rect)
+	}
+	// Faster but can cause poor merges
+	return d18rectVolume(rect)
+}
+
+// Load branch buffer with branches from full node plus the extra branch.
+func d18getBranches(node *d18nodeT, branch *d18branchT, parVars *d18partitionVarsT) {
+	// The buffer holds every branch of the (full) node followed by the extra one.
+	copy(parVars.branchBuf[:d18maxNodes], node.branch[:])
+	parVars.branchBuf[d18maxNodes] = *branch
+	parVars.branchCount = d18maxNodes + 1
+
+	// Compute the rect enclosing the whole set, and its volume.
+	parVars.coverSplit = parVars.branchBuf[0].rect
+	for index := 1; index < d18maxNodes+1; index++ {
+		parVars.coverSplit = d18combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect)
+	}
+	parVars.coverSplitArea = d18calcRectVolume(&parVars.coverSplit)
+}
+
+// Method #0 for choosing a partition:
+// As the seeds for the two groups, pick the two rects that would waste the
+// most area if covered by a single rectangle, i.e. evidently the worst pair
+// to have in the same group.
+// Of the remaining, one at a time is chosen to be put in one of the two groups.
+// The one chosen is the one with the greatest difference in area expansion
+// depending on which group - the rect most strongly attracted to one group
+// and repelled from the other. 
+// If one group gets too full (more would force other group to violate min +// fill requirement) then other group gets the rest. +// These last are the ones that can go in either group most easily. +func d18choosePartition(parVars *d18partitionVarsT, minFill int) { + var biggestDiff float64 + var group, chosen, betterGroup int + + d18initParVars(parVars, parVars.branchCount, minFill) + d18pickSeeds(parVars) + + for ((parVars.count[0] + parVars.count[1]) < parVars.total) && + (parVars.count[0] < (parVars.total - parVars.minFill)) && + (parVars.count[1] < (parVars.total - parVars.minFill)) { + biggestDiff = -1 + for index := 0; index < parVars.total; index++ { + if d18notTaken == parVars.partition[index] { + curRect := &parVars.branchBuf[index].rect + rect0 := d18combineRect(curRect, &parVars.cover[0]) + rect1 := d18combineRect(curRect, &parVars.cover[1]) + growth0 := d18calcRectVolume(&rect0) - parVars.area[0] + growth1 := d18calcRectVolume(&rect1) - parVars.area[1] + diff := growth1 - growth0 + if diff >= 0 { + group = 0 + } else { + group = 1 + diff = -diff + } + + if diff > biggestDiff { + biggestDiff = diff + chosen = index + betterGroup = group + } else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) { + chosen = index + betterGroup = group + } + } + } + d18classify(chosen, betterGroup, parVars) + } + + // If one group too full, put remaining rects in the other + if (parVars.count[0] + parVars.count[1]) < parVars.total { + if parVars.count[0] >= parVars.total-parVars.minFill { + group = 1 + } else { + group = 0 + } + for index := 0; index < parVars.total; index++ { + if d18notTaken == parVars.partition[index] { + d18classify(index, group, parVars) + } + } + } +} + +// Copy branches from the buffer into two nodes according to the partition. 
+func d18loadNodes(nodeA, nodeB *d18nodeT, parVars *d18partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d18nodeT{nodeA, nodeB} + + // It is assured that d18addBranch here will not cause a node split. + d18addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d18partitionVarsT structure. +func d18initParVars(parVars *d18partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d18notTaken + } +} + +func d18pickSeeds(parVars *d18partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d18maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d18calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d18combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d18calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d18classify(seed0, 0, parVars) + d18classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. 
+func d18classify(index, group int, parVars *d18partitionVarsT) { + parVars.partition[index] = group + + // Calculate combined rect + if parVars.count[group] == 0 { + parVars.cover[group] = parVars.branchBuf[index].rect + } else { + parVars.cover[group] = d18combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group]) + } + + // Calculate volume of combined rect + parVars.area[group] = d18calcRectVolume(&parVars.cover[group]) + + parVars.count[group]++ +} + +// Delete a data rectangle from an index structure. +// Pass in a pointer to a d18rectT, the tid of the record, ptr to ptr to root node. +// Returns 1 if record not found, 0 if success. +// d18removeRect provides for eliminating the root. +func d18removeRect(rect *d18rectT, id interface{}, root **d18nodeT) bool { + var reInsertList *d18listNodeT + + if !d18removeRectRec(rect, id, *root, &reInsertList) { + // Found and deleted a data item + // Reinsert any branches from eliminated nodes + for reInsertList != nil { + tempNode := reInsertList.node + + for index := 0; index < tempNode.count; index++ { + // TODO go over this code. should I use (tempNode->m_level - 1)? + d18insertRect(&tempNode.branch[index], root, tempNode.level) + } + reInsertList = reInsertList.next + } + + // Check for redundant root (not leaf, 1 child) and eliminate TODO replace + // if with while? In case there is a whole branch of redundant roots... + if (*root).count == 1 && (*root).isInternalNode() { + tempNode := (*root).branch[0].child + *root = tempNode + } + return false + } else { + return true + } +} + +// Delete a rectangle from non-root part of an index structure. +// Called by d18removeRect. Descends tree recursively, +// merges branches on the way back up. +// Returns 1 if record not found, 0 if success. 
+func d18removeRectRec(rect *d18rectT, id interface{}, node *d18nodeT, listNode **d18listNodeT) bool { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + if d18overlap(*rect, node.branch[index].rect) { + if !d18removeRectRec(rect, id, node.branch[index].child, listNode) { + if node.branch[index].child.count >= d18minNodes { + // child removed, just resize parent rect + node.branch[index].rect = d18nodeCover(node.branch[index].child) + } else { + // child removed, not enough entries in node, eliminate node + d18reInsert(node.branch[index].child, listNode) + d18disconnectBranch(node, index) // Must return after this call as count has changed + } + return false + } + } + } + return true + } else { // A leaf node + for index := 0; index < node.count; index++ { + if node.branch[index].data == id { + d18disconnectBranch(node, index) // Must return after this call as count has changed + return false + } + } + return true + } +} + +// Decide whether two rectangles d18overlap. +func d18overlap(rectA, rectB d18rectT) bool { + for index := 0; index < d18numDims; index++ { + if rectA.min[index] > rectB.max[index] || + rectB.min[index] > rectA.max[index] { + return false + } + } + return true +} + +// Add a node to the reinsertion list. All its branches will later +// be reinserted into the index structure. +func d18reInsert(node *d18nodeT, listNode **d18listNodeT) { + newListNode := &d18listNodeT{} + newListNode.node = node + newListNode.next = *listNode + *listNode = newListNode +} + +// d18search in an index tree or subtree for all data retangles that d18overlap the argument rectangle. 
+func d18search(node *d18nodeT, rect d18rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) { + if node.isInternalNode() { + // This is an internal node in the tree + for index := 0; index < node.count; index++ { + if d18overlap(rect, node.branch[index].rect) { + var ok bool + foundCount, ok = d18search(node.branch[index].child, rect, foundCount, resultCallback) + if !ok { + // The callback indicated to stop searching + return foundCount, false + } + } + } + } else { + // This is a leaf node + for index := 0; index < node.count; index++ { + if d18overlap(rect, node.branch[index].rect) { + id := node.branch[index].data + foundCount++ + if !resultCallback(id) { + return foundCount, false // Don't continue searching + } + + } + } + } + return foundCount, true // Continue searching +} + +func d19fmin(a, b float64) float64 { + if a < b { + return a + } + return b +} +func d19fmax(a, b float64) float64 { + if a > b { + return a + } + return b +} + +const ( + d19numDims = 19 + d19maxNodes = 8 + d19minNodes = d19maxNodes / 2 + d19useSphericalVolume = true // Better split classification, may be slower on some systems +) + +var d19unitSphereVolume = []float64{ + 0.000000, 2.000000, 3.141593, // Dimension 0,1,2 + 4.188790, 4.934802, 5.263789, // Dimension 3,4,5 + 5.167713, 4.724766, 4.058712, // Dimension 6,7,8 + 3.298509, 2.550164, 1.884104, // Dimension 9,10,11 + 1.335263, 0.910629, 0.599265, // Dimension 12,13,14 + 0.381443, 0.235331, 0.140981, // Dimension 15,16,17 + 0.082146, 0.046622, 0.025807, // Dimension 18,19,20 +}[d19numDims] + +type d19RTree struct { + root *d19nodeT ///< Root of tree +} + +/// Minimal bounding rectangle (n-dimensional) +type d19rectT struct { + min [d19numDims]float64 ///< Min dimensions of bounding box + max [d19numDims]float64 ///< Max dimensions of bounding box +} + +/// May be data or may be another subtree +/// The parents level determines this. 
+/// If the parents level is 0, then this is data
+type d19branchT struct {
+	rect  d19rectT    ///< Bounds
+	child *d19nodeT   ///< Child node (nil for data branches)
+	data  interface{} ///< Data Id or Ptr
+}
+
+/// d19nodeT for each branch level
+type d19nodeT struct {
+	count  int                     ///< Count of branches currently in use
+	level  int                     ///< Leaf is zero, others positive
+	branch [d19maxNodes]d19branchT ///< Branch
+}
+
+func (node *d19nodeT) isInternalNode() bool {
+	return (node.level > 0) // Not a leaf, but a internal node
+}
+func (node *d19nodeT) isLeaf() bool {
+	return (node.level == 0) // A leaf, contains data
+}
+
+/// A link list of nodes for reinsertion after a delete operation
+type d19listNodeT struct {
+	next *d19listNodeT ///< Next in list
+	node *d19nodeT     ///< Node
+}
+
+const d19notTaken = -1 // indicates that position in partition[] is not yet assigned to a group
+
+/// Variables for finding a split partition
+type d19partitionVarsT struct {
+	partition [d19maxNodes + 1]int // group (0 or 1) per branch, or d19notTaken
+	total     int
+	minFill   int
+	count     [2]int
+	cover     [2]d19rectT
+	area      [2]float64
+
+	branchBuf      [d19maxNodes + 1]d19branchT
+	branchCount    int
+	coverSplit     d19rectT
+	coverSplitArea float64
+}
+
+func d19New() *d19RTree {
+	// We only support machine word size simple data type eg. integer index or object pointer.
+	// Since we are storing as union with non data branch
+	return &d19RTree{
+		root: &d19nodeT{},
+	}
+}
+
+/// Insert entry
+/// \param a_min Min of bounding rect
+/// \param a_max Max of bounding rect
+/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed.
+func (tr *d19RTree) Insert(min, max [d19numDims]float64, dataId interface{}) {
+	var branch d19branchT
+	branch.data = dataId
+	for axis := 0; axis < d19numDims; axis++ {
+		branch.rect.min[axis] = min[axis]
+		branch.rect.max[axis] = max[axis]
+	}
+	d19insertRect(&branch, &tr.root, 0)
+}
+
+/// Remove entry
+/// \param a_min Min of bounding rect
+/// \param a_max Max of bounding rect
+/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. 
+func (tr *d19RTree) Remove(min, max [d19numDims]float64, dataId interface{}) { + var rect d19rectT + for axis := 0; axis < d19numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + d19removeRect(&rect, dataId, &tr.root) +} + +/// Find all within d19search rectangle +/// \param a_min Min of d19search bounding rect +/// \param a_max Max of d19search bounding rect +/// \param a_searchResult d19search result array. Caller should set grow size. Function will reset, not append to array. +/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching +/// \param a_context User context to pass as parameter to a_resultCallback +/// \return Returns the number of entries found +func (tr *d19RTree) Search(min, max [d19numDims]float64, resultCallback func(data interface{}) bool) int { + var rect d19rectT + for axis := 0; axis < d19numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + foundCount, _ := d19search(tr.root, rect, 0, resultCallback) + return foundCount +} + +/// Count the data elements in this container. This is slow as no internal counter is maintained. +func (tr *d19RTree) Count() int { + var count int + d19countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d19RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d19nodeT{} +} + +func d19countRec(node *d19nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d19countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. 
+// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +func d19insertRectRec(branch *d19branchT, node *d19nodeT, newNode **d19nodeT, level int) bool { + // recurse until we reach the correct level for the new record. data records + // will always be called with a_level == 0 (leaf) + if node.level > level { + // Still above level for insertion, go down tree recursively + var otherNode *d19nodeT + //var newBranch d19branchT + + // find the optimal branch for this record + index := d19pickBranch(&branch.rect, node) + + // recursively insert this record into the picked branch + childWasSplit := d19insertRectRec(branch, node.branch[index].child, &otherNode, level) + + if !childWasSplit { + // Child was not split. Merge the bounding box of the new record with the + // existing bounding box + node.branch[index].rect = d19combineRect(&branch.rect, &(node.branch[index].rect)) + return false + } else { + // Child was split. The old branches are now re-partitioned to two nodes + // so we have to re-calculate the bounding boxes of each node + node.branch[index].rect = d19nodeCover(node.branch[index].child) + var newBranch d19branchT + newBranch.child = otherNode + newBranch.rect = d19nodeCover(otherNode) + + // The old node is already a child of a_node. Now add the newly-created + // node to a_node as well. a_node might be split because of that. + return d19addBranch(&newBranch, node, newNode) + } + } else if node.level == level { + // We have reached level for insertion. Add rect, split if necessary + return d19addBranch(branch, node, newNode) + } else { + // Should never occur + return false + } +} + +// Insert a data rectangle into an index structure. +// d19insertRect provides for splitting the root; +// returns 1 if root was split, 0 if it was not. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. 
+// InsertRect2 does the recursion. +// +func d19insertRect(branch *d19branchT, root **d19nodeT, level int) bool { + var newNode *d19nodeT + + if d19insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d19nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d19branchT + + // add old root node as a child of the new root + newBranch.rect = d19nodeCover(*root) + newBranch.child = *root + d19addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d19nodeCover(newNode) + newBranch.child = newNode + d19addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d19nodeCover(node *d19nodeT) d19rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d19combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d19addBranch(branch *d19branchT, node *d19nodeT, newNode **d19nodeT) bool { + if node.count < d19maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d19splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d19disconnectBranch(node *d19nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. 
Pick the one that will need the smallest increase +// in area to accomodate the new rectangle. This will result in the +// least total area for the covering rectangles in the current node. +// In case of a tie, pick the one which was smaller before, to get +// the best resolution when searching. +func d19pickBranch(rect *d19rectT, node *d19nodeT) int { + var firstTime bool = true + var increase float64 + var bestIncr float64 = -1 + var area float64 + var bestArea float64 + var best int + var tempRect d19rectT + + for index := 0; index < node.count; index++ { + curRect := &node.branch[index].rect + area = d19calcRectVolume(curRect) + tempRect = d19combineRect(rect, curRect) + increase = d19calcRectVolume(&tempRect) - area + if (increase < bestIncr) || firstTime { + best = index + bestArea = area + bestIncr = increase + firstTime = false + } else if (increase == bestIncr) && (area < bestArea) { + best = index + bestArea = area + bestIncr = increase + } + } + return best +} + +// Combine two rectangles into larger one containing both +func d19combineRect(rectA, rectB *d19rectT) d19rectT { + var newRect d19rectT + + for index := 0; index < d19numDims; index++ { + newRect.min[index] = d19fmin(rectA.min[index], rectB.min[index]) + newRect.max[index] = d19fmax(rectA.max[index], rectB.max[index]) + } + + return newRect +} + +// Split a node. +// Divides the nodes branches and the extra one between two nodes. +// Old node is one of the new ones, and one really new one is created. +// Tries more than one method for choosing a partition, uses best result. 
+func d19splitNode(node *d19nodeT, branch *d19branchT, newNode **d19nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d19partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d19getBranches(node, branch, parVars) + + // Find partition + d19choosePartition(parVars, d19minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d19nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d19loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d19rectVolume(rect *d19rectT) float64 { + var volume float64 = 1 + for index := 0; index < d19numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d19rectT +func d19rectSphericalVolume(rect *d19rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d19numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. 
+ if d19numDims == 5 { + return (radius * radius * radius * radius * radius * d19unitSphereVolume) + } else if d19numDims == 4 { + return (radius * radius * radius * radius * d19unitSphereVolume) + } else if d19numDims == 3 { + return (radius * radius * radius * d19unitSphereVolume) + } else if d19numDims == 2 { + return (radius * radius * d19unitSphereVolume) + } else { + return (math.Pow(radius, d19numDims) * d19unitSphereVolume) + } +} + +// Use one of the methods to calculate retangle volume +func d19calcRectVolume(rect *d19rectT) float64 { + if d19useSphericalVolume { + return d19rectSphericalVolume(rect) // Slower but helps certain merge cases + } else { // RTREE_USE_SPHERICAL_VOLUME + return d19rectVolume(rect) // Faster but can cause poor merges + } // RTREE_USE_SPHERICAL_VOLUME +} + +// Load branch buffer with branches from full node plus the extra branch. +func d19getBranches(node *d19nodeT, branch *d19branchT, parVars *d19partitionVarsT) { + // Load the branch buffer + for index := 0; index < d19maxNodes; index++ { + parVars.branchBuf[index] = node.branch[index] + } + parVars.branchBuf[d19maxNodes] = *branch + parVars.branchCount = d19maxNodes + 1 + + // Calculate rect containing all in the set + parVars.coverSplit = parVars.branchBuf[0].rect + for index := 1; index < d19maxNodes+1; index++ { + parVars.coverSplit = d19combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect) + } + parVars.coverSplitArea = d19calcRectVolume(&parVars.coverSplit) +} + +// Method #0 for choosing a partition: +// As the seeds for the two groups, pick the two rects that would waste the +// most area if covered by a single rectangle, i.e. evidently the worst pair +// to have in the same group. +// Of the remaining, one at a time is chosen to be put in one of the two groups. +// The one chosen is the one with the greatest difference in area expansion +// depending on which group - the rect most strongly attracted to one group +// and repelled from the other. 
+// If one group gets too full (more would force other group to violate min +// fill requirement) then other group gets the rest. +// These last are the ones that can go in either group most easily. +func d19choosePartition(parVars *d19partitionVarsT, minFill int) { + var biggestDiff float64 + var group, chosen, betterGroup int + + d19initParVars(parVars, parVars.branchCount, minFill) + d19pickSeeds(parVars) + + for ((parVars.count[0] + parVars.count[1]) < parVars.total) && + (parVars.count[0] < (parVars.total - parVars.minFill)) && + (parVars.count[1] < (parVars.total - parVars.minFill)) { + biggestDiff = -1 + for index := 0; index < parVars.total; index++ { + if d19notTaken == parVars.partition[index] { + curRect := &parVars.branchBuf[index].rect + rect0 := d19combineRect(curRect, &parVars.cover[0]) + rect1 := d19combineRect(curRect, &parVars.cover[1]) + growth0 := d19calcRectVolume(&rect0) - parVars.area[0] + growth1 := d19calcRectVolume(&rect1) - parVars.area[1] + diff := growth1 - growth0 + if diff >= 0 { + group = 0 + } else { + group = 1 + diff = -diff + } + + if diff > biggestDiff { + biggestDiff = diff + chosen = index + betterGroup = group + } else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) { + chosen = index + betterGroup = group + } + } + } + d19classify(chosen, betterGroup, parVars) + } + + // If one group too full, put remaining rects in the other + if (parVars.count[0] + parVars.count[1]) < parVars.total { + if parVars.count[0] >= parVars.total-parVars.minFill { + group = 1 + } else { + group = 0 + } + for index := 0; index < parVars.total; index++ { + if d19notTaken == parVars.partition[index] { + d19classify(index, group, parVars) + } + } + } +} + +// Copy branches from the buffer into two nodes according to the partition. 
+func d19loadNodes(nodeA, nodeB *d19nodeT, parVars *d19partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d19nodeT{nodeA, nodeB} + + // It is assured that d19addBranch here will not cause a node split. + d19addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d19partitionVarsT structure. +func d19initParVars(parVars *d19partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d19notTaken + } +} + +func d19pickSeeds(parVars *d19partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d19maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d19calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d19combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d19calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d19classify(seed0, 0, parVars) + d19classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. 
+func d19classify(index, group int, parVars *d19partitionVarsT) { + parVars.partition[index] = group + + // Calculate combined rect + if parVars.count[group] == 0 { + parVars.cover[group] = parVars.branchBuf[index].rect + } else { + parVars.cover[group] = d19combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group]) + } + + // Calculate volume of combined rect + parVars.area[group] = d19calcRectVolume(&parVars.cover[group]) + + parVars.count[group]++ +} + +// Delete a data rectangle from an index structure. +// Pass in a pointer to a d19rectT, the tid of the record, ptr to ptr to root node. +// Returns 1 if record not found, 0 if success. +// d19removeRect provides for eliminating the root. +func d19removeRect(rect *d19rectT, id interface{}, root **d19nodeT) bool { + var reInsertList *d19listNodeT + + if !d19removeRectRec(rect, id, *root, &reInsertList) { + // Found and deleted a data item + // Reinsert any branches from eliminated nodes + for reInsertList != nil { + tempNode := reInsertList.node + + for index := 0; index < tempNode.count; index++ { + // TODO go over this code. should I use (tempNode->m_level - 1)? + d19insertRect(&tempNode.branch[index], root, tempNode.level) + } + reInsertList = reInsertList.next + } + + // Check for redundant root (not leaf, 1 child) and eliminate TODO replace + // if with while? In case there is a whole branch of redundant roots... + if (*root).count == 1 && (*root).isInternalNode() { + tempNode := (*root).branch[0].child + *root = tempNode + } + return false + } else { + return true + } +} + +// Delete a rectangle from non-root part of an index structure. +// Called by d19removeRect. Descends tree recursively, +// merges branches on the way back up. +// Returns 1 if record not found, 0 if success. 
+func d19removeRectRec(rect *d19rectT, id interface{}, node *d19nodeT, listNode **d19listNodeT) bool { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + if d19overlap(*rect, node.branch[index].rect) { + if !d19removeRectRec(rect, id, node.branch[index].child, listNode) { + if node.branch[index].child.count >= d19minNodes { + // child removed, just resize parent rect + node.branch[index].rect = d19nodeCover(node.branch[index].child) + } else { + // child removed, not enough entries in node, eliminate node + d19reInsert(node.branch[index].child, listNode) + d19disconnectBranch(node, index) // Must return after this call as count has changed + } + return false + } + } + } + return true + } else { // A leaf node + for index := 0; index < node.count; index++ { + if node.branch[index].data == id { + d19disconnectBranch(node, index) // Must return after this call as count has changed + return false + } + } + return true + } +} + +// Decide whether two rectangles d19overlap. +func d19overlap(rectA, rectB d19rectT) bool { + for index := 0; index < d19numDims; index++ { + if rectA.min[index] > rectB.max[index] || + rectB.min[index] > rectA.max[index] { + return false + } + } + return true +} + +// Add a node to the reinsertion list. All its branches will later +// be reinserted into the index structure. +func d19reInsert(node *d19nodeT, listNode **d19listNodeT) { + newListNode := &d19listNodeT{} + newListNode.node = node + newListNode.next = *listNode + *listNode = newListNode +} + +// d19search in an index tree or subtree for all data retangles that d19overlap the argument rectangle. 
+func d19search(node *d19nodeT, rect d19rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) { + if node.isInternalNode() { + // This is an internal node in the tree + for index := 0; index < node.count; index++ { + if d19overlap(rect, node.branch[index].rect) { + var ok bool + foundCount, ok = d19search(node.branch[index].child, rect, foundCount, resultCallback) + if !ok { + // The callback indicated to stop searching + return foundCount, false + } + } + } + } else { + // This is a leaf node + for index := 0; index < node.count; index++ { + if d19overlap(rect, node.branch[index].rect) { + id := node.branch[index].data + foundCount++ + if !resultCallback(id) { + return foundCount, false // Don't continue searching + } + + } + } + } + return foundCount, true // Continue searching +} + +func d20fmin(a, b float64) float64 { + if a < b { + return a + } + return b +} +func d20fmax(a, b float64) float64 { + if a > b { + return a + } + return b +} + +const ( + d20numDims = 20 + d20maxNodes = 8 + d20minNodes = d20maxNodes / 2 + d20useSphericalVolume = true // Better split classification, may be slower on some systems +) + +var d20unitSphereVolume = []float64{ + 0.000000, 2.000000, 3.141593, // Dimension 0,1,2 + 4.188790, 4.934802, 5.263789, // Dimension 3,4,5 + 5.167713, 4.724766, 4.058712, // Dimension 6,7,8 + 3.298509, 2.550164, 1.884104, // Dimension 9,10,11 + 1.335263, 0.910629, 0.599265, // Dimension 12,13,14 + 0.381443, 0.235331, 0.140981, // Dimension 15,16,17 + 0.082146, 0.046622, 0.025807, // Dimension 18,19,20 +}[d20numDims] + +type d20RTree struct { + root *d20nodeT ///< Root of tree +} + +/// Minimal bounding rectangle (n-dimensional) +type d20rectT struct { + min [d20numDims]float64 ///< Min dimensions of bounding box + max [d20numDims]float64 ///< Max dimensions of bounding box +} + +/// May be data or may be another subtree +/// The parents level determines this. 
+/// If the parents level is 0, then this is data +type d20branchT struct { + rect d20rectT ///< Bounds + child *d20nodeT ///< Child node + data interface{} ///< Data Id or Ptr +} + +/// d20nodeT for each branch level +type d20nodeT struct { + count int ///< Count + level int ///< Leaf is zero, others positive + branch [d20maxNodes]d20branchT ///< Branch +} + +func (node *d20nodeT) isInternalNode() bool { + return (node.level > 0) // Not a leaf, but a internal node +} +func (node *d20nodeT) isLeaf() bool { + return (node.level == 0) // A leaf, contains data +} + +/// A link list of nodes for reinsertion after a delete operation +type d20listNodeT struct { + next *d20listNodeT ///< Next in list + node *d20nodeT ///< Node +} + +const d20notTaken = -1 // indicates that position + +/// Variables for finding a split partition +type d20partitionVarsT struct { + partition [d20maxNodes + 1]int + total int + minFill int + count [2]int + cover [2]d20rectT + area [2]float64 + + branchBuf [d20maxNodes + 1]d20branchT + branchCount int + coverSplit d20rectT + coverSplitArea float64 +} + +func d20New() *d20RTree { + // We only support machine word size simple data type eg. integer index or object pointer. + // Since we are storing as union with non data branch + return &d20RTree{ + root: &d20nodeT{}, + } +} + +/// Insert entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. +func (tr *d20RTree) Insert(min, max [d20numDims]float64, dataId interface{}) { + var branch d20branchT + branch.data = dataId + for axis := 0; axis < d20numDims; axis++ { + branch.rect.min[axis] = min[axis] + branch.rect.max[axis] = max[axis] + } + d20insertRect(&branch, &tr.root, 0) +} + +/// Remove entry +/// \param a_min Min of bounding rect +/// \param a_max Max of bounding rect +/// \param a_dataId Positive Id of data. Maybe zero, but negative numbers not allowed. 
+func (tr *d20RTree) Remove(min, max [d20numDims]float64, dataId interface{}) { + var rect d20rectT + for axis := 0; axis < d20numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + d20removeRect(&rect, dataId, &tr.root) +} + +/// Find all within d20search rectangle +/// \param a_min Min of d20search bounding rect +/// \param a_max Max of d20search bounding rect +/// \param a_searchResult d20search result array. Caller should set grow size. Function will reset, not append to array. +/// \param a_resultCallback Callback function to return result. Callback should return 'true' to continue searching +/// \param a_context User context to pass as parameter to a_resultCallback +/// \return Returns the number of entries found +func (tr *d20RTree) Search(min, max [d20numDims]float64, resultCallback func(data interface{}) bool) int { + var rect d20rectT + for axis := 0; axis < d20numDims; axis++ { + rect.min[axis] = min[axis] + rect.max[axis] = max[axis] + } + foundCount, _ := d20search(tr.root, rect, 0, resultCallback) + return foundCount +} + +/// Count the data elements in this container. This is slow as no internal counter is maintained. +func (tr *d20RTree) Count() int { + var count int + d20countRec(tr.root, &count) + return count +} + +/// Remove all entries from tree +func (tr *d20RTree) RemoveAll() { + // Delete all existing nodes + tr.root = &d20nodeT{} +} + +func d20countRec(node *d20nodeT, count *int) { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + d20countRec(node.branch[index].child, count) + } + } else { // A leaf node + *count += node.count + } +} + +// Inserts a new data rectangle into the index structure. +// Recursively descends tree, propagates splits back up. +// Returns 0 if node was not split. Old node updated. +// If node was split, returns 1 and sets the pointer pointed to by +// new_node to point to the new node. Old node updated to become one of two. 
+// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. +func d20insertRectRec(branch *d20branchT, node *d20nodeT, newNode **d20nodeT, level int) bool { + // recurse until we reach the correct level for the new record. data records + // will always be called with a_level == 0 (leaf) + if node.level > level { + // Still above level for insertion, go down tree recursively + var otherNode *d20nodeT + //var newBranch d20branchT + + // find the optimal branch for this record + index := d20pickBranch(&branch.rect, node) + + // recursively insert this record into the picked branch + childWasSplit := d20insertRectRec(branch, node.branch[index].child, &otherNode, level) + + if !childWasSplit { + // Child was not split. Merge the bounding box of the new record with the + // existing bounding box + node.branch[index].rect = d20combineRect(&branch.rect, &(node.branch[index].rect)) + return false + } else { + // Child was split. The old branches are now re-partitioned to two nodes + // so we have to re-calculate the bounding boxes of each node + node.branch[index].rect = d20nodeCover(node.branch[index].child) + var newBranch d20branchT + newBranch.child = otherNode + newBranch.rect = d20nodeCover(otherNode) + + // The old node is already a child of a_node. Now add the newly-created + // node to a_node as well. a_node might be split because of that. + return d20addBranch(&newBranch, node, newNode) + } + } else if node.level == level { + // We have reached level for insertion. Add rect, split if necessary + return d20addBranch(branch, node, newNode) + } else { + // Should never occur + return false + } +} + +// Insert a data rectangle into an index structure. +// d20insertRect provides for splitting the root; +// returns 1 if root was split, 0 if it was not. +// The level argument specifies the number of steps up from the leaf +// level to insert; e.g. a data rectangle goes in at level = 0. 
+// InsertRect2 does the recursion. +// +func d20insertRect(branch *d20branchT, root **d20nodeT, level int) bool { + var newNode *d20nodeT + + if d20insertRectRec(branch, *root, &newNode, level) { // Root split + + // Grow tree taller and new root + newRoot := &d20nodeT{} + newRoot.level = (*root).level + 1 + + var newBranch d20branchT + + // add old root node as a child of the new root + newBranch.rect = d20nodeCover(*root) + newBranch.child = *root + d20addBranch(&newBranch, newRoot, nil) + + // add the split node as a child of the new root + newBranch.rect = d20nodeCover(newNode) + newBranch.child = newNode + d20addBranch(&newBranch, newRoot, nil) + + // set the new root as the root node + *root = newRoot + + return true + } + return false +} + +// Find the smallest rectangle that includes all rectangles in branches of a node. +func d20nodeCover(node *d20nodeT) d20rectT { + rect := node.branch[0].rect + for index := 1; index < node.count; index++ { + rect = d20combineRect(&rect, &(node.branch[index].rect)) + } + return rect +} + +// Add a branch to a node. Split the node if necessary. +// Returns 0 if node not split. Old node updated. +// Returns 1 if node split, sets *new_node to address of new node. +// Old node updated, becomes one of two. +func d20addBranch(branch *d20branchT, node *d20nodeT, newNode **d20nodeT) bool { + if node.count < d20maxNodes { // Split won't be necessary + node.branch[node.count] = *branch + node.count++ + return false + } else { + d20splitNode(node, branch, newNode) + return true + } +} + +// Disconnect a dependent node. +// Caller must return (or stop using iteration index) after this as count has changed +func d20disconnectBranch(node *d20nodeT, index int) { + // Remove element by swapping with the last element to prevent gaps in array + node.branch[index] = node.branch[node.count-1] + node.branch[node.count-1].data = nil + node.branch[node.count-1].child = nil + node.count-- +} + +// Pick a branch. 
Pick the one that will need the smallest increase +// in area to accomodate the new rectangle. This will result in the +// least total area for the covering rectangles in the current node. +// In case of a tie, pick the one which was smaller before, to get +// the best resolution when searching. +func d20pickBranch(rect *d20rectT, node *d20nodeT) int { + var firstTime bool = true + var increase float64 + var bestIncr float64 = -1 + var area float64 + var bestArea float64 + var best int + var tempRect d20rectT + + for index := 0; index < node.count; index++ { + curRect := &node.branch[index].rect + area = d20calcRectVolume(curRect) + tempRect = d20combineRect(rect, curRect) + increase = d20calcRectVolume(&tempRect) - area + if (increase < bestIncr) || firstTime { + best = index + bestArea = area + bestIncr = increase + firstTime = false + } else if (increase == bestIncr) && (area < bestArea) { + best = index + bestArea = area + bestIncr = increase + } + } + return best +} + +// Combine two rectangles into larger one containing both +func d20combineRect(rectA, rectB *d20rectT) d20rectT { + var newRect d20rectT + + for index := 0; index < d20numDims; index++ { + newRect.min[index] = d20fmin(rectA.min[index], rectB.min[index]) + newRect.max[index] = d20fmax(rectA.max[index], rectB.max[index]) + } + + return newRect +} + +// Split a node. +// Divides the nodes branches and the extra one between two nodes. +// Old node is one of the new ones, and one really new one is created. +// Tries more than one method for choosing a partition, uses best result. 
+func d20splitNode(node *d20nodeT, branch *d20branchT, newNode **d20nodeT) { + // Could just use local here, but member or external is faster since it is reused + var localVars d20partitionVarsT + parVars := &localVars + + // Load all the branches into a buffer, initialize old node + d20getBranches(node, branch, parVars) + + // Find partition + d20choosePartition(parVars, d20minNodes) + + // Create a new node to hold (about) half of the branches + *newNode = &d20nodeT{} + (*newNode).level = node.level + + // Put branches from buffer into 2 nodes according to the chosen partition + node.count = 0 + d20loadNodes(node, *newNode, parVars) +} + +// Calculate the n-dimensional volume of a rectangle +func d20rectVolume(rect *d20rectT) float64 { + var volume float64 = 1 + for index := 0; index < d20numDims; index++ { + volume *= rect.max[index] - rect.min[index] + } + return volume +} + +// The exact volume of the bounding sphere for the given d20rectT +func d20rectSphericalVolume(rect *d20rectT) float64 { + var sumOfSquares float64 = 0 + var radius float64 + + for index := 0; index < d20numDims; index++ { + halfExtent := (rect.max[index] - rect.min[index]) * 0.5 + sumOfSquares += halfExtent * halfExtent + } + + radius = math.Sqrt(sumOfSquares) + + // Pow maybe slow, so test for common dims just use x*x, x*x*x. 
+ if d20numDims == 5 { + return (radius * radius * radius * radius * radius * d20unitSphereVolume) + } else if d20numDims == 4 { + return (radius * radius * radius * radius * d20unitSphereVolume) + } else if d20numDims == 3 { + return (radius * radius * radius * d20unitSphereVolume) + } else if d20numDims == 2 { + return (radius * radius * d20unitSphereVolume) + } else { + return (math.Pow(radius, d20numDims) * d20unitSphereVolume) + } +} + +// Use one of the methods to calculate retangle volume +func d20calcRectVolume(rect *d20rectT) float64 { + if d20useSphericalVolume { + return d20rectSphericalVolume(rect) // Slower but helps certain merge cases + } else { // RTREE_USE_SPHERICAL_VOLUME + return d20rectVolume(rect) // Faster but can cause poor merges + } // RTREE_USE_SPHERICAL_VOLUME +} + +// Load branch buffer with branches from full node plus the extra branch. +func d20getBranches(node *d20nodeT, branch *d20branchT, parVars *d20partitionVarsT) { + // Load the branch buffer + for index := 0; index < d20maxNodes; index++ { + parVars.branchBuf[index] = node.branch[index] + } + parVars.branchBuf[d20maxNodes] = *branch + parVars.branchCount = d20maxNodes + 1 + + // Calculate rect containing all in the set + parVars.coverSplit = parVars.branchBuf[0].rect + for index := 1; index < d20maxNodes+1; index++ { + parVars.coverSplit = d20combineRect(&parVars.coverSplit, &parVars.branchBuf[index].rect) + } + parVars.coverSplitArea = d20calcRectVolume(&parVars.coverSplit) +} + +// Method #0 for choosing a partition: +// As the seeds for the two groups, pick the two rects that would waste the +// most area if covered by a single rectangle, i.e. evidently the worst pair +// to have in the same group. +// Of the remaining, one at a time is chosen to be put in one of the two groups. +// The one chosen is the one with the greatest difference in area expansion +// depending on which group - the rect most strongly attracted to one group +// and repelled from the other. 
+// If one group gets too full (more would force other group to violate min +// fill requirement) then other group gets the rest. +// These last are the ones that can go in either group most easily. +func d20choosePartition(parVars *d20partitionVarsT, minFill int) { + var biggestDiff float64 + var group, chosen, betterGroup int + + d20initParVars(parVars, parVars.branchCount, minFill) + d20pickSeeds(parVars) + + for ((parVars.count[0] + parVars.count[1]) < parVars.total) && + (parVars.count[0] < (parVars.total - parVars.minFill)) && + (parVars.count[1] < (parVars.total - parVars.minFill)) { + biggestDiff = -1 + for index := 0; index < parVars.total; index++ { + if d20notTaken == parVars.partition[index] { + curRect := &parVars.branchBuf[index].rect + rect0 := d20combineRect(curRect, &parVars.cover[0]) + rect1 := d20combineRect(curRect, &parVars.cover[1]) + growth0 := d20calcRectVolume(&rect0) - parVars.area[0] + growth1 := d20calcRectVolume(&rect1) - parVars.area[1] + diff := growth1 - growth0 + if diff >= 0 { + group = 0 + } else { + group = 1 + diff = -diff + } + + if diff > biggestDiff { + biggestDiff = diff + chosen = index + betterGroup = group + } else if (diff == biggestDiff) && (parVars.count[group] < parVars.count[betterGroup]) { + chosen = index + betterGroup = group + } + } + } + d20classify(chosen, betterGroup, parVars) + } + + // If one group too full, put remaining rects in the other + if (parVars.count[0] + parVars.count[1]) < parVars.total { + if parVars.count[0] >= parVars.total-parVars.minFill { + group = 1 + } else { + group = 0 + } + for index := 0; index < parVars.total; index++ { + if d20notTaken == parVars.partition[index] { + d20classify(index, group, parVars) + } + } + } +} + +// Copy branches from the buffer into two nodes according to the partition. 
+func d20loadNodes(nodeA, nodeB *d20nodeT, parVars *d20partitionVarsT) { + for index := 0; index < parVars.total; index++ { + targetNodeIndex := parVars.partition[index] + targetNodes := []*d20nodeT{nodeA, nodeB} + + // It is assured that d20addBranch here will not cause a node split. + d20addBranch(&parVars.branchBuf[index], targetNodes[targetNodeIndex], nil) + } +} + +// Initialize a d20partitionVarsT structure. +func d20initParVars(parVars *d20partitionVarsT, maxRects, minFill int) { + parVars.count[0] = 0 + parVars.count[1] = 0 + parVars.area[0] = 0 + parVars.area[1] = 0 + parVars.total = maxRects + parVars.minFill = minFill + for index := 0; index < maxRects; index++ { + parVars.partition[index] = d20notTaken + } +} + +func d20pickSeeds(parVars *d20partitionVarsT) { + var seed0, seed1 int + var worst, waste float64 + var area [d20maxNodes + 1]float64 + + for index := 0; index < parVars.total; index++ { + area[index] = d20calcRectVolume(&parVars.branchBuf[index].rect) + } + + worst = -parVars.coverSplitArea - 1 + for indexA := 0; indexA < parVars.total-1; indexA++ { + for indexB := indexA + 1; indexB < parVars.total; indexB++ { + oneRect := d20combineRect(&parVars.branchBuf[indexA].rect, &parVars.branchBuf[indexB].rect) + waste = d20calcRectVolume(&oneRect) - area[indexA] - area[indexB] + if waste > worst { + worst = waste + seed0 = indexA + seed1 = indexB + } + } + } + + d20classify(seed0, 0, parVars) + d20classify(seed1, 1, parVars) +} + +// Put a branch in one of the groups. 
+func d20classify(index, group int, parVars *d20partitionVarsT) { + parVars.partition[index] = group + + // Calculate combined rect + if parVars.count[group] == 0 { + parVars.cover[group] = parVars.branchBuf[index].rect + } else { + parVars.cover[group] = d20combineRect(&parVars.branchBuf[index].rect, &parVars.cover[group]) + } + + // Calculate volume of combined rect + parVars.area[group] = d20calcRectVolume(&parVars.cover[group]) + + parVars.count[group]++ +} + +// Delete a data rectangle from an index structure. +// Pass in a pointer to a d20rectT, the tid of the record, ptr to ptr to root node. +// Returns 1 if record not found, 0 if success. +// d20removeRect provides for eliminating the root. +func d20removeRect(rect *d20rectT, id interface{}, root **d20nodeT) bool { + var reInsertList *d20listNodeT + + if !d20removeRectRec(rect, id, *root, &reInsertList) { + // Found and deleted a data item + // Reinsert any branches from eliminated nodes + for reInsertList != nil { + tempNode := reInsertList.node + + for index := 0; index < tempNode.count; index++ { + // TODO go over this code. should I use (tempNode->m_level - 1)? + d20insertRect(&tempNode.branch[index], root, tempNode.level) + } + reInsertList = reInsertList.next + } + + // Check for redundant root (not leaf, 1 child) and eliminate TODO replace + // if with while? In case there is a whole branch of redundant roots... + if (*root).count == 1 && (*root).isInternalNode() { + tempNode := (*root).branch[0].child + *root = tempNode + } + return false + } else { + return true + } +} + +// Delete a rectangle from non-root part of an index structure. +// Called by d20removeRect. Descends tree recursively, +// merges branches on the way back up. +// Returns 1 if record not found, 0 if success. 
+func d20removeRectRec(rect *d20rectT, id interface{}, node *d20nodeT, listNode **d20listNodeT) bool { + if node.isInternalNode() { // not a leaf node + for index := 0; index < node.count; index++ { + if d20overlap(*rect, node.branch[index].rect) { + if !d20removeRectRec(rect, id, node.branch[index].child, listNode) { + if node.branch[index].child.count >= d20minNodes { + // child removed, just resize parent rect + node.branch[index].rect = d20nodeCover(node.branch[index].child) + } else { + // child removed, not enough entries in node, eliminate node + d20reInsert(node.branch[index].child, listNode) + d20disconnectBranch(node, index) // Must return after this call as count has changed + } + return false + } + } + } + return true + } else { // A leaf node + for index := 0; index < node.count; index++ { + if node.branch[index].data == id { + d20disconnectBranch(node, index) // Must return after this call as count has changed + return false + } + } + return true + } +} + +// Decide whether two rectangles d20overlap. +func d20overlap(rectA, rectB d20rectT) bool { + for index := 0; index < d20numDims; index++ { + if rectA.min[index] > rectB.max[index] || + rectB.min[index] > rectA.max[index] { + return false + } + } + return true +} + +// Add a node to the reinsertion list. All its branches will later +// be reinserted into the index structure. +func d20reInsert(node *d20nodeT, listNode **d20listNodeT) { + newListNode := &d20listNodeT{} + newListNode.node = node + newListNode.next = *listNode + *listNode = newListNode +} + +// d20search in an index tree or subtree for all data retangles that d20overlap the argument rectangle. 
+func d20search(node *d20nodeT, rect d20rectT, foundCount int, resultCallback func(data interface{}) bool) (int, bool) { + if node.isInternalNode() { + // This is an internal node in the tree + for index := 0; index < node.count; index++ { + if d20overlap(rect, node.branch[index].rect) { + var ok bool + foundCount, ok = d20search(node.branch[index].child, rect, foundCount, resultCallback) + if !ok { + // The callback indicated to stop searching + return foundCount, false + } + } + } + } else { + // This is a leaf node + for index := 0; index < node.count; index++ { + if d20overlap(rect, node.branch[index].rect) { + id := node.branch[index].data + foundCount++ + if !resultCallback(id) { + return foundCount, false // Don't continue searching + } + + } + } + } + return foundCount, true // Continue searching +} diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE new file mode 100644 index 0000000..6a66aea --- /dev/null +++ b/vendor/golang.org/x/crypto/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS new file mode 100644 index 0000000..7330990 --- /dev/null +++ b/vendor/golang.org/x/crypto/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/crypto/bcrypt/base64.go b/vendor/golang.org/x/crypto/bcrypt/base64.go new file mode 100644 index 0000000..fc31160 --- /dev/null +++ b/vendor/golang.org/x/crypto/bcrypt/base64.go @@ -0,0 +1,35 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bcrypt + +import "encoding/base64" + +const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" + +var bcEncoding = base64.NewEncoding(alphabet) + +func base64Encode(src []byte) []byte { + n := bcEncoding.EncodedLen(len(src)) + dst := make([]byte, n) + bcEncoding.Encode(dst, src) + for dst[n-1] == '=' { + n-- + } + return dst[:n] +} + +func base64Decode(src []byte) ([]byte, error) { + numOfEquals := 4 - (len(src) % 4) + for i := 0; i < numOfEquals; i++ { + src = append(src, '=') + } + + dst := make([]byte, bcEncoding.DecodedLen(len(src))) + n, err := bcEncoding.Decode(dst, src) + if err != nil { + return nil, err + } + return dst[:n], nil +} diff --git a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go new file mode 100644 index 0000000..f8b807f --- /dev/null +++ b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go @@ -0,0 +1,294 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing +// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf +package bcrypt // import "golang.org/x/crypto/bcrypt" + +// The code is a port of Provos and Mazières's C implementation. +import ( + "crypto/rand" + "crypto/subtle" + "errors" + "fmt" + "golang.org/x/crypto/blowfish" + "io" + "strconv" +) + +const ( + MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword + MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword + DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword +) + +// The error returned from CompareHashAndPassword when a password and hash do +// not match. +var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password") + +// The error returned from CompareHashAndPassword when a hash is too short to +// be a bcrypt hash. +var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password") + +// The error returned from CompareHashAndPassword when a hash was created with +// a bcrypt algorithm newer than this implementation. 
+type HashVersionTooNewError byte + +func (hv HashVersionTooNewError) Error() string { + return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion) +} + +// The error returned from CompareHashAndPassword when a hash starts with something other than '$' +type InvalidHashPrefixError byte + +func (ih InvalidHashPrefixError) Error() string { + return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih)) +} + +type InvalidCostError int + +func (ic InvalidCostError) Error() string { + return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), int(MinCost), int(MaxCost)) +} + +const ( + majorVersion = '2' + minorVersion = 'a' + maxSaltSize = 16 + maxCryptedHashSize = 23 + encodedSaltSize = 22 + encodedHashSize = 31 + minHashSize = 59 +) + +// magicCipherData is an IV for the 64 Blowfish encryption calls in +// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes. +var magicCipherData = []byte{ + 0x4f, 0x72, 0x70, 0x68, + 0x65, 0x61, 0x6e, 0x42, + 0x65, 0x68, 0x6f, 0x6c, + 0x64, 0x65, 0x72, 0x53, + 0x63, 0x72, 0x79, 0x44, + 0x6f, 0x75, 0x62, 0x74, +} + +type hashed struct { + hash []byte + salt []byte + cost int // allowed range is MinCost to MaxCost + major byte + minor byte +} + +// GenerateFromPassword returns the bcrypt hash of the password at the given +// cost. If the cost given is less than MinCost, the cost will be set to +// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package, +// to compare the returned hashed password with its cleartext version. +func GenerateFromPassword(password []byte, cost int) ([]byte, error) { + p, err := newFromPassword(password, cost) + if err != nil { + return nil, err + } + return p.Hash(), nil +} + +// CompareHashAndPassword compares a bcrypt hashed password with its possible +// plaintext equivalent. 
Returns nil on success, or an error on failure. +func CompareHashAndPassword(hashedPassword, password []byte) error { + p, err := newFromHash(hashedPassword) + if err != nil { + return err + } + + otherHash, err := bcrypt(password, p.cost, p.salt) + if err != nil { + return err + } + + otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor} + if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 { + return nil + } + + return ErrMismatchedHashAndPassword +} + +// Cost returns the hashing cost used to create the given hashed +// password. When, in the future, the hashing cost of a password system needs +// to be increased in order to adjust for greater computational power, this +// function allows one to establish which passwords need to be updated. +func Cost(hashedPassword []byte) (int, error) { + p, err := newFromHash(hashedPassword) + if err != nil { + return 0, err + } + return p.cost, nil +} + +func newFromPassword(password []byte, cost int) (*hashed, error) { + if cost < MinCost { + cost = DefaultCost + } + p := new(hashed) + p.major = majorVersion + p.minor = minorVersion + + err := checkCost(cost) + if err != nil { + return nil, err + } + p.cost = cost + + unencodedSalt := make([]byte, maxSaltSize) + _, err = io.ReadFull(rand.Reader, unencodedSalt) + if err != nil { + return nil, err + } + + p.salt = base64Encode(unencodedSalt) + hash, err := bcrypt(password, p.cost, p.salt) + if err != nil { + return nil, err + } + p.hash = hash + return p, err +} + +func newFromHash(hashedSecret []byte) (*hashed, error) { + if len(hashedSecret) < minHashSize { + return nil, ErrHashTooShort + } + p := new(hashed) + n, err := p.decodeVersion(hashedSecret) + if err != nil { + return nil, err + } + hashedSecret = hashedSecret[n:] + n, err = p.decodeCost(hashedSecret) + if err != nil { + return nil, err + } + hashedSecret = hashedSecret[n:] + + // The "+2" is here because we'll have to append at most 2 '=' to the salt + // when base64 decoding it in 
expensiveBlowfishSetup(). + p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2) + copy(p.salt, hashedSecret[:encodedSaltSize]) + + hashedSecret = hashedSecret[encodedSaltSize:] + p.hash = make([]byte, len(hashedSecret)) + copy(p.hash, hashedSecret) + + return p, nil +} + +func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) { + cipherData := make([]byte, len(magicCipherData)) + copy(cipherData, magicCipherData) + + c, err := expensiveBlowfishSetup(password, uint32(cost), salt) + if err != nil { + return nil, err + } + + for i := 0; i < 24; i += 8 { + for j := 0; j < 64; j++ { + c.Encrypt(cipherData[i:i+8], cipherData[i:i+8]) + } + } + + // Bug compatibility with C bcrypt implementations. We only encode 23 of + // the 24 bytes encrypted. + hsh := base64Encode(cipherData[:maxCryptedHashSize]) + return hsh, nil +} + +func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) { + + csalt, err := base64Decode(salt) + if err != nil { + return nil, err + } + + // Bug compatibility with C bcrypt implementations. They use the trailing + // NULL in the key string during expansion. 
+ ckey := append(key, 0) + + c, err := blowfish.NewSaltedCipher(ckey, csalt) + if err != nil { + return nil, err + } + + var i, rounds uint64 + rounds = 1 << cost + for i = 0; i < rounds; i++ { + blowfish.ExpandKey(ckey, c) + blowfish.ExpandKey(csalt, c) + } + + return c, nil +} + +func (p *hashed) Hash() []byte { + arr := make([]byte, 60) + arr[0] = '$' + arr[1] = p.major + n := 2 + if p.minor != 0 { + arr[2] = p.minor + n = 3 + } + arr[n] = '$' + n += 1 + copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost))) + n += 2 + arr[n] = '$' + n += 1 + copy(arr[n:], p.salt) + n += encodedSaltSize + copy(arr[n:], p.hash) + n += encodedHashSize + return arr[:n] +} + +func (p *hashed) decodeVersion(sbytes []byte) (int, error) { + if sbytes[0] != '$' { + return -1, InvalidHashPrefixError(sbytes[0]) + } + if sbytes[1] > majorVersion { + return -1, HashVersionTooNewError(sbytes[1]) + } + p.major = sbytes[1] + n := 3 + if sbytes[2] != '$' { + p.minor = sbytes[2] + n++ + } + return n, nil +} + +// sbytes should begin where decodeVersion left off. +func (p *hashed) decodeCost(sbytes []byte) (int, error) { + cost, err := strconv.Atoi(string(sbytes[0:2])) + if err != nil { + return -1, err + } + err = checkCost(cost) + if err != nil { + return -1, err + } + p.cost = cost + return 3, nil +} + +func (p *hashed) String() string { + return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor) +} + +func checkCost(cost int) error { + if cost < MinCost || cost > MaxCost { + return InvalidCostError(cost) + } + return nil +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b.go b/vendor/golang.org/x/crypto/blake2b/blake2b.go new file mode 100644 index 0000000..fa9e48e --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b.go @@ -0,0 +1,194 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package blake2b implements the BLAKE2b hash algorithm as +// defined in RFC 7693. +package blake2b + +import ( + "encoding/binary" + "errors" + "hash" +) + +const ( + // The blocksize of BLAKE2b in bytes. + BlockSize = 128 + // The hash size of BLAKE2b-512 in bytes. + Size = 64 + // The hash size of BLAKE2b-384 in bytes. + Size384 = 48 + // The hash size of BLAKE2b-256 in bytes. + Size256 = 32 +) + +var ( + useAVX2 bool + useAVX bool + useSSE4 bool +) + +var errKeySize = errors.New("blake2b: invalid key size") + +var iv = [8]uint64{ + 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, + 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179, +} + +// Sum512 returns the BLAKE2b-512 checksum of the data. +func Sum512(data []byte) [Size]byte { + var sum [Size]byte + checkSum(&sum, Size, data) + return sum +} + +// Sum384 returns the BLAKE2b-384 checksum of the data. +func Sum384(data []byte) [Size384]byte { + var sum [Size]byte + var sum384 [Size384]byte + checkSum(&sum, Size384, data) + copy(sum384[:], sum[:Size384]) + return sum384 +} + +// Sum256 returns the BLAKE2b-256 checksum of the data. +func Sum256(data []byte) [Size256]byte { + var sum [Size]byte + var sum256 [Size256]byte + checkSum(&sum, Size256, data) + copy(sum256[:], sum[:Size256]) + return sum256 +} + +// New512 returns a new hash.Hash computing the BLAKE2b-512 checksum. A non-nil +// key turns the hash into a MAC. The key must between zero and 64 bytes long. +func New512(key []byte) (hash.Hash, error) { return newDigest(Size, key) } + +// New384 returns a new hash.Hash computing the BLAKE2b-384 checksum. A non-nil +// key turns the hash into a MAC. The key must between zero and 64 bytes long. +func New384(key []byte) (hash.Hash, error) { return newDigest(Size384, key) } + +// New256 returns a new hash.Hash computing the BLAKE2b-256 checksum. A non-nil +// key turns the hash into a MAC. The key must between zero and 64 bytes long. 
+func New256(key []byte) (hash.Hash, error) { return newDigest(Size256, key) } + +func newDigest(hashSize int, key []byte) (*digest, error) { + if len(key) > Size { + return nil, errKeySize + } + d := &digest{ + size: hashSize, + keyLen: len(key), + } + copy(d.key[:], key) + d.Reset() + return d, nil +} + +func checkSum(sum *[Size]byte, hashSize int, data []byte) { + h := iv + h[0] ^= uint64(hashSize) | (1 << 16) | (1 << 24) + var c [2]uint64 + + if length := len(data); length > BlockSize { + n := length &^ (BlockSize - 1) + if length == n { + n -= BlockSize + } + hashBlocks(&h, &c, 0, data[:n]) + data = data[n:] + } + + var block [BlockSize]byte + offset := copy(block[:], data) + remaining := uint64(BlockSize - offset) + if c[0] < remaining { + c[1]-- + } + c[0] -= remaining + + hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) + + for i, v := range h[:(hashSize+7)/8] { + binary.LittleEndian.PutUint64(sum[8*i:], v) + } +} + +type digest struct { + h [8]uint64 + c [2]uint64 + size int + block [BlockSize]byte + offset int + + key [BlockSize]byte + keyLen int +} + +func (d *digest) BlockSize() int { return BlockSize } + +func (d *digest) Size() int { return d.size } + +func (d *digest) Reset() { + d.h = iv + d.h[0] ^= uint64(d.size) | (uint64(d.keyLen) << 8) | (1 << 16) | (1 << 24) + d.offset, d.c[0], d.c[1] = 0, 0, 0 + if d.keyLen > 0 { + d.block = d.key + d.offset = BlockSize + } +} + +func (d *digest) Write(p []byte) (n int, err error) { + n = len(p) + + if d.offset > 0 { + remaining := BlockSize - d.offset + if n <= remaining { + d.offset += copy(d.block[d.offset:], p) + return + } + copy(d.block[d.offset:], p[:remaining]) + hashBlocks(&d.h, &d.c, 0, d.block[:]) + d.offset = 0 + p = p[remaining:] + } + + if length := len(p); length > BlockSize { + nn := length &^ (BlockSize - 1) + if length == nn { + nn -= BlockSize + } + hashBlocks(&d.h, &d.c, 0, p[:nn]) + p = p[nn:] + } + + if len(p) > 0 { + d.offset += copy(d.block[:], p) + } + + return +} + +func (d *digest) 
Sum(b []byte) []byte { + var block [BlockSize]byte + copy(block[:], d.block[:d.offset]) + remaining := uint64(BlockSize - d.offset) + + c := d.c + if c[0] < remaining { + c[1]-- + } + c[0] -= remaining + + h := d.h + hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) + + var sum [Size]byte + for i, v := range h[:(d.size+7)/8] { + binary.LittleEndian.PutUint64(sum[8*i:], v) + } + + return append(b, sum[:d.size]...) +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go new file mode 100644 index 0000000..8c41cf6 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go @@ -0,0 +1,43 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7,amd64,!gccgo,!appengine + +package blake2b + +func init() { + useAVX2 = supportsAVX2() + useAVX = supportsAVX() + useSSE4 = supportsSSE4() +} + +//go:noescape +func supportsSSE4() bool + +//go:noescape +func supportsAVX() bool + +//go:noescape +func supportsAVX2() bool + +//go:noescape +func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +//go:noescape +func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +//go:noescape +func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + if useAVX2 { + hashBlocksAVX2(h, c, flag, blocks) + } else if useAVX { + hashBlocksAVX(h, c, flag, blocks) + } else if useSSE4 { + hashBlocksSSE4(h, c, flag, blocks) + } else { + hashBlocksGeneric(h, c, flag, blocks) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s new file mode 100644 index 0000000..784bce6 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s @@ -0,0 +1,762 @@ +// Copyright 2016 
The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7,amd64,!gccgo,!appengine + +#include "textflag.h" + +DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·AVX_c48<>+0x08(SB)/8, 
$0x09080f0e0d0c0b0a +GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16 + +#define VPERMQ_0x39_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39 +#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93 +#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e +#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93 +#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39 + +#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \ + VPADDQ m0, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFD $-79, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPSHUFB c40, Y1, Y1; \ + VPADDQ m1, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFB c48, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPADDQ Y1, Y1, t; \ + VPSRLQ $63, Y1, Y1; \ + VPXOR t, Y1, Y1; \ + VPERMQ_0x39_Y1_Y1; \ + VPERMQ_0x4E_Y2_Y2; \ + VPERMQ_0x93_Y3_Y3; \ + VPADDQ m2, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFD $-79, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPSHUFB c40, Y1, Y1; \ + VPADDQ m3, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFB c48, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPADDQ Y1, Y1, t; \ + VPSRLQ $63, Y1, Y1; \ + VPXOR t, Y1, Y1; \ + VPERMQ_0x39_Y3_Y3; \ + VPERMQ_0x4E_Y2_Y2; \ + VPERMQ_0x93_Y1_Y1 + +#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E +#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26 +#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x2E +#define VMOVQ_SI_X14_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36 +#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E + +#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n +#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE 
$n +#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n +#define VMOVQ_SI_X14(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x76; BYTE $n +#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n + +#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01 +#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01 +#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01 +#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01 +#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01 + +#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01 + +#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8 +#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01 + +// load msg: Y12 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \ + VMOVQ_SI_X12(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X12(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y12, Y12 + +// load msg: Y13 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \ + VMOVQ_SI_X13(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X13(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y13, Y13 + +// load msg: Y14 = 
(i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \ + VMOVQ_SI_X14(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X14(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y14, Y14 + +// load msg: Y15 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \ + VMOVQ_SI_X15(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X15(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \ + VMOVQ_SI_X12_0; \ + VMOVQ_SI_X11(4*8); \ + VPINSRQ_1_SI_X12(2*8); \ + VPINSRQ_1_SI_X11(6*8); \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(1, 3, 5, 7); \ + LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \ + LOAD_MSG_AVX2_Y15(9, 11, 13, 15) + +#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \ + LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \ + LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \ + VMOVQ_SI_X11(11*8); \ + VPSHUFD $0x4E, 0*8(SI), X14; \ + VPINSRQ_1_SI_X11(5*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + LOAD_MSG_AVX2_Y15(12, 2, 7, 3) + +#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \ + VMOVQ_SI_X11(5*8); \ + VMOVDQU 11*8(SI), X12; \ + VPINSRQ_1_SI_X11(15*8); \ + VINSERTI128 $1, X11, Y12, Y12; \ + VMOVQ_SI_X13(8*8); \ + VMOVQ_SI_X11(2*8); \ + VPINSRQ_1_SI_X13_0; \ + VPINSRQ_1_SI_X11(13*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \ + LOAD_MSG_AVX2_Y15(14, 6, 1, 4) + +#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \ + LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \ + LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \ + LOAD_MSG_AVX2_Y14(2, 5, 4, 15); \ + VMOVQ_SI_X15(6*8); \ + VMOVQ_SI_X11_0; \ + VPINSRQ_1_SI_X15(10*8); \ + VPINSRQ_1_SI_X11(8*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \ + LOAD_MSG_AVX2_Y12(9, 5, 2, 10); \ + VMOVQ_SI_X13_0; \ + VMOVQ_SI_X11(4*8); \ + VPINSRQ_1_SI_X13(7*8); \ + VPINSRQ_1_SI_X11(15*8); \ + VINSERTI128 $1, X11, 
Y13, Y13; \ + LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \ + LOAD_MSG_AVX2_Y15(1, 12, 8, 13) + +#define LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X11_0; \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X11(8*8); \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \ + LOAD_MSG_AVX2_Y14(4, 7, 15, 1); \ + LOAD_MSG_AVX2_Y15(13, 5, 14, 9) + +#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \ + LOAD_MSG_AVX2_Y12(12, 1, 14, 4); \ + LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \ + VMOVQ_SI_X14_0; \ + VPSHUFD $0x4E, 8*8(SI), X11; \ + VPINSRQ_1_SI_X14(6*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + LOAD_MSG_AVX2_Y15(7, 3, 2, 11) + +#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \ + LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \ + LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \ + LOAD_MSG_AVX2_Y14(5, 15, 8, 2); \ + VMOVQ_SI_X15_0; \ + VMOVQ_SI_X11(6*8); \ + VPINSRQ_1_SI_X15(4*8); \ + VPINSRQ_1_SI_X11(10*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \ + VMOVQ_SI_X12(6*8); \ + VMOVQ_SI_X11(11*8); \ + VPINSRQ_1_SI_X12(14*8); \ + VPINSRQ_1_SI_X11_0; \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \ + VMOVQ_SI_X11(1*8); \ + VMOVDQU 12*8(SI), X14; \ + VPINSRQ_1_SI_X11(10*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + VMOVQ_SI_X15(2*8); \ + VMOVDQU 4*8(SI), X11; \ + VPINSRQ_1_SI_X15(7*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \ + LOAD_MSG_AVX2_Y12(10, 8, 7, 1); \ + VMOVQ_SI_X13(2*8); \ + VPSHUFD $0x4E, 5*8(SI), X11; \ + VPINSRQ_1_SI_X13(4*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \ + VMOVQ_SI_X15(11*8); \ + VMOVQ_SI_X11(12*8); \ + VPINSRQ_1_SI_X15(14*8); \ + VPINSRQ_1_SI_X11_0; \ + VINSERTI128 $1, X11, Y15, Y15 + +// func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment + 
MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, DX + MOVQ SP, R9 + ADDQ $31, R9 + ANDQ $~31, R9 + MOVQ R9, SP + + MOVQ CX, 16(SP) + XORQ CX, CX + MOVQ CX, 24(SP) + + VMOVDQU ·AVX2_c40<>(SB), Y4 + VMOVDQU ·AVX2_c48<>(SB), Y5 + + VMOVDQU 0(AX), Y8 + VMOVDQU 32(AX), Y9 + VMOVDQU ·AVX2_iv0<>(SB), Y6 + VMOVDQU ·AVX2_iv1<>(SB), Y7 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + MOVQ R9, 8(SP) + +loop: + ADDQ $128, R8 + MOVQ R8, 0(SP) + CMPQ R8, $128 + JGE noinc + INCQ R9 + MOVQ R9, 8(SP) + +noinc: + VMOVDQA Y8, Y0 + VMOVDQA Y9, Y1 + VMOVDQA Y6, Y2 + VPXOR 0(SP), Y7, Y3 + + LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() + VMOVDQA Y12, 32(SP) + VMOVDQA Y13, 64(SP) + VMOVDQA Y14, 96(SP) + VMOVDQA Y15, 128(SP) + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() + VMOVDQA Y12, 160(SP) + VMOVDQA Y13, 192(SP) + VMOVDQA Y14, 224(SP) + VMOVDQA Y15, 256(SP) + + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + + ROUND_AVX2(32(SP), 64(SP), 96(SP), 128(SP), Y10, Y4, Y5) + ROUND_AVX2(160(SP), 192(SP), 224(SP), 256(SP), Y10, Y4, Y5) + + VPXOR Y0, Y8, Y8 + VPXOR 
Y1, Y9, Y9 + VPXOR Y2, Y8, Y8 + VPXOR Y3, Y9, Y9 + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + + VMOVDQU Y8, 0(AX) + VMOVDQU Y9, 32(AX) + VZEROUPPER + + MOVQ DX, SP + RET + +#define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA +#define VPUNPCKLQDQ_X3_X3_X15 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xFB +#define VPUNPCKLQDQ_X7_X7_X15 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xFF +#define VPUNPCKLQDQ_X13_X13_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x11; BYTE $0x6C; BYTE $0xFD +#define VPUNPCKLQDQ_X14_X14_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x09; BYTE $0x6C; BYTE $0xFE + +#define VPUNPCKHQDQ_X15_X2_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD7 +#define VPUNPCKHQDQ_X15_X3_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDF +#define VPUNPCKHQDQ_X15_X6_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF7 +#define VPUNPCKHQDQ_X15_X7_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFF +#define VPUNPCKHQDQ_X15_X3_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD7 +#define VPUNPCKHQDQ_X15_X7_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF7 +#define VPUNPCKHQDQ_X15_X13_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xDF +#define VPUNPCKHQDQ_X15_X13_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xFF + +#define SHUFFLE_AVX() \ + VMOVDQA X6, X13; \ + VMOVDQA X2, X14; \ + VMOVDQA X4, X6; \ + VPUNPCKLQDQ_X13_X13_X15; \ + VMOVDQA X5, X4; \ + VMOVDQA X6, X5; \ + VPUNPCKHQDQ_X15_X7_X6; \ + VPUNPCKLQDQ_X7_X7_X15; \ + VPUNPCKHQDQ_X15_X13_X7; \ + VPUNPCKLQDQ_X3_X3_X15; \ + VPUNPCKHQDQ_X15_X2_X2; \ + VPUNPCKLQDQ_X14_X14_X15; \ + VPUNPCKHQDQ_X15_X3_X3; \ + +#define SHUFFLE_AVX_INV() \ + VMOVDQA X2, X13; \ + VMOVDQA X4, X14; \ + VPUNPCKLQDQ_X2_X2_X15; \ + VMOVDQA X5, X4; \ + VPUNPCKHQDQ_X15_X3_X2; \ + VMOVDQA X14, X5; \ + VPUNPCKLQDQ_X3_X3_X15; \ + VMOVDQA X6, X14; \ + VPUNPCKHQDQ_X15_X13_X3; \ + VPUNPCKLQDQ_X7_X7_X15; \ + 
VPUNPCKHQDQ_X15_X6_X6; \ + VPUNPCKLQDQ_X14_X14_X15; \ + VPUNPCKHQDQ_X15_X7_X7; \ + +#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ + VPADDQ m0, v0, v0; \ + VPADDQ v2, v0, v0; \ + VPADDQ m1, v1, v1; \ + VPADDQ v3, v1, v1; \ + VPXOR v0, v6, v6; \ + VPXOR v1, v7, v7; \ + VPSHUFD $-79, v6, v6; \ + VPSHUFD $-79, v7, v7; \ + VPADDQ v6, v4, v4; \ + VPADDQ v7, v5, v5; \ + VPXOR v4, v2, v2; \ + VPXOR v5, v3, v3; \ + VPSHUFB c40, v2, v2; \ + VPSHUFB c40, v3, v3; \ + VPADDQ m2, v0, v0; \ + VPADDQ v2, v0, v0; \ + VPADDQ m3, v1, v1; \ + VPADDQ v3, v1, v1; \ + VPXOR v0, v6, v6; \ + VPXOR v1, v7, v7; \ + VPSHUFB c48, v6, v6; \ + VPSHUFB c48, v7, v7; \ + VPADDQ v6, v4, v4; \ + VPADDQ v7, v5, v5; \ + VPXOR v4, v2, v2; \ + VPXOR v5, v3, v3; \ + VPADDQ v2, v2, t0; \ + VPSRLQ $63, v2, v2; \ + VPXOR t0, v2, v2; \ + VPADDQ v3, v3, t0; \ + VPSRLQ $63, v3, v3; \ + VPXOR t0, v3, v3 + +// load msg: X12 = (i0, i1), X13 = (i2, i3), X14 = (i4, i5), X15 = (i6, i7) +// i0, i1, i2, i3, i4, i5, i6, i7 must not be 0 +#define LOAD_MSG_AVX(i0, i1, i2, i3, i4, i5, i6, i7) \ + VMOVQ_SI_X12(i0*8); \ + VMOVQ_SI_X13(i2*8); \ + VMOVQ_SI_X14(i4*8); \ + VMOVQ_SI_X15(i6*8); \ + VPINSRQ_1_SI_X12(i1*8); \ + VPINSRQ_1_SI_X13(i3*8); \ + VPINSRQ_1_SI_X14(i5*8); \ + VPINSRQ_1_SI_X15(i7*8) + +// load msg: X12 = (0, 2), X13 = (4, 6), X14 = (1, 3), X15 = (5, 7) +#define LOAD_MSG_AVX_0_2_4_6_1_3_5_7() \ + VMOVQ_SI_X12_0; \ + VMOVQ_SI_X13(4*8); \ + VMOVQ_SI_X14(1*8); \ + VMOVQ_SI_X15(5*8); \ + VPINSRQ_1_SI_X12(2*8); \ + VPINSRQ_1_SI_X13(6*8); \ + VPINSRQ_1_SI_X14(3*8); \ + VPINSRQ_1_SI_X15(7*8) + +// load msg: X12 = (1, 0), X13 = (11, 5), X14 = (12, 2), X15 = (7, 3) +#define LOAD_MSG_AVX_1_0_11_5_12_2_7_3() \ + VPSHUFD $0x4E, 0*8(SI), X12; \ + VMOVQ_SI_X13(11*8); \ + VMOVQ_SI_X14(12*8); \ + VMOVQ_SI_X15(7*8); \ + VPINSRQ_1_SI_X13(5*8); \ + VPINSRQ_1_SI_X14(2*8); \ + VPINSRQ_1_SI_X15(3*8) + +// load msg: X12 = (11, 12), X13 = (5, 15), X14 = (8, 0), X15 = (2, 13) +#define 
LOAD_MSG_AVX_11_12_5_15_8_0_2_13() \ + VMOVDQU 11*8(SI), X12; \ + VMOVQ_SI_X13(5*8); \ + VMOVQ_SI_X14(8*8); \ + VMOVQ_SI_X15(2*8); \ + VPINSRQ_1_SI_X13(15*8); \ + VPINSRQ_1_SI_X14_0; \ + VPINSRQ_1_SI_X15(13*8) + +// load msg: X12 = (2, 5), X13 = (4, 15), X14 = (6, 10), X15 = (0, 8) +#define LOAD_MSG_AVX_2_5_4_15_6_10_0_8() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X13(4*8); \ + VMOVQ_SI_X14(6*8); \ + VMOVQ_SI_X15_0; \ + VPINSRQ_1_SI_X12(5*8); \ + VPINSRQ_1_SI_X13(15*8); \ + VPINSRQ_1_SI_X14(10*8); \ + VPINSRQ_1_SI_X15(8*8) + +// load msg: X12 = (9, 5), X13 = (2, 10), X14 = (0, 7), X15 = (4, 15) +#define LOAD_MSG_AVX_9_5_2_10_0_7_4_15() \ + VMOVQ_SI_X12(9*8); \ + VMOVQ_SI_X13(2*8); \ + VMOVQ_SI_X14_0; \ + VMOVQ_SI_X15(4*8); \ + VPINSRQ_1_SI_X12(5*8); \ + VPINSRQ_1_SI_X13(10*8); \ + VPINSRQ_1_SI_X14(7*8); \ + VPINSRQ_1_SI_X15(15*8) + +// load msg: X12 = (2, 6), X13 = (0, 8), X14 = (12, 10), X15 = (11, 3) +#define LOAD_MSG_AVX_2_6_0_8_12_10_11_3() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X13_0; \ + VMOVQ_SI_X14(12*8); \ + VMOVQ_SI_X15(11*8); \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X13(8*8); \ + VPINSRQ_1_SI_X14(10*8); \ + VPINSRQ_1_SI_X15(3*8) + +// load msg: X12 = (0, 6), X13 = (9, 8), X14 = (7, 3), X15 = (2, 11) +#define LOAD_MSG_AVX_0_6_9_8_7_3_2_11() \ + MOVQ 0*8(SI), X12; \ + VPSHUFD $0x4E, 8*8(SI), X13; \ + MOVQ 7*8(SI), X14; \ + MOVQ 2*8(SI), X15; \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X14(3*8); \ + VPINSRQ_1_SI_X15(11*8) + +// load msg: X12 = (6, 14), X13 = (11, 0), X14 = (15, 9), X15 = (3, 8) +#define LOAD_MSG_AVX_6_14_11_0_15_9_3_8() \ + MOVQ 6*8(SI), X12; \ + MOVQ 11*8(SI), X13; \ + MOVQ 15*8(SI), X14; \ + MOVQ 3*8(SI), X15; \ + VPINSRQ_1_SI_X12(14*8); \ + VPINSRQ_1_SI_X13_0; \ + VPINSRQ_1_SI_X14(9*8); \ + VPINSRQ_1_SI_X15(8*8) + +// load msg: X12 = (5, 15), X13 = (8, 2), X14 = (0, 4), X15 = (6, 10) +#define LOAD_MSG_AVX_5_15_8_2_0_4_6_10() \ + MOVQ 5*8(SI), X12; \ + MOVQ 8*8(SI), X13; \ + MOVQ 0*8(SI), X14; \ + MOVQ 6*8(SI), X15; \ + VPINSRQ_1_SI_X12(15*8); 
\ + VPINSRQ_1_SI_X13(2*8); \ + VPINSRQ_1_SI_X14(4*8); \ + VPINSRQ_1_SI_X15(10*8) + +// load msg: X12 = (12, 13), X13 = (1, 10), X14 = (2, 7), X15 = (4, 5) +#define LOAD_MSG_AVX_12_13_1_10_2_7_4_5() \ + VMOVDQU 12*8(SI), X12; \ + MOVQ 1*8(SI), X13; \ + MOVQ 2*8(SI), X14; \ + VPINSRQ_1_SI_X13(10*8); \ + VPINSRQ_1_SI_X14(7*8); \ + VMOVDQU 4*8(SI), X15 + +// load msg: X12 = (15, 9), X13 = (3, 13), X14 = (11, 14), X15 = (12, 0) +#define LOAD_MSG_AVX_15_9_3_13_11_14_12_0() \ + MOVQ 15*8(SI), X12; \ + MOVQ 3*8(SI), X13; \ + MOVQ 11*8(SI), X14; \ + MOVQ 12*8(SI), X15; \ + VPINSRQ_1_SI_X12(9*8); \ + VPINSRQ_1_SI_X13(13*8); \ + VPINSRQ_1_SI_X14(14*8); \ + VPINSRQ_1_SI_X15_0 + +// func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, BP + MOVQ SP, R9 + ADDQ $15, R9 + ANDQ $~15, R9 + MOVQ R9, SP + + VMOVDQU ·AVX_c40<>(SB), X0 + VMOVDQU ·AVX_c48<>(SB), X1 + VMOVDQA X0, X8 + VMOVDQA X1, X9 + + VMOVDQU ·AVX_iv3<>(SB), X0 + VMOVDQA X0, 0(SP) + XORQ CX, 0(SP) // 0(SP) = ·AVX_iv3 ^ (CX || 0) + + VMOVDQU 0(AX), X10 + VMOVDQU 16(AX), X11 + VMOVDQU 32(AX), X2 + VMOVDQU 48(AX), X3 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + +loop: + ADDQ $128, R8 + CMPQ R8, $128 + JGE noinc + INCQ R9 + +noinc: + VMOVQ_R8_X15 + VPINSRQ_1_R9_X15 + + VMOVDQA X10, X0 + VMOVDQA X11, X1 + VMOVDQU ·AVX_iv0<>(SB), X4 + VMOVDQU ·AVX_iv1<>(SB), X5 + VMOVDQU ·AVX_iv2<>(SB), X6 + + VPXOR X15, X6, X6 + VMOVDQA 0(SP), X7 + + LOAD_MSG_AVX_0_2_4_6_1_3_5_7() + VMOVDQA X12, 16(SP) + VMOVDQA X13, 32(SP) + VMOVDQA X14, 48(SP) + VMOVDQA X15, 64(SP) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15) + VMOVDQA X12, 80(SP) + VMOVDQA X13, 96(SP) + VMOVDQA X14, 112(SP) + VMOVDQA X15, 128(SP) + HALF_ROUND_AVX(X0, X1, 
X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6) + VMOVDQA X12, 144(SP) + VMOVDQA X13, 160(SP) + VMOVDQA X14, 176(SP) + VMOVDQA X15, 192(SP) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_1_0_11_5_12_2_7_3() + VMOVDQA X12, 208(SP) + VMOVDQA X13, 224(SP) + VMOVDQA X14, 240(SP) + VMOVDQA X15, 256(SP) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_11_12_5_15_8_0_2_13() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(10, 3, 7, 9, 14, 6, 1, 4) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(7, 3, 13, 11, 9, 1, 12, 14) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_2_5_4_15_6_10_0_8() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_9_5_2_10_0_7_4_15() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(14, 11, 6, 3, 1, 12, 8, 13) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_2_6_0_8_12_10_11_3() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(4, 7, 15, 1, 13, 5, 14, 9) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(12, 1, 14, 4, 5, 15, 13, 10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_0_6_9_8_7_3_2_11() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(13, 7, 12, 3, 11, 14, 1, 9) + 
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_5_15_8_2_0_4_6_10() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_6_14_11_0_15_9_3_8() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_12_13_1_10_2_7_4_5() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(10, 8, 7, 1, 2, 4, 6, 5) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_15_9_3_13_11_14_12_0() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X15, X8, X9) + SHUFFLE_AVX() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X15, X8, X9) + SHUFFLE_AVX_INV() + + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X15, X8, X9) + SHUFFLE_AVX() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X15, X8, X9) + SHUFFLE_AVX_INV() + + VMOVDQU 32(AX), X14 + VMOVDQU 48(AX), X15 + VPXOR X0, X10, X10 + VPXOR X1, X11, X11 + VPXOR X2, X14, X14 + VPXOR X3, X15, X15 + VPXOR X4, X10, X10 + VPXOR X5, X11, X11 + VPXOR X6, X14, X2 + VPXOR X7, X15, X3 + VMOVDQU X2, 32(AX) + VMOVDQU X3, 48(AX) + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + VMOVDQU X10, 0(AX) + VMOVDQU X11, 16(AX) + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + VZEROUPPER + + MOVQ BP, SP + RET + +// func supportsAVX2() bool +TEXT ·supportsAVX2(SB), 4, $0-1 + MOVQ runtime·support_avx2(SB), AX + MOVB AX, ret+0(FP) + RET + +// func supportsAVX() bool +TEXT ·supportsAVX(SB), 4, $0-1 + MOVQ runtime·support_avx(SB), AX + MOVB AX, ret+0(FP) + RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go 
b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go new file mode 100644 index 0000000..2ab7c30 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go @@ -0,0 +1,25 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7,amd64,!gccgo,!appengine + +package blake2b + +func init() { + useSSE4 = supportsSSE4() +} + +//go:noescape +func supportsSSE4() bool + +//go:noescape +func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + if useSSE4 { + hashBlocksSSE4(h, c, flag, blocks) + } else { + hashBlocksGeneric(h, c, flag, blocks) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s new file mode 100644 index 0000000..6453074 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s @@ -0,0 +1,290 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build amd64,!gccgo,!appengine + +#include "textflag.h" + +DATA ·iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +GLOBL ·iv0<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b +DATA ·iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·iv1<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv2<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·iv2<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b +DATA ·iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 +GLOBL ·iv3<>(SB), (NOPTR+RODATA), $16 + +DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 + +#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v6, t1; \ + PUNPCKLQDQ v6, t2; \ + PUNPCKHQDQ v7, v6; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ v7, t2; \ + MOVO t1, v7; \ + MOVO v2, t1; \ + PUNPCKHQDQ t2, v7; \ + PUNPCKLQDQ v3, t2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v3 + +#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v2, t1; \ + PUNPCKLQDQ v2, t2; \ + PUNPCKHQDQ v3, v2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ v3, t2; \ + MOVO t1, v3; \ + MOVO v6, t1; \ + PUNPCKHQDQ t2, v3; \ + PUNPCKLQDQ v7, t2; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v7 + +#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ + PADDQ m0, v0; \ + PADDQ m1, v1; \ + PADDQ v2, v0; \ + PADDQ v3, v1; \ + PXOR v0, v6; \ + PXOR v1, v7; \ + PSHUFD $0xB1, v6, v6; \ + PSHUFD $0xB1, v7, v7; \ + PADDQ v6, v4; \ + PADDQ v7, v5; \ + PXOR v4, v2; \ + PXOR v5, v3; \ + PSHUFB c40, v2; \ + PSHUFB c40, v3; \ + PADDQ m2, v0; \ + PADDQ m3, v1; \ + PADDQ v2, v0; \ + PADDQ v3, v1; \ + 
PXOR v0, v6; \ + PXOR v1, v7; \ + PSHUFB c48, v6; \ + PSHUFB c48, v7; \ + PADDQ v6, v4; \ + PADDQ v7, v5; \ + PXOR v4, v2; \ + PXOR v5, v3; \ + MOVOU v2, t0; \ + PADDQ v2, t0; \ + PSRLQ $63, v2; \ + PXOR t0, v2; \ + MOVOU v3, t0; \ + PADDQ v3, t0; \ + PSRLQ $63, v3; \ + PXOR t0, v3 + +#define LOAD_MSG(m0, m1, m2, m3, src, i0, i1, i2, i3, i4, i5, i6, i7) \ + MOVQ i0*8(src), m0; \ + PINSRQ $1, i1*8(src), m0; \ + MOVQ i2*8(src), m1; \ + PINSRQ $1, i3*8(src), m1; \ + MOVQ i4*8(src), m2; \ + PINSRQ $1, i5*8(src), m2; \ + MOVQ i6*8(src), m3; \ + PINSRQ $1, i7*8(src), m3 + +// func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksSSE4(SB), 4, $288-48 // frame size = 272 + 16 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, BP + MOVQ SP, R9 + ADDQ $15, R9 + ANDQ $~15, R9 + MOVQ R9, SP + + MOVOU ·iv3<>(SB), X0 + MOVO X0, 0(SP) + XORQ CX, 0(SP) // 0(SP) = ·iv3 ^ (CX || 0) + + MOVOU ·c40<>(SB), X13 + MOVOU ·c48<>(SB), X14 + + MOVOU 0(AX), X12 + MOVOU 16(AX), X15 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + +loop: + ADDQ $128, R8 + CMPQ R8, $128 + JGE noinc + INCQ R9 + +noinc: + MOVQ R8, X8 + PINSRQ $1, R9, X8 + + MOVO X12, X0 + MOVO X15, X1 + MOVOU 32(AX), X2 + MOVOU 48(AX), X3 + MOVOU ·iv0<>(SB), X4 + MOVOU ·iv1<>(SB), X5 + MOVOU ·iv2<>(SB), X6 + + PXOR X8, X6 + MOVO 0(SP), X7 + + LOAD_MSG(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7) + MOVO X8, 16(SP) + MOVO X9, 32(SP) + MOVO X10, 48(SP) + MOVO X11, 64(SP) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 8, 10, 12, 14, 9, 11, 13, 15) + MOVO X8, 80(SP) + MOVO X9, 96(SP) + MOVO X10, 112(SP) + MOVO X11, 128(SP) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6) + 
MOVO X8, 144(SP) + MOVO X9, 160(SP) + MOVO X10, 176(SP) + MOVO X11, 192(SP) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 1, 0, 11, 5, 12, 2, 7, 3) + MOVO X8, 208(SP) + MOVO X9, 224(SP) + MOVO X10, 240(SP) + MOVO X11, 256(SP) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 11, 12, 5, 15, 8, 0, 2, 13) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 10, 3, 7, 9, 14, 6, 1, 4) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 7, 3, 13, 11, 9, 1, 12, 14) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 2, 5, 4, 15, 6, 10, 0, 8) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 9, 5, 2, 10, 0, 7, 4, 15) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 14, 11, 6, 3, 1, 12, 8, 13) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 2, 6, 0, 8, 12, 10, 11, 3) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 4, 7, 15, 1, 13, 5, 14, 9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 12, 1, 14, 4, 5, 15, 13, 10) + 
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 0, 6, 9, 8, 7, 3, 2, 11) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 13, 7, 12, 3, 11, 14, 1, 9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 5, 15, 8, 2, 0, 4, 6, 10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 6, 14, 11, 0, 15, 9, 3, 8) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 12, 13, 1, 10, 2, 7, 4, 5) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 10, 8, 7, 1, 2, 4, 6, 5) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 15, 9, 3, 13, 11, 14, 12, 0) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + MOVOU 32(AX), X10 + MOVOU 48(AX), X11 + 
PXOR X0, X12 + PXOR X1, X15 + PXOR X2, X10 + PXOR X3, X11 + PXOR X4, X12 + PXOR X5, X15 + PXOR X6, X10 + PXOR X7, X11 + MOVOU X10, 32(AX) + MOVOU X11, 48(AX) + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + MOVOU X12, 0(AX) + MOVOU X15, 16(AX) + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + + MOVQ BP, SP + RET + +// func supportsSSE4() bool +TEXT ·supportsSSE4(SB), 4, $0-1 + MOVL $1, AX + CPUID + SHRL $19, CX // Bit 19 indicates SSE4 support + ANDL $1, CX // CX != 0 if support SSE4 + MOVB CX, ret+0(FP) + RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go new file mode 100644 index 0000000..4bd2abc --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go @@ -0,0 +1,179 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blake2b + +import "encoding/binary" + +// the precomputed values for BLAKE2b +// there are 12 16-byte arrays - one for each round +// the entries are calculated from the sigma constants. 
+var precomputed = [12][16]byte{ + {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, + {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, + {11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4}, + {7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8}, + {9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13}, + {2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9}, + {12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11}, + {13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10}, + {6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5}, + {10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0}, + {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, // equal to the first + {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, // equal to the second +} + +func hashBlocksGeneric(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + var m [16]uint64 + c0, c1 := c[0], c[1] + + for i := 0; i < len(blocks); { + c0 += BlockSize + if c0 < BlockSize { + c1++ + } + + v0, v1, v2, v3, v4, v5, v6, v7 := h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7] + v8, v9, v10, v11, v12, v13, v14, v15 := iv[0], iv[1], iv[2], iv[3], iv[4], iv[5], iv[6], iv[7] + v12 ^= c0 + v13 ^= c1 + v14 ^= flag + + for j := range m { + m[j] = binary.LittleEndian.Uint64(blocks[i:]) + i += 8 + } + + for j := range precomputed { + s := &(precomputed[j]) + + v0 += m[s[0]] + v0 += v4 + v12 ^= v0 + v12 = v12<<(64-32) | v12>>32 + v8 += v12 + v4 ^= v8 + v4 = v4<<(64-24) | v4>>24 + v1 += m[s[1]] + v1 += v5 + v13 ^= v1 + v13 = v13<<(64-32) | v13>>32 + v9 += v13 + v5 ^= v9 + v5 = v5<<(64-24) | v5>>24 + v2 += m[s[2]] + v2 += v6 + v14 ^= v2 + v14 = v14<<(64-32) | v14>>32 + v10 += v14 + v6 ^= v10 + v6 = v6<<(64-24) | v6>>24 + v3 += m[s[3]] + v3 += v7 + v15 ^= v3 + v15 = v15<<(64-32) | v15>>32 + v11 += v15 + v7 ^= v11 + v7 = v7<<(64-24) | v7>>24 + + v0 += m[s[4]] + v0 += v4 + v12 ^= v0 + v12 = v12<<(64-16) | v12>>16 + v8 += v12 + v4 ^= v8 + v4 = v4<<(64-63) | v4>>63 + v1 += m[s[5]] + v1 += v5 + v13 ^= 
v1 + v13 = v13<<(64-16) | v13>>16 + v9 += v13 + v5 ^= v9 + v5 = v5<<(64-63) | v5>>63 + v2 += m[s[6]] + v2 += v6 + v14 ^= v2 + v14 = v14<<(64-16) | v14>>16 + v10 += v14 + v6 ^= v10 + v6 = v6<<(64-63) | v6>>63 + v3 += m[s[7]] + v3 += v7 + v15 ^= v3 + v15 = v15<<(64-16) | v15>>16 + v11 += v15 + v7 ^= v11 + v7 = v7<<(64-63) | v7>>63 + + v0 += m[s[8]] + v0 += v5 + v15 ^= v0 + v15 = v15<<(64-32) | v15>>32 + v10 += v15 + v5 ^= v10 + v5 = v5<<(64-24) | v5>>24 + v1 += m[s[9]] + v1 += v6 + v12 ^= v1 + v12 = v12<<(64-32) | v12>>32 + v11 += v12 + v6 ^= v11 + v6 = v6<<(64-24) | v6>>24 + v2 += m[s[10]] + v2 += v7 + v13 ^= v2 + v13 = v13<<(64-32) | v13>>32 + v8 += v13 + v7 ^= v8 + v7 = v7<<(64-24) | v7>>24 + v3 += m[s[11]] + v3 += v4 + v14 ^= v3 + v14 = v14<<(64-32) | v14>>32 + v9 += v14 + v4 ^= v9 + v4 = v4<<(64-24) | v4>>24 + + v0 += m[s[12]] + v0 += v5 + v15 ^= v0 + v15 = v15<<(64-16) | v15>>16 + v10 += v15 + v5 ^= v10 + v5 = v5<<(64-63) | v5>>63 + v1 += m[s[13]] + v1 += v6 + v12 ^= v1 + v12 = v12<<(64-16) | v12>>16 + v11 += v12 + v6 ^= v11 + v6 = v6<<(64-63) | v6>>63 + v2 += m[s[14]] + v2 += v7 + v13 ^= v2 + v13 = v13<<(64-16) | v13>>16 + v8 += v13 + v7 ^= v8 + v7 = v7<<(64-63) | v7>>63 + v3 += m[s[15]] + v3 += v4 + v14 ^= v3 + v14 = v14<<(64-16) | v14>>16 + v9 += v14 + v4 ^= v9 + v4 = v4<<(64-63) | v4>>63 + + } + + h[0] ^= v0 ^ v8 + h[1] ^= v1 ^ v9 + h[2] ^= v2 ^ v10 + h[3] ^= v3 ^ v11 + h[4] ^= v4 ^ v12 + h[5] ^= v5 ^ v13 + h[6] ^= v6 ^ v14 + h[7] ^= v7 ^ v15 + } + c[0], c[1] = c0, c1 +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go new file mode 100644 index 0000000..da156a1 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go @@ -0,0 +1,11 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !amd64 appengine gccgo + +package blake2b + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + hashBlocksGeneric(h, c, flag, blocks) +} diff --git a/vendor/golang.org/x/crypto/blake2b/register.go b/vendor/golang.org/x/crypto/blake2b/register.go new file mode 100644 index 0000000..efd689a --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/register.go @@ -0,0 +1,32 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package blake2b + +import ( + "crypto" + "hash" +) + +func init() { + newHash256 := func() hash.Hash { + h, _ := New256(nil) + return h + } + newHash384 := func() hash.Hash { + h, _ := New384(nil) + return h + } + + newHash512 := func() hash.Hash { + h, _ := New512(nil) + return h + } + + crypto.RegisterHash(crypto.BLAKE2b_256, newHash256) + crypto.RegisterHash(crypto.BLAKE2b_384, newHash384) + crypto.RegisterHash(crypto.BLAKE2b_512, newHash512) +} diff --git a/vendor/golang.org/x/crypto/blowfish/block.go b/vendor/golang.org/x/crypto/blowfish/block.go new file mode 100644 index 0000000..9d80f19 --- /dev/null +++ b/vendor/golang.org/x/crypto/blowfish/block.go @@ -0,0 +1,159 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blowfish + +// getNextWord returns the next big-endian uint32 value from the byte slice +// at the given position in a circular manner, updating the position. +func getNextWord(b []byte, pos *int) uint32 { + var w uint32 + j := *pos + for i := 0; i < 4; i++ { + w = w<<8 | uint32(b[j]) + j++ + if j >= len(b) { + j = 0 + } + } + *pos = j + return w +} + +// ExpandKey performs a key expansion on the given *Cipher. 
Specifically, it +// performs the Blowfish algorithm's key schedule which sets up the *Cipher's +// pi and substitution tables for calls to Encrypt. This is used, primarily, +// by the bcrypt package to reuse the Blowfish key schedule during its +// set up. It's unlikely that you need to use this directly. +func ExpandKey(key []byte, c *Cipher) { + j := 0 + for i := 0; i < 18; i++ { + // Using inlined getNextWord for performance. + var d uint32 + for k := 0; k < 4; k++ { + d = d<<8 | uint32(key[j]) + j++ + if j >= len(key) { + j = 0 + } + } + c.p[i] ^= d + } + + var l, r uint32 + for i := 0; i < 18; i += 2 { + l, r = encryptBlock(l, r, c) + c.p[i], c.p[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s0[i], c.s0[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s1[i], c.s1[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s2[i], c.s2[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s3[i], c.s3[i+1] = l, r + } +} + +// This is similar to ExpandKey, but folds the salt during the key +// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero +// salt passed in, reusing ExpandKey turns out to be a place of inefficiency +// and specializing it here is useful. 
+func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) { + j := 0 + for i := 0; i < 18; i++ { + c.p[i] ^= getNextWord(key, &j) + } + + j = 0 + var l, r uint32 + for i := 0; i < 18; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.p[i], c.p[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s0[i], c.s0[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s1[i], c.s1[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s2[i], c.s2[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s3[i], c.s3[i+1] = l, r + } +} + +func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { + xl, xr := l, r + xl ^= c.p[0] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + 
c.s3[byte(xr)] ^ c.p[10] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16] + xr ^= c.p[17] + return xr, xl +} + +func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { + xl, xr := l, r + xl ^= c.p[17] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ 
c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1] + xr ^= c.p[0] + return xr, xl +} diff --git a/vendor/golang.org/x/crypto/blowfish/cipher.go b/vendor/golang.org/x/crypto/blowfish/cipher.go new file mode 100644 index 0000000..a73954f --- /dev/null +++ b/vendor/golang.org/x/crypto/blowfish/cipher.go @@ -0,0 +1,91 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm. +package blowfish // import "golang.org/x/crypto/blowfish" + +// The code is a port of Bruce Schneier's C implementation. +// See http://www.schneier.com/blowfish.html. + +import "strconv" + +// The Blowfish block size in bytes. +const BlockSize = 8 + +// A Cipher is an instance of Blowfish encryption using a particular key. +type Cipher struct { + p [18]uint32 + s0, s1, s2, s3 [256]uint32 +} + +type KeySizeError int + +func (k KeySizeError) Error() string { + return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k)) +} + +// NewCipher creates and returns a Cipher. +// The key argument should be the Blowfish key, from 1 to 56 bytes. +func NewCipher(key []byte) (*Cipher, error) { + var result Cipher + if k := len(key); k < 1 || k > 56 { + return nil, KeySizeError(k) + } + initCipher(&result) + ExpandKey(key, &result) + return &result, nil +} + +// NewSaltedCipher creates a returns a Cipher that folds a salt into its key +// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is +// sufficient and desirable. For bcrypt compatibility, the key can be over 56 +// bytes. 
+func NewSaltedCipher(key, salt []byte) (*Cipher, error) { + if len(salt) == 0 { + return NewCipher(key) + } + var result Cipher + if k := len(key); k < 1 { + return nil, KeySizeError(k) + } + initCipher(&result) + expandKeyWithSalt(key, salt, &result) + return &result, nil +} + +// BlockSize returns the Blowfish block size, 8 bytes. +// It is necessary to satisfy the Block interface in the +// package "crypto/cipher". +func (c *Cipher) BlockSize() int { return BlockSize } + +// Encrypt encrypts the 8-byte buffer src using the key k +// and stores the result in dst. +// Note that for amounts of data larger than a block, +// it is not safe to just call Encrypt on successive blocks; +// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go). +func (c *Cipher) Encrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + l, r = encryptBlock(l, r, c) + dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) + dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) +} + +// Decrypt decrypts the 8-byte buffer src using the key k +// and stores the result in dst. 
+func (c *Cipher) Decrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + l, r = decryptBlock(l, r, c) + dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) + dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) +} + +func initCipher(c *Cipher) { + copy(c.p[0:], p[0:]) + copy(c.s0[0:], s0[0:]) + copy(c.s1[0:], s1[0:]) + copy(c.s2[0:], s2[0:]) + copy(c.s3[0:], s3[0:]) +} diff --git a/vendor/golang.org/x/crypto/blowfish/const.go b/vendor/golang.org/x/crypto/blowfish/const.go new file mode 100644 index 0000000..8c5ee4c --- /dev/null +++ b/vendor/golang.org/x/crypto/blowfish/const.go @@ -0,0 +1,199 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The startup permutation array and substitution boxes. +// They are the hexadecimal digits of PI; see: +// http://www.schneier.com/code/constants.txt. 
+ +package blowfish + +var s0 = [256]uint32{ + 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96, + 0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16, + 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658, + 0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013, + 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e, + 0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60, + 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6, + 0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a, + 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c, + 0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193, + 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1, + 0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239, + 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a, + 0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3, + 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176, + 0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe, + 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706, + 0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b, + 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b, + 0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463, + 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c, + 0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3, + 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a, + 0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8, + 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760, + 0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db, + 0xd542a8f6, 0x287effc3, 
0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8, + 0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b, + 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33, + 0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4, + 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0, + 0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c, + 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777, + 0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299, + 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705, + 0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf, + 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e, + 0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa, + 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9, + 0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915, + 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f, + 0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664, + 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a, +} + +var s1 = [256]uint32{ + 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d, + 0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1, + 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65, + 0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1, + 0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9, + 0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737, + 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d, + 0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd, + 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc, + 0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41, + 0xe238cd99, 
0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908, + 0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af, + 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124, + 0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c, + 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908, + 0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd, + 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b, + 0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e, + 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa, + 0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a, + 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d, + 0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66, + 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5, + 0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84, + 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96, + 0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14, + 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca, + 0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7, + 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77, + 0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99, + 0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054, + 0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73, + 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea, + 0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105, + 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646, + 0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285, + 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea, + 0x1dadf43e, 
0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb, + 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e, + 0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc, + 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd, + 0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20, + 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7, +} + +var s2 = [256]uint32{ + 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7, + 0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af, + 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af, + 0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504, + 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4, + 0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee, + 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec, + 0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b, + 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332, + 0x6841e7f7, 0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527, + 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58, + 0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c, + 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22, + 0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17, + 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60, + 0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115, + 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99, + 0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0, + 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74, + 0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d, + 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3, + 
0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3, + 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979, + 0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c, + 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa, + 0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a, + 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086, + 0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc, + 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24, + 0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2, + 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84, + 0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c, + 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09, + 0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10, + 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe, + 0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027, + 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0, + 0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634, + 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188, + 0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc, + 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8, + 0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837, + 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0, +} + +var s3 = [256]uint32{ + 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742, + 0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b, + 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79, + 0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6, + 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a, 
+ 0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4, + 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1, + 0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59, + 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797, + 0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28, + 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6, + 0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28, + 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba, + 0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a, + 0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5, + 0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f, + 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce, + 0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680, + 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd, + 0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb, + 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb, + 0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370, + 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc, + 0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048, + 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc, + 0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9, + 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a, + 0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f, + 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a, + 0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1, + 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b, + 0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e, + 
0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e, + 0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f, + 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623, + 0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc, + 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a, + 0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6, + 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3, + 0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060, + 0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c, + 0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f, + 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6, +} + +var p = [18]uint32{ + 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0, + 0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c, + 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b, +} diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go new file mode 100644 index 0000000..593f653 --- /dev/null +++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go @@ -0,0 +1,77 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC +2898 / PKCS #5 v2.0. + +A key derivation function is useful when encrypting data based on a password +or any other not-fully-random data. It uses a pseudorandom function to derive +a secure encryption key based on the password. + +While v2.0 of the standard defines only one pseudorandom function to use, +HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved +Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. 
To +choose, you can pass the `New` functions from the different SHA packages to +pbkdf2.Key. +*/ +package pbkdf2 // import "golang.org/x/crypto/pbkdf2" + +import ( + "crypto/hmac" + "hash" +) + +// Key derives a key from the password, salt and iteration count, returning a +// []byte of length keylen that can be used as cryptographic key. The key is +// derived based on the method described as PBKDF2 with the HMAC variant using +// the supplied hash function. +// +// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you +// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by +// doing: +// +// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) +// +// Remember to get a good random salt. At least 8 bytes is recommended by the +// RFC. +// +// Using a higher iteration count will increase the cost of an exhaustive +// search but will also make derivation proportionally slower. +func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte { + prf := hmac.New(h, password) + hashLen := prf.Size() + numBlocks := (keyLen + hashLen - 1) / hashLen + + var buf [4]byte + dk := make([]byte, 0, numBlocks*hashLen) + U := make([]byte, hashLen) + for block := 1; block <= numBlocks; block++ { + // N.B.: || means concatenation, ^ means XOR + // for each block T_i = U_1 ^ U_2 ^ ... 
^ U_iter + // U_1 = PRF(password, salt || uint(i)) + prf.Reset() + prf.Write(salt) + buf[0] = byte(block >> 24) + buf[1] = byte(block >> 16) + buf[2] = byte(block >> 8) + buf[3] = byte(block) + prf.Write(buf[:4]) + dk = prf.Sum(dk) + T := dk[len(dk)-hashLen:] + copy(U, T) + + // U_n = PRF(password, U_(n-1)) + for n := 2; n <= iter; n++ { + prf.Reset() + prf.Write(U) + U = U[:0] + U = prf.Sum(U) + for x := range U { + T[x] ^= U[x] + } + } + } + return dk[:keyLen] +} diff --git a/vendor/golang.org/x/crypto/scrypt/scrypt.go b/vendor/golang.org/x/crypto/scrypt/scrypt.go new file mode 100644 index 0000000..7455395 --- /dev/null +++ b/vendor/golang.org/x/crypto/scrypt/scrypt.go @@ -0,0 +1,243 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package scrypt implements the scrypt key derivation function as defined in +// Colin Percival's paper "Stronger Key Derivation via Sequential Memory-Hard +// Functions" (http://www.tarsnap.com/scrypt/scrypt.pdf). +package scrypt // import "golang.org/x/crypto/scrypt" + +import ( + "crypto/sha256" + "errors" + + "golang.org/x/crypto/pbkdf2" +) + +const maxInt = int(^uint(0) >> 1) + +// blockCopy copies n numbers from src into dst. +func blockCopy(dst, src []uint32, n int) { + copy(dst, src[:n]) +} + +// blockXOR XORs numbers from dst with n numbers from src. +func blockXOR(dst, src []uint32, n int) { + for i, v := range src[:n] { + dst[i] ^= v + } +} + +// salsaXOR applies Salsa20/8 to the XOR of 16 numbers from tmp and in, +// and puts the result into both both tmp and out. 
+func salsaXOR(tmp *[16]uint32, in, out []uint32) { + w0 := tmp[0] ^ in[0] + w1 := tmp[1] ^ in[1] + w2 := tmp[2] ^ in[2] + w3 := tmp[3] ^ in[3] + w4 := tmp[4] ^ in[4] + w5 := tmp[5] ^ in[5] + w6 := tmp[6] ^ in[6] + w7 := tmp[7] ^ in[7] + w8 := tmp[8] ^ in[8] + w9 := tmp[9] ^ in[9] + w10 := tmp[10] ^ in[10] + w11 := tmp[11] ^ in[11] + w12 := tmp[12] ^ in[12] + w13 := tmp[13] ^ in[13] + w14 := tmp[14] ^ in[14] + w15 := tmp[15] ^ in[15] + + x0, x1, x2, x3, x4, x5, x6, x7, x8 := w0, w1, w2, w3, w4, w5, w6, w7, w8 + x9, x10, x11, x12, x13, x14, x15 := w9, w10, w11, w12, w13, w14, w15 + + for i := 0; i < 8; i += 2 { + u := x0 + x12 + x4 ^= u<<7 | u>>(32-7) + u = x4 + x0 + x8 ^= u<<9 | u>>(32-9) + u = x8 + x4 + x12 ^= u<<13 | u>>(32-13) + u = x12 + x8 + x0 ^= u<<18 | u>>(32-18) + + u = x5 + x1 + x9 ^= u<<7 | u>>(32-7) + u = x9 + x5 + x13 ^= u<<9 | u>>(32-9) + u = x13 + x9 + x1 ^= u<<13 | u>>(32-13) + u = x1 + x13 + x5 ^= u<<18 | u>>(32-18) + + u = x10 + x6 + x14 ^= u<<7 | u>>(32-7) + u = x14 + x10 + x2 ^= u<<9 | u>>(32-9) + u = x2 + x14 + x6 ^= u<<13 | u>>(32-13) + u = x6 + x2 + x10 ^= u<<18 | u>>(32-18) + + u = x15 + x11 + x3 ^= u<<7 | u>>(32-7) + u = x3 + x15 + x7 ^= u<<9 | u>>(32-9) + u = x7 + x3 + x11 ^= u<<13 | u>>(32-13) + u = x11 + x7 + x15 ^= u<<18 | u>>(32-18) + + u = x0 + x3 + x1 ^= u<<7 | u>>(32-7) + u = x1 + x0 + x2 ^= u<<9 | u>>(32-9) + u = x2 + x1 + x3 ^= u<<13 | u>>(32-13) + u = x3 + x2 + x0 ^= u<<18 | u>>(32-18) + + u = x5 + x4 + x6 ^= u<<7 | u>>(32-7) + u = x6 + x5 + x7 ^= u<<9 | u>>(32-9) + u = x7 + x6 + x4 ^= u<<13 | u>>(32-13) + u = x4 + x7 + x5 ^= u<<18 | u>>(32-18) + + u = x10 + x9 + x11 ^= u<<7 | u>>(32-7) + u = x11 + x10 + x8 ^= u<<9 | u>>(32-9) + u = x8 + x11 + x9 ^= u<<13 | u>>(32-13) + u = x9 + x8 + x10 ^= u<<18 | u>>(32-18) + + u = x15 + x14 + x12 ^= u<<7 | u>>(32-7) + u = x12 + x15 + x13 ^= u<<9 | u>>(32-9) + u = x13 + x12 + x14 ^= u<<13 | u>>(32-13) + u = x14 + x13 + x15 ^= u<<18 | u>>(32-18) + } + x0 += w0 + x1 += w1 + x2 += w2 + x3 += w3 + 
x4 += w4 + x5 += w5 + x6 += w6 + x7 += w7 + x8 += w8 + x9 += w9 + x10 += w10 + x11 += w11 + x12 += w12 + x13 += w13 + x14 += w14 + x15 += w15 + + out[0], tmp[0] = x0, x0 + out[1], tmp[1] = x1, x1 + out[2], tmp[2] = x2, x2 + out[3], tmp[3] = x3, x3 + out[4], tmp[4] = x4, x4 + out[5], tmp[5] = x5, x5 + out[6], tmp[6] = x6, x6 + out[7], tmp[7] = x7, x7 + out[8], tmp[8] = x8, x8 + out[9], tmp[9] = x9, x9 + out[10], tmp[10] = x10, x10 + out[11], tmp[11] = x11, x11 + out[12], tmp[12] = x12, x12 + out[13], tmp[13] = x13, x13 + out[14], tmp[14] = x14, x14 + out[15], tmp[15] = x15, x15 +} + +func blockMix(tmp *[16]uint32, in, out []uint32, r int) { + blockCopy(tmp[:], in[(2*r-1)*16:], 16) + for i := 0; i < 2*r; i += 2 { + salsaXOR(tmp, in[i*16:], out[i*8:]) + salsaXOR(tmp, in[i*16+16:], out[i*8+r*16:]) + } +} + +func integer(b []uint32, r int) uint64 { + j := (2*r - 1) * 16 + return uint64(b[j]) | uint64(b[j+1])<<32 +} + +func smix(b []byte, r, N int, v, xy []uint32) { + var tmp [16]uint32 + x := xy + y := xy[32*r:] + + j := 0 + for i := 0; i < 32*r; i++ { + x[i] = uint32(b[j]) | uint32(b[j+1])<<8 | uint32(b[j+2])<<16 | uint32(b[j+3])<<24 + j += 4 + } + for i := 0; i < N; i += 2 { + blockCopy(v[i*(32*r):], x, 32*r) + blockMix(&tmp, x, y, r) + + blockCopy(v[(i+1)*(32*r):], y, 32*r) + blockMix(&tmp, y, x, r) + } + for i := 0; i < N; i += 2 { + j := int(integer(x, r) & uint64(N-1)) + blockXOR(x, v[j*(32*r):], 32*r) + blockMix(&tmp, x, y, r) + + j = int(integer(y, r) & uint64(N-1)) + blockXOR(y, v[j*(32*r):], 32*r) + blockMix(&tmp, y, x, r) + } + j = 0 + for _, v := range x[:32*r] { + b[j+0] = byte(v >> 0) + b[j+1] = byte(v >> 8) + b[j+2] = byte(v >> 16) + b[j+3] = byte(v >> 24) + j += 4 + } +} + +// Key derives a key from the password, salt, and cost parameters, returning +// a byte slice of length keyLen that can be used as cryptographic key. +// +// N is a CPU/memory cost parameter, which must be a power of two greater than 1. +// r and p must satisfy r * p < 2³⁰. 
If the parameters do not satisfy the +// limits, the function returns a nil byte slice and an error. +// +// For example, you can get a derived key for e.g. AES-256 (which needs a +// 32-byte key) by doing: +// +// dk, err := scrypt.Key([]byte("some password"), salt, 16384, 8, 1, 32) +// +// The recommended parameters for interactive logins as of 2009 are N=16384, +// r=8, p=1. They should be increased as memory latency and CPU parallelism +// increases. Remember to get a good random salt. +func Key(password, salt []byte, N, r, p, keyLen int) ([]byte, error) { + if N <= 1 || N&(N-1) != 0 { + return nil, errors.New("scrypt: N must be > 1 and a power of 2") + } + if uint64(r)*uint64(p) >= 1<<30 || r > maxInt/128/p || r > maxInt/256 || N > maxInt/128/r { + return nil, errors.New("scrypt: parameters are too large") + } + + xy := make([]uint32, 64*r) + v := make([]uint32, 32*N*r) + b := pbkdf2.Key(password, salt, 1, p*128*r, sha256.New) + + for i := 0; i < p; i++ { + smix(b[i*128*r:], r, N, v, xy) + } + + return pbkdf2.Key(password, b, 1, keyLen, sha256.New), nil +} diff --git a/vendor/gopkg.in/hlandau/easymetric.v1/cexp/cexp.go b/vendor/gopkg.in/hlandau/easymetric.v1/cexp/cexp.go new file mode 100644 index 0000000..fd6d16a --- /dev/null +++ b/vendor/gopkg.in/hlandau/easymetric.v1/cexp/cexp.go @@ -0,0 +1,97 @@ +package cexp + +import "gopkg.in/hlandau/measurable.v1" +import "sync/atomic" + +// Counter + +type Counter struct { + name string + value int64 +} + +func (c *Counter) MsName() string { + return c.name +} + +func (c *Counter) MsInt64() int64 { + return atomic.LoadInt64(&c.value) +} + +func (c *Counter) Get() int64 { + return c.MsInt64() +} + +// v must be non-negative. 
+func (c *Counter) Add(v int64) { + atomic.AddInt64(&c.value, v) +} + +func (c *Counter) Inc() { + c.Add(1) +} + +func (c *Counter) MsType() measurable.Type { + return measurable.CounterType +} + +func NewCounter(name string) *Counter { + c := &Counter{ + name: name, + } + + measurable.Register(c) + return c +} + +// Gauge + +type Gauge struct { + name string + value int64 +} + +func (c *Gauge) MsName() string { + return c.name +} + +func (c *Gauge) MsInt64() int64 { + return atomic.LoadInt64(&c.value) +} + +func (c *Gauge) Add(v int64) { + atomic.AddInt64(&c.value, v) +} + +func (c *Gauge) Sub(v int64) { + c.Add(-v) +} + +func (c *Gauge) Set(v int64) { + atomic.StoreInt64(&c.value, v) +} + +func (c *Gauge) Get() int64 { + return c.MsInt64() +} + +func (c *Gauge) Inc() { + c.Add(1) +} + +func (c *Gauge) Dec() { + c.Add(-1) +} + +func (c *Gauge) MsType() measurable.Type { + return measurable.GaugeType +} + +func NewGauge(name string) *Gauge { + c := &Gauge{ + name: name, + } + + measurable.Register(c) + return c +} diff --git a/vendor/gopkg.in/hlandau/measurable.v1/README.md b/vendor/gopkg.in/hlandau/measurable.v1/README.md new file mode 100644 index 0000000..f2d582d --- /dev/null +++ b/vendor/gopkg.in/hlandau/measurable.v1/README.md @@ -0,0 +1,82 @@ +Measurable: The useless Go metric registration package that doesn't do anything +=============================================================================== + +[![GoDoc](https://godoc.org/gopkg.in/hlandau/measurable.v1?status.svg)](https://godoc.org/gopkg.in/hlandau/measurable.v1) + +Measurable is a Go library for managing the registration of metrics such as +counters and gauges, no matter how that metric data is eventually consumed. + +The most noteworthy feature of measurable is that it doesn't do anything. It +contains no functionality for providing metric data to any external service, +and it contains no actual metric implementations. 
+ +The purpose of measurable is to act as an [integration +nexus](https://www.devever.net/~hl/nexuses), essentially a matchmaker between +metric sources and metric consumers. This creates the important feature that +your application's metrics can be expressed completely independently of *how* +those metrics are exported. + +Measurable doesn't implement any metric or metric export logic because it +strives to be a neutral intermediary, which abstracts the interface between +metrics and metric exporters. + +**Import as:** `gopkg.in/hlandau/measurable.v1` + +Measurable +---------- + +A Measurable is an object that represents some metric. It is obliged only to +implement the following interface: + +```go +type Measurable interface { + MsName() string + MsType() Type +} +``` + +Measurable is designed around interface upgrades. If you want to actually +do anything with a Measurable, you must attempt to cast it to an interface +with the methods you need. A Measurable is not obliged to implement any +interface besides Measurable, but almost always will. + +Here are some common interfaces implemented by Measurables, in descending order +of importance: + + - `MsName() string` — get the Measurable name. + - `MsType() Type` — get the Measurable type. + - `MsInt64() int64` — get the Measurable as an int64. + - `String() string` — the standard Go `String()` interface. + +All Measurables should implement `MsName() string` and `MsType() Type`. + +Measurable-specific methods should always be prefixed by `Ms` so it is clear +they are intended for consumption by Measurable consumers. + +`MsName`, `MsType` and `MsInt64` should suffice for most consumers of Counter +and Gauge metric types. + +Metrics should be named in lowercase using dots to create a hierarchy and +dashes to separate words, e.g. `someserver.http.request-count`. These metric +names may be transmuted by consumers as necessary for some graphing systems, +such as Prometheus (which allows only underscores). 
+ +Standard Bindings +----------------- + +For a package which makes it easy to register and consume measurables, see the +[easymetric](https://github.com/hlandau/easymetric) package. + +Of course, nothing requires you to use the easymetric package. You are free to eschew it and make your own. + +Background Reading +------------------ + + - [On Nexuses](https://www.devever.net/~hl/nexuses) + - See also: [Configurable](https://github.com/hlandau/configurable) + +Licence +------- + + © 2015 Hugo Landau