1
0
mirror of https://github.com/kataras/iris.git synced 2026-05-11 00:23:50 +00:00

(#1554) Add support for all common compressions (write and read)

- Remove the context.Context interface and export the *context struct; iris.Context now refers to the pointer type. Support compression and rate limiting in the FileServer. A bit of code organisation.


Former-commit-id: ad1c61bf968059510c6be9e7f2cceec7da70ba17
This commit is contained in:
Gerasimos (Makis) Maropoulos
2020-07-10 23:21:09 +03:00
parent 645da2b2ef
commit 0f113dfcda
112 changed files with 2119 additions and 3390 deletions

206
context/accept_header.go Normal file
View File

@@ -0,0 +1,206 @@
package context
import "strings"
// negotiationMatch reports the first entry of "priorities" that matches
// any of the accepted values "in", honoring '*' wildcards on either side.
// e.g.
// match json:
//  in: text/html, application/json
//  priorities: application/json
// not match:
//  in: text/html, application/json
//  priorities: text/xml
// match html:
//  in: text/html, application/json
//  priorities: */*
// not match:
//  in: application/json
//  priorities: text/xml
// match json:
//  in: text/html, application/*
//  priorities: application/json
//
// It returns "" when nothing matches. When "in" is empty the first
// priority is returned as-is.
func negotiationMatch(in []string, priorities []string) string {
	if len(priorities) == 0 {
		return ""
	}

	if len(in) == 0 {
		return priorities[0]
	}

	for _, accepted := range in {
		for _, p := range priorities {
			// wildcard is */* or text/* and etc.
			// so loop through each char.
			for i, n := 0, len(accepted); i < n; i++ {
				if i >= len(p) {
					// "p" is a strict prefix of "accepted" without a
					// wildcard; it cannot match (and p[i] would panic).
					break
				}

				// Check wildcards BEFORE the inequality test, otherwise
				// '*' compared against any other character counts as a
				// mismatch and a wildcard could never match.
				if accepted[i] == '*' || p[i] == '*' {
					return p
				}

				if accepted[i] != p[i] {
					break
				}

				if i == n-1 {
					return p
				}
			}
		}
	}

	return ""
}
// negotiateAcceptHeader resolves the best match between the parsed
// Accept* header values "in" and the server-supported "offers".
// An empty "bestOffer" falls back to IDENTITY; an explicit match with
// quality zero clears the result entirely.
func negotiateAcceptHeader(in []string, offers []string, bestOffer string) string {
	if bestOffer == "" {
		bestOffer = IDENTITY
	}

	specs := parseAccept(in)
	bestQ := -1.0

	for _, offer := range offers {
		for _, spec := range specs {
			if spec.Q <= bestQ {
				continue // cannot improve on the current best.
			}
			if spec.Value != "*" && spec.Value != offer {
				continue // neither wildcard nor an exact offer match.
			}
			bestQ = spec.Q
			bestOffer = offer
		}
	}

	if bestQ == 0 {
		// The client explicitly refused the best candidate (q=0).
		bestOffer = ""
	}

	return bestOffer
}
// acceptSpec describes an Accept* header.
type acceptSpec struct {
	// Value is the parsed token, e.g. "application/json", "gzip" or "*".
	Value string
	// Q is the quality factor; defaults to 1.0 when no ";q=" parameter exists.
	Q float64
}
// parseAccept parses Accept* headers.
// Each header value may contain several comma-separated specs;
// a malformed piece aborts parsing of the remainder of that header only.
func parseAccept(in []string) []acceptSpec {
	var specs []acceptSpec
	for _, header := range in {
		specs = appendAcceptSpecs(specs, header)
	}
	return specs
}

// appendAcceptSpecs parses a single Accept* header value into "specs"
// and returns the extended slice.
func appendAcceptSpecs(specs []acceptSpec, s string) []acceptSpec {
	for {
		var spec acceptSpec
		spec.Value, s = expectTokenSlash(s)
		if spec.Value == "" {
			return specs
		}

		spec.Q = 1.0
		if s = skipSpace(s); strings.HasPrefix(s, ";") {
			if s = skipSpace(s[1:]); !strings.HasPrefix(s, "q=") {
				return specs
			}
			if spec.Q, s = expectQuality(s[2:]); spec.Q < 0.0 {
				return specs
			}
		}

		specs = append(specs, spec)

		if s = skipSpace(s); !strings.HasPrefix(s, ",") {
			return specs
		}
		s = skipSpace(s[1:])
	}
}
// skipSpace trims leading linear whitespace (per octetTypes) from "s"
// and returns the remainder.
func skipSpace(s string) (rest string) {
	for i := 0; i < len(s); i++ {
		if octetTypes[s[i]]&isSpace == 0 {
			return s[i:]
		}
	}
	return ""
}
// expectTokenSlash reads the leading token from "s", additionally
// allowing '/' inside it (media types such as "text/html"), and
// returns the token together with the unread remainder.
func expectTokenSlash(s string) (token, rest string) {
	end := 0
	for ; end < len(s); end++ {
		c := s[end]
		if octetTypes[c]&isToken == 0 && c != '/' {
			break
		}
	}
	return s[:end], s[end:]
}
// expectQuality parses a quality value ("0", "1", "0.5", ...) from the
// start of "s" and returns it with the unread remainder.
// A malformed value yields -1 so the caller can discard the spec.
func expectQuality(s string) (q float64, rest string) {
	if len(s) == 0 {
		return -1, ""
	}

	switch s[0] {
	case '0':
		q = 0
	case '1':
		q = 1
	default:
		return -1, ""
	}

	s = s[1:]
	if !strings.HasPrefix(s, ".") {
		return q, s
	}
	s = s[1:]

	// Accumulate the fractional digits as numerator/denominator.
	num, div, i := 0, 1, 0
	for ; i < len(s); i++ {
		if s[i] < '0' || s[i] > '9' {
			break
		}
		num = num*10 + int(s[i]-'0')
		div *= 10
	}

	return q + float64(num)/float64(div), s[i:]
}
// Octet types from RFC 2616.
// The table is populated once in init() below.
var octetTypes [256]octetType

// octetType is a bit mask of the RFC 2616 character classes an octet
// belongs to; see the isToken/isSpace flags.
type octetType byte

const (
	// isToken marks octets valid inside an HTTP token.
	isToken octetType = 1 << iota
	// isSpace marks linear whitespace octets (SP, HT, CR, LF).
	isSpace
)
// init precomputes the octetTypes classification table per RFC 2616:
// a token octet is any US-ASCII character that is neither a control
// character (0-31, 127) nor a separator; linear whitespace is
// SP, HT, CR and LF.
func init() {
	for c := 0; c < 256; c++ {
		var t octetType

		ctl := c <= 31 || c == 127
		ascii := c <= 127
		sep := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))

		if strings.ContainsRune(" \t\r\n", rune(c)) {
			t |= isSpace
		}
		if ascii && !ctl && !sep {
			t |= isToken
		}

		octetTypes[c] = t
	}
}

View File

@@ -35,7 +35,7 @@ type Application interface {
// i.e: routing within a foreign context.
//
// It is ready to use after Build state.
ServeHTTPC(ctx Context)
ServeHTTPC(ctx *Context)
// ServeHTTP is the main router handler which calls the .Serve and acquires a new context from the pool.
//
@@ -65,11 +65,11 @@ type Application interface {
// then it will try to reset the headers and the body before calling the
// registered (or default) error handler for that error code set by
// `ctx.StatusCode` method.
FireErrorCode(ctx Context)
FireErrorCode(ctx *Context)
// RouteExists reports whether a particular route exists
// It will search from the current subdomain of context's host, if not inside the root domain.
RouteExists(ctx Context, method, path string) bool
RouteExists(ctx *Context, method, path string) bool
// FindClosestPaths returns a list of "n" paths close to "path" under the given "subdomain".
//
// Order may change.

273
context/compress.go Normal file
View File

@@ -0,0 +1,273 @@
package context
import (
"errors"
"fmt"
"io"
"net/http"
"sync"
"github.com/andybalholm/brotli"
"github.com/klauspost/compress/flate"
"github.com/klauspost/compress/gzip"
"github.com/klauspost/compress/s2"
"github.com/klauspost/compress/snappy"
)
// The available builtin compression algorithms.
// The values double as "Content-Encoding"/"Accept-Encoding" header tokens.
const (
	GZIP    = "gzip"
	DEFLATE = "deflate"
	BROTLI  = "br"
	SNAPPY  = "snappy"
	S2      = "s2"
)

// IDENTITY no transformation whatsoever.
const IDENTITY = "identity"
var (
	// ErrResponseNotCompressed returned from AcquireCompressResponseWriter
	// when response's Content-Type header is missing due to golang/go/issues/31753 or
	// when accept-encoding is empty. The caller should fallback to the original response writer.
	ErrResponseNotCompressed = errors.New("compress: response will not be compressed")
	// ErrRequestNotCompressed returned from NewCompressReader
	// when request is not compressed (empty encoding or nil source).
	ErrRequestNotCompressed = errors.New("compress: request is not compressed")
	// ErrNotSupportedCompression returned from
	// AcquireCompressResponseWriter, NewCompressWriter and NewCompressReader
	// when the request's Accept-Encoding was not found in the server's supported
	// compression algorithms. Check that error with `errors.Is`.
	ErrNotSupportedCompression = errors.New("compress: unsupported compression")
)
type (
	// noOpWriter discards everything written to it; used to detach a
	// pooled compress writer from the real response when compression
	// has been disabled.
	noOpWriter struct{}

	// noOpReadCloser adds a no-op Close to a plain io.Reader so it can
	// satisfy io.ReadCloser.
	noOpReadCloser struct {
		io.Reader
	}
)

// Compile-time interface checks.
var (
	_ io.ReadCloser = (*noOpReadCloser)(nil)
	_ io.Writer     = (*noOpWriter)(nil)
)

// Write implements io.Writer; it reports success without writing anything.
func (w *noOpWriter) Write(p []byte) (int, error) { return 0, nil }

// Close implements io.Closer; it does nothing.
func (r *noOpReadCloser) Close() error {
	return nil
}
// CompressWriter is an interface which all compress writers should implement.
type CompressWriter interface {
	io.WriteCloser
	// All known implementations contain `Flush` and `Reset` methods,
	// so we wanna declare them upfront.

	// Flush writes any pending compressed data to the underlying writer.
	Flush() error
	// Reset discards the writer's state and redirects its output,
	// allowing a pooled writer to be reused.
	Reset(io.Writer)
}
// NewCompressWriter returns a CompressWriter of "w" based on the given "encoding".
// Use -1 as "level" for each algorithm's default; it returns
// ErrNotSupportedCompression for unknown encodings (including IDENTITY,
// which is not acceptable as a "Content-Encoding" value).
func NewCompressWriter(w io.Writer, encoding string, level int) (CompressWriter, error) {
	switch encoding {
	case GZIP:
		return gzip.NewWriterLevel(w, level)
	case DEFLATE: // -1 default level, same for gzip.
		return flate.NewWriter(w, level)
	case BROTLI: // 6 default level; brotli has no -1 sentinel.
		if level == -1 {
			level = 6
		}
		return brotli.NewWriterLevel(w, level), nil
	case SNAPPY:
		return snappy.NewWriter(w), nil
	case S2:
		return s2.NewWriter(w), nil
	default:
		return nil, ErrNotSupportedCompression
	}
}
// CompressReader is a structure which wraps a compressed reader.
// It is used for determination across common request body and a compressed one.
type CompressReader struct {
	io.ReadCloser
	// We need this to reset the body to its original state, if requested.
	Src io.ReadCloser
	// Encoding is the compression algorithm used to decompress and read the data.
	Encoding string
}
// NewCompressReader wraps "src" with a decompressing reader for "encoding".
// It returns `ErrRequestNotCompressed` if client's request data are not compressed
// (nil source or empty encoding) or `ErrNotSupportedCompression` if the server
// is missing the decompression algorithm.
// Note: on server-side the request body (src) will be closed automatically.
func NewCompressReader(src io.Reader, encoding string) (*CompressReader, error) {
	if src == nil || encoding == "" {
		return nil, ErrRequestNotCompressed
	}

	var rc io.ReadCloser
	switch encoding {
	case GZIP:
		gr, err := gzip.NewReader(src)
		if err != nil {
			return nil, err
		}
		rc = gr
	case DEFLATE:
		rc = &noOpReadCloser{flate.NewReader(src)}
	case BROTLI:
		rc = &noOpReadCloser{brotli.NewReader(src)}
	case SNAPPY:
		rc = &noOpReadCloser{snappy.NewReader(src)}
	case S2:
		rc = &noOpReadCloser{s2.NewReader(src)}
	default:
		return nil, ErrNotSupportedCompression
	}

	// Keep the original body around so it can be restored on demand.
	original, isReadCloser := src.(io.ReadCloser)
	if !isReadCloser {
		original = &noOpReadCloser{src}
	}

	return &CompressReader{
		ReadCloser: rc,
		Src:        original,
		Encoding:   encoding,
	}, nil
}
// compressWritersPool reuses CompressResponseWriter instances across requests.
var compressWritersPool = sync.Pool{New: func() interface{} { return &CompressResponseWriter{} }}
// AddCompressHeaders just adds the headers "Vary" to "Accept-Encoding"
// and "Content-Encoding" to the given encoding.
func AddCompressHeaders(h http.Header, encoding string) {
h.Set(VaryHeaderKey, AcceptEncodingHeaderKey)
h.Set(ContentEncodingHeaderKey, encoding)
}
// CompressResponseWriter is a compressed data http.ResponseWriter.
type CompressResponseWriter struct {
	CompressWriter // the negotiated compressor; non-disabled writes pass through it.
	ResponseWriter // the wrapped Iris response writer.

	// Disabled bypasses compression: writes go straight to ResponseWriter
	// and the compress headers are removed on FlushResponse.
	Disabled bool
	// Encoding is the negotiated algorithm name, e.g. "gzip" or "br".
	Encoding string
	// Level is the compression level the CompressWriter was built with.
	Level int
}

// Compile-time check that the iris ResponseWriter interface is satisfied.
var _ ResponseWriter = (*CompressResponseWriter)(nil)
// AcquireCompressResponseWriter returns a CompressResponseWriter from the pool.
// It accepts an Iris response writer, a net/http request value and
// the level of compression (use -1 for default compression level).
//
// It returns the best candidate among "gzip", "deflate", "br", "snappy" and "s2"
// based on the request's "Accept-Encoding" header value, or
// ErrResponseNotCompressed/ErrNotSupportedCompression when negotiation fails.
// On failure the response headers are left untouched.
func AcquireCompressResponseWriter(w ResponseWriter, r *http.Request, level int) (*CompressResponseWriter, error) {
	acceptEncoding := r.Header.Values(AcceptEncodingHeaderKey)
	if len(acceptEncoding) == 0 {
		return nil, ErrResponseNotCompressed
	}

	// Use the package's algorithm constants instead of repeating the literals.
	encoding := negotiateAcceptHeader(acceptEncoding, []string{GZIP, DEFLATE, BROTLI, SNAPPY, S2}, "")
	if encoding == "" {
		// Report the client's values; "encoding" itself is always empty here.
		return nil, fmt.Errorf("%w: %v", ErrNotSupportedCompression, acceptEncoding)
	}

	v := compressWritersPool.Get().(*CompressResponseWriter)
	v.ResponseWriter = w
	v.Disabled = false

	if level == -1 && encoding == BROTLI {
		level = 6 // brotli's default level; its writer takes no -1 sentinel.
	}

	// Writer exists, encoding matching and it's valid because it has a non nil
	// CompressWriter; just reset to reduce allocations.
	if v.Encoding == encoding && v.Level == level && v.CompressWriter != nil {
		v.CompressWriter.Reset(w)
		AddCompressHeaders(w.Header(), encoding)
		return v, nil
	}

	encWriter, err := NewCompressWriter(w, encoding, level)
	if err != nil {
		// Do not leak the pooled value: it is still in its previous,
		// self-consistent state, so it is safe to put back for reuse.
		releaseCompressResponseWriter(v)
		return nil, err
	}

	v.Encoding = encoding
	v.Level = level
	v.CompressWriter = encWriter

	// Set Vary/Content-Encoding only after the compressor is successfully
	// created, so a failure leaves the response headers clean.
	AddCompressHeaders(w.Header(), encoding)
	return v, nil
}
// releaseCompressResponseWriter puts "w" back to the pool;
// see AcquireCompressResponseWriter.
func releaseCompressResponseWriter(w *CompressResponseWriter) {
	compressWritersPool.Put(w)
}
// FlushResponse flushes any data, closes the underline compress writer
// and writes the status code.
// Called automatically before `EndResponse`.
func (w *CompressResponseWriter) FlushResponse() {
	if w.Disabled {
		// Compression was rolled back: drop the compress headers and detach
		// the compressor so closing it cannot touch the real response.
		w.Header().Del(VaryHeaderKey)
		w.Header().Del(ContentEncodingHeaderKey)
		w.CompressWriter.Reset(&noOpWriter{})
		w.CompressWriter.Close()
		w.ResponseWriter.FlushResponse()
		return
	}

	// The compressed body length differs from any pre-set Content-Length.
	w.ResponseWriter.Header().Del(ContentLengthHeaderKey)
	w.CompressWriter.Close() // flushes and closes.
	w.ResponseWriter.FlushResponse()
}
// EndResponse releases the writers back to their pools.
func (w *CompressResponseWriter) EndResponse() {
	w.ResponseWriter.EndResponse()
	releaseCompressResponseWriter(w)
}
// Write compresses and writes "p" to the client, sniffing the
// Content-Type from the first chunk when the handler did not set one.
// When compression is disabled the bytes are written in plain form.
func (w *CompressResponseWriter) Write(p []byte) (int, error) {
	if w.Disabled {
		// If disabled the response will not be compressed (see also golang/go/issues/31753).
		return w.ResponseWriter.Write(p)
	}

	hdr := w.Header()
	if hdr.Get(ContentTypeHeaderKey) == "" {
		hdr.Set(ContentTypeHeaderKey, http.DetectContentType(p))
	}

	return w.CompressWriter.Write(p)
}
// Flush sends any buffered data to the client.
// Can be called manually.
func (w *CompressResponseWriter) Flush() {
	if w.Disabled {
		w.ResponseWriter.Flush()
		return
	}

	// Flush the compressor first so its buffered output reaches the
	// underlying writer before that one is flushed.
	w.CompressWriter.Flush()
	w.ResponseWriter.Flush()
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,231 +0,0 @@
package context
import (
"fmt"
"io"
"sync"
"github.com/klauspost/compress/gzip"
)
// compressionPool is a wrapper of sync.Pool, to initialize a new compression writer pool.
type compressionPool struct {
	sync.Pool
	// Level is the compression level used when a fresh writer must be
	// allocated because the pool is empty.
	Level int
}
// +------------------------------------------------------------+
// |GZIP raw io.writer, our gzip response writer will use that. |
// +------------------------------------------------------------+

// gzipPool is the default writer pool with Compressor's level set to -1
// (the gzip default compression level).
var gzipPool = &compressionPool{Level: -1}
// acquireGzipWriter prepares a gzip writer targeting "w" and returns it,
// reusing a pooled instance when one is available.
// It returns nil when a writer cannot be built for the pool's level.
//
// see releaseGzipWriter too.
func acquireGzipWriter(w io.Writer) *gzip.Writer {
	if v := gzipPool.Get(); v != nil {
		gw := v.(*gzip.Writer)
		gw.Reset(w)
		return gw
	}

	gw, err := gzip.NewWriterLevel(w, gzipPool.Level)
	if err != nil {
		return nil
	}
	return gw
}
// releaseGzipWriter called when flush/close and put the gzip writer back to the pool.
// It closes (and therefore flushes) the writer before pooling it.
//
// see acquireGzipWriter too.
func releaseGzipWriter(gzipWriter *gzip.Writer) {
	gzipWriter.Close()
	gzipPool.Put(gzipWriter)
}
// writeGzip writes a compressed form of "b" to "w" using a pooled gzip
// writer, flushing before the writer is returned to the pool.
// On a write error it returns -1 together with the error.
func writeGzip(w io.Writer, b []byte) (int, error) {
	gw := acquireGzipWriter(w)
	defer releaseGzipWriter(gw)

	n, err := gw.Write(b)
	if err != nil {
		return -1, err
	}
	return n, gw.Flush()
}
// gzpool reuses GzipResponseWriter values between requests.
var gzpool = sync.Pool{New: func() interface{} { return &GzipResponseWriter{} }}
// AcquireGzipResponseWriter returns a new *GzipResponseWriter from the pool.
// Releasing is done automatically when request and response is done.
func AcquireGzipResponseWriter() *GzipResponseWriter {
	return gzpool.Get().(*GzipResponseWriter)
}
// releaseGzipResponseWriter puts "w" back to the pool;
// see AcquireGzipResponseWriter.
func releaseGzipResponseWriter(w *GzipResponseWriter) {
	gzpool.Put(w)
}
// GzipResponseWriter is an upgraded response writer which writes compressed data to the underline ResponseWriter.
//
// It's a separate response writer because iris gives you the ability to "fallback" and "roll-back" the gzip encoding if something
// went wrong with the response, and write http errors in plain form instead.
type GzipResponseWriter struct {
	ResponseWriter
	// chunks buffers the still-uncompressed body; it is compressed in
	// one go at WriteNow/FlushResponse time.
	chunks []byte
	// disabled, when true, makes WriteNow emit the buffered bytes in
	// plain (uncompressed) form.
	disabled bool
}

// Compile-time check that the iris ResponseWriter interface is satisfied.
var _ ResponseWriter = (*GzipResponseWriter)(nil)
// BeginGzipResponse accepts a ResponseWriter
// and prepares the new gzip response writer.
// It's being called per-handler, when caller decide
// to change the response writer type.
func (w *GzipResponseWriter) BeginGzipResponse(underline ResponseWriter) {
	w.ResponseWriter = underline
	w.disabled = false
	w.chunks = w.chunks[:0] // keep the buffer's capacity across requests.
}
// EndResponse called right before the contents of this
// response writer are flushed to the client.
// NOTE(review): the writer is put back to the pool before the wrapped
// EndResponse runs — assumes the wrapped call no longer touches "w"; verify.
func (w *GzipResponseWriter) EndResponse() {
	releaseGzipResponseWriter(w)
	w.ResponseWriter.EndResponse()
}
// Write prepares the data write to the gzip writer and finally to its
// underline response writer, returns the uncompressed len(contents).
// The bytes are only buffered here; the actual compression happens in
// WriteNow/FlushResponse.
func (w *GzipResponseWriter) Write(contents []byte) (int, error) {
	// save the contents to serve them (only gzip data here)
	w.chunks = append(w.chunks, contents...)
	return len(contents), nil
}
// Writef formats according to a format specifier and writes to the response.
// It sets a plain-text Content-Type when none has been set yet.
//
// Returns the number of bytes written and any write error encountered.
func (w *GzipResponseWriter) Writef(format string, a ...interface{}) (n int, err error) {
	if n, err = fmt.Fprintf(w, format, a...); err != nil {
		return
	}

	h := w.ResponseWriter.Header()
	if h[ContentTypeHeaderKey] == nil {
		h.Set(ContentTypeHeaderKey, ContentTextHeaderValue)
	}
	return
}
// WriteString prepares the string data write to the gzip writer and finally to its
// underline response writer, returns the uncompressed len(contents).
// It sets a plain-text Content-Type when none has been set yet.
func (w *GzipResponseWriter) WriteString(s string) (n int, err error) {
	if n, err = w.Write([]byte(s)); err != nil {
		return
	}

	h := w.ResponseWriter.Header()
	if h[ContentTypeHeaderKey] == nil {
		h.Set(ContentTypeHeaderKey, ContentTextHeaderValue)
	}
	return
}
// WriteNow compresses and writes that data to the underline response writer,
// returns the compressed written len.
//
// Use `WriteNow` instead of `Write`
// when you need to know the compressed written size before
// the `FlushResponse`, note that you can't post any new headers
// after that, so that information is not closed to the handler anymore.
func (w *GzipResponseWriter) WriteNow(contents []byte) (int, error) {
	if w.disabled {
		// When gzip has been rolled back, bypass the gzip writer entirely:
		// even an otherwise-idle gzip writer touches the underlying
		// http.ResponseWriter and would corrupt the Content-Length
		// (the cause of iris issue #723).
		return w.ResponseWriter.Write(contents)
	}

	AddGzipHeaders(w.ResponseWriter)
	// Note: any existing "Content-Length" header is intentionally left
	// as-is for now, even though the gzip output length differs from it.
	return writeGzip(w.ResponseWriter, contents)
}
// AddGzipHeaders just adds the headers "Vary" to "Accept-Encoding"
// and "Content-Encoding" to "gzip".
// Note: it uses Add (not Set), so repeated calls append duplicate values.
func AddGzipHeaders(w ResponseWriter) {
	w.Header().Add(VaryHeaderKey, AcceptEncodingHeaderKey)
	w.Header().Add(ContentEncodingHeaderKey, GzipHeaderValue)
}
// Body returns the body tracked from the writer so far,
// do not use this for edit.
// The returned slice is the internal, still-uncompressed buffer.
func (w *GzipResponseWriter) Body() []byte {
	return w.chunks
}
// ResetBody resets the response body.
// Implements the `ResponseWriterBodyReseter`.
// The buffer's capacity is kept for reuse.
func (w *GzipResponseWriter) ResetBody() {
	w.chunks = w.chunks[0:0]
}
// Disable turns off the gzip compression for the next .Write's data,
// if called then the contents are being written in plain form.
func (w *GzipResponseWriter) Disable() {
	w.disabled = true
}
// Reset disables the gzip content writer, clears headers, sets the status code to 200
// and clears the cached body.
//
// Implements the `ResponseWriterReseter`.
func (w *GzipResponseWriter) Reset() bool {
	// Roll back to plain output.
	w.Disable()

	// Clear every response header written so far.
	header := w.ResponseWriter.Header()
	for key := range header {
		header[key] = nil
	}

	// Restore the default status code and drop the buffered body.
	w.WriteHeader(defaultStatusCode)
	w.ResetBody()

	return true
}
// FlushResponse validates the response headers in order to be compatible with the gzip written data
// and writes the data to the underline ResponseWriter.
// The WriteNow error, if any, is intentionally discarded here.
func (w *GzipResponseWriter) FlushResponse() {
	_, _ = w.WriteNow(w.chunks)
	w.ResponseWriter.FlushResponse()
}

View File

@@ -82,7 +82,7 @@ func (expr *nameExpr) MatchString(s string) bool {
//
// If Handler panics, the server (the caller of Handler) assumes that the effect of the panic was isolated to the active request.
// It recovers the panic, logs a stack trace to the server error log, and hangs up the connection.
type Handler func(Context)
type Handler func(*Context)
// Handlers is just a type of slice of []Handler.
//
@@ -235,7 +235,7 @@ func ingoreMainHandlerName(name string) bool {
// based on the incoming request.
//
// See `NewConditionalHandler` for more.
type Filter func(Context) bool
type Filter func(*Context) bool
// NewConditionalHandler returns a single Handler which can be registered
// as a middleware.
@@ -254,7 +254,7 @@ type Filter func(Context) bool
//
// Example can be found at: _examples/routing/conditional-chain.
func NewConditionalHandler(filter Filter, handlers ...Handler) Handler {
return func(ctx Context) {
return func(ctx *Context) {
if filter(ctx) {
// Note that we don't want just to fire the incoming handlers, we must make sure
// that it won't break any further handler chain

View File

@@ -6,7 +6,7 @@ import "golang.org/x/text/language"
// Read the "i18n" package fo details.
type I18nReadOnly interface {
Tags() []language.Tag
GetLocale(ctx Context) Locale
GetLocale(ctx *Context) Locale
Tr(lang string, format string, args ...interface{}) string
}

View File

@@ -6,41 +6,26 @@ import (
)
// Pool is the context pool, it's used inside router and the framework by itself.
//
// It's the only one real implementation inside this package because it used widely.
type Pool struct {
pool *sync.Pool
newFunc func() Context // we need a field otherwise is not working if we change the return value
pool *sync.Pool
}
// New creates and returns a new context pool.
func New(newFunc func() Context) *Pool {
c := &Pool{pool: &sync.Pool{}, newFunc: newFunc}
c.pool.New = func() interface{} { return c.newFunc() }
return c
}
// Attach changes the pool's return value Context.
//
// The new Context should explicitly define the `Next()`
// and `Do(context.Handlers)` functions.
//
// Example: https://github.com/kataras/iris/blob/master/_examples/routing/custom-context/method-overriding/main.go
func (c *Pool) Attach(newFunc func() Context) {
c.newFunc = newFunc
func New(newFunc func() interface{}) *Pool {
return &Pool{pool: &sync.Pool{New: newFunc}}
}
// Acquire returns a Context from pool.
// See Release.
func (c *Pool) Acquire(w http.ResponseWriter, r *http.Request) Context {
ctx := c.pool.Get().(Context)
func (c *Pool) Acquire(w http.ResponseWriter, r *http.Request) *Context {
ctx := c.pool.Get().(*Context)
ctx.BeginRequest(w, r)
return ctx
}
// Release puts a Context back to its pull, this function releases its resources.
// See Acquire.
func (c *Pool) Release(ctx Context) {
func (c *Pool) Release(ctx *Context) {
ctx.EndRequest()
c.pool.Put(ctx)
}
@@ -48,6 +33,6 @@ func (c *Pool) Release(ctx Context) {
// ReleaseLight will just release the object back to the pool, but the
// clean method is caller's responsibility now, currently this is only used
// on `SPABuilder`.
func (c *Pool) ReleaseLight(ctx Context) {
func (c *Pool) ReleaseLight(ctx *Context) {
c.pool.Put(ctx)
}

View File

@@ -73,7 +73,7 @@ func (p Problem) getURI(key string) string {
}
// Updates "type" field to absolute URI, recursively.
func (p Problem) updateURIsToAbs(ctx Context) {
func (p Problem) updateURIsToAbs(ctx *Context) {
if p == nil {
return
}
@@ -271,7 +271,7 @@ type ProblemOptions struct {
// Should return time.Time, time.Duration, int64, int, float64 or string.
//
// Overrides the RetryAfter field.
RetryAfterFunc func(Context) interface{}
RetryAfterFunc func(*Context) interface{}
}
func parseDurationToSeconds(dur time.Duration) int64 {
@@ -310,7 +310,7 @@ func (o *ProblemOptions) parseRetryAfter(value interface{}, timeLayout string) s
}
// Apply accepts a Context and applies specific response-time options.
func (o *ProblemOptions) Apply(ctx Context) {
func (o *ProblemOptions) Apply(ctx *Context) {
retryAfterHeaderValue := ""
timeLayout := ctx.Application().ConfigurationReadOnly().GetTimeFormat()

View File

@@ -95,7 +95,7 @@ func (r RequestParams) GetIntUnslashed(key string) (int, bool) {
// The value is a function which accepts the parameter index
// and it should return the value as the parameter type evaluator expects it.
// i.e [reflect.TypeOf("string")] = func(paramIndex int) interface{} {
// return func(ctx Context) <T> {
// return func(ctx *Context) <T> {
// return ctx.Params().GetEntryAt(paramIndex).ValueRaw.(<T>)
// }
// }
@@ -107,7 +107,7 @@ func (r RequestParams) GetIntUnslashed(key string) (int, bool) {
// when on the second requested path, the 'pssecond' should be empty.
var ParamResolvers = map[reflect.Type]func(paramIndex int) interface{}{
reflect.TypeOf(""): func(paramIndex int) interface{} {
return func(ctx Context) string {
return func(ctx *Context) string {
if ctx.Params().Len() <= paramIndex {
return ""
}
@@ -115,7 +115,7 @@ var ParamResolvers = map[reflect.Type]func(paramIndex int) interface{}{
}
},
reflect.TypeOf(int(1)): func(paramIndex int) interface{} {
return func(ctx Context) int {
return func(ctx *Context) int {
if ctx.Params().Len() <= paramIndex {
return 0
}
@@ -125,7 +125,7 @@ var ParamResolvers = map[reflect.Type]func(paramIndex int) interface{}{
}
},
reflect.TypeOf(int8(1)): func(paramIndex int) interface{} {
return func(ctx Context) int8 {
return func(ctx *Context) int8 {
if ctx.Params().Len() <= paramIndex {
return 0
}
@@ -133,7 +133,7 @@ var ParamResolvers = map[reflect.Type]func(paramIndex int) interface{}{
}
},
reflect.TypeOf(int16(1)): func(paramIndex int) interface{} {
return func(ctx Context) int16 {
return func(ctx *Context) int16 {
if ctx.Params().Len() <= paramIndex {
return 0
}
@@ -141,7 +141,7 @@ var ParamResolvers = map[reflect.Type]func(paramIndex int) interface{}{
}
},
reflect.TypeOf(int32(1)): func(paramIndex int) interface{} {
return func(ctx Context) int32 {
return func(ctx *Context) int32 {
if ctx.Params().Len() <= paramIndex {
return 0
}
@@ -149,7 +149,7 @@ var ParamResolvers = map[reflect.Type]func(paramIndex int) interface{}{
}
},
reflect.TypeOf(int64(1)): func(paramIndex int) interface{} {
return func(ctx Context) int64 {
return func(ctx *Context) int64 {
if ctx.Params().Len() <= paramIndex {
return 0
}
@@ -157,7 +157,7 @@ var ParamResolvers = map[reflect.Type]func(paramIndex int) interface{}{
}
},
reflect.TypeOf(uint(1)): func(paramIndex int) interface{} {
return func(ctx Context) uint {
return func(ctx *Context) uint {
if ctx.Params().Len() <= paramIndex {
return 0
}
@@ -165,7 +165,7 @@ var ParamResolvers = map[reflect.Type]func(paramIndex int) interface{}{
}
},
reflect.TypeOf(uint8(1)): func(paramIndex int) interface{} {
return func(ctx Context) uint8 {
return func(ctx *Context) uint8 {
if ctx.Params().Len() <= paramIndex {
return 0
}
@@ -173,7 +173,7 @@ var ParamResolvers = map[reflect.Type]func(paramIndex int) interface{}{
}
},
reflect.TypeOf(uint16(1)): func(paramIndex int) interface{} {
return func(ctx Context) uint16 {
return func(ctx *Context) uint16 {
if ctx.Params().Len() <= paramIndex {
return 0
}
@@ -181,7 +181,7 @@ var ParamResolvers = map[reflect.Type]func(paramIndex int) interface{}{
}
},
reflect.TypeOf(uint32(1)): func(paramIndex int) interface{} {
return func(ctx Context) uint32 {
return func(ctx *Context) uint32 {
if ctx.Params().Len() <= paramIndex {
return 0
}
@@ -189,7 +189,7 @@ var ParamResolvers = map[reflect.Type]func(paramIndex int) interface{}{
}
},
reflect.TypeOf(uint64(1)): func(paramIndex int) interface{} {
return func(ctx Context) uint64 {
return func(ctx *Context) uint64 {
if ctx.Params().Len() <= paramIndex {
return 0
}
@@ -197,7 +197,7 @@ var ParamResolvers = map[reflect.Type]func(paramIndex int) interface{}{
}
},
reflect.TypeOf(true): func(paramIndex int) interface{} {
return func(ctx Context) bool {
return func(ctx *Context) bool {
if ctx.Params().Len() <= paramIndex {
return false
}
@@ -219,7 +219,7 @@ func ParamResolverByTypeAndIndex(typ reflect.Type, paramIndex int) (reflect.Valu
/* NO:
// This could work but its result is not exact type, so direct binding is not possible.
resolver := m.ParamResolver
fn := func(ctx context.Context) interface{} {
fn := func(ctx *context.Context) interface{} {
entry, _ := ctx.Params().GetEntry(paramName)
return resolver(entry)
}
@@ -227,10 +227,10 @@ func ParamResolverByTypeAndIndex(typ reflect.Type, paramIndex int) (reflect.Valu
// This works but it is slower on serve-time.
paramNameValue := []reflect.Value{reflect.ValueOf(paramName)}
var fnSignature func(context.Context) string
var fnSignature func(*context.Context) string
return reflect.MakeFunc(reflect.ValueOf(&fnSignature).Elem().Type(), func(in []reflect.Value) []reflect.Value {
return in[0].MethodByName("Params").Call(emptyIn)[0].MethodByName("Get").Call(paramNameValue)
// return []reflect.Value{reflect.ValueOf(in[0].Interface().(context.Context).Params().Get(paramName))}
// return []reflect.Value{reflect.ValueOf(in[0].Interface().(*context.Context).Params().Get(paramName))}
})
//
*/

View File

@@ -1,13 +1,12 @@
package context
import (
"fmt"
"net/http"
"sync"
)
// Recorder the middleware to enable response writer recording ( ResponseWriter -> ResponseRecorder)
var Recorder = func(ctx Context) {
var Recorder = func(ctx *Context) {
ctx.Record()
ctx.Next()
}
@@ -90,20 +89,6 @@ func (w *ResponseRecorder) Write(contents []byte) (int, error) {
return len(contents), nil
}
// Writef formats according to a format specifier and writes to the response.
//
// Returns the number of bytes written and any write error encountered.
func (w *ResponseRecorder) Writef(format string, a ...interface{}) (n int, err error) {
return fmt.Fprintf(w, format, a...)
}
// WriteString writes a simple string to the response.
//
// Returns the number of bytes written and any write error encountered
func (w *ResponseRecorder) WriteString(s string) (n int, err error) {
return w.Write([]byte(s))
}
// SetBody overrides the body and sets it to a slice of bytes value.
func (w *ResponseRecorder) SetBody(b []byte) {
w.chunks = b

View File

@@ -3,8 +3,6 @@ package context
import (
"bufio"
"errors"
"fmt"
"io"
"net"
"net/http"
"sync"
@@ -15,8 +13,8 @@ import (
//
// Note: Only this ResponseWriter is an interface in order to be able
// for developers to change the response writer of the Context via `context.ResetResponseWriter`.
// The rest of the response writers implementations (ResponseRecorder & GzipResponseWriter) are coupled to the internal
// ResponseWriter implementation(*responseWriter).
// The rest of the response writers implementations (ResponseRecorder & CompressResponseWriter)
// are coupled to the internal ResponseWriter implementation(*responseWriter).
//
// A ResponseWriter may not be used after the Handler
// has returned.
@@ -45,16 +43,6 @@ type ResponseWriter interface {
// IsHijacked reports whether this response writer's connection is hijacked.
IsHijacked() bool
// Writef formats according to a format specifier and writes to the response.
//
// Returns the number of bytes written and any write error encountered.
Writef(format string, a ...interface{}) (n int, err error)
// WriteString writes a simple string to the response.
//
// Returns the number of bytes written and any write error encountered.
WriteString(s string) (n int, err error)
// StatusCode returns the status code header value.
StatusCode() int
@@ -279,23 +267,6 @@ func (w *responseWriter) Write(contents []byte) (int, error) {
return n, err
}
// Writef formats according to a format specifier and writes to the response.
//
// Returns the number of bytes written and any write error encountered.
func (w *responseWriter) Writef(format string, a ...interface{}) (n int, err error) {
return fmt.Fprintf(w, format, a...)
}
// WriteString writes a simple string to the response.
//
// Returns the number of bytes written and any write error encountered.
func (w *responseWriter) WriteString(s string) (int, error) {
w.tryWriteHeader()
n, err := io.WriteString(w.ResponseWriter, s)
w.written += n
return n, err
}
// StatusCode returns the status code header value
func (w *responseWriter) StatusCode() int {
return w.statusCode

View File

@@ -34,14 +34,14 @@ func NewTransactionErrResult() TransactionErrResult {
type TransactionScope interface {
// EndTransaction returns if can continue to the next transactions or not (false)
// called after Complete, empty or not empty error
EndTransaction(maybeErr TransactionErrResult, ctx Context) bool
EndTransaction(maybeErr TransactionErrResult, ctx *Context) bool
}
// TransactionScopeFunc the transaction's scope signature
type TransactionScopeFunc func(maybeErr TransactionErrResult, ctx Context) bool
type TransactionScopeFunc func(maybeErr TransactionErrResult, ctx *Context) bool
// EndTransaction ends the transaction with a callback to itself, implements the TransactionScope interface
func (tsf TransactionScopeFunc) EndTransaction(maybeErr TransactionErrResult, ctx Context) bool {
func (tsf TransactionScopeFunc) EndTransaction(maybeErr TransactionErrResult, ctx *Context) bool {
return tsf(maybeErr, ctx)
}
@@ -60,13 +60,13 @@ func (tsf TransactionScopeFunc) EndTransaction(maybeErr TransactionErrResult, ct
//
// For more information please visit the tests.
type Transaction struct {
context Context
parent Context
context *Context
parent *Context
hasError bool
scope TransactionScope
}
func newTransaction(from *context) *Transaction {
func newTransaction(from *Context) *Transaction {
tempCtx := *from
writer := tempCtx.ResponseWriter().Clone()
tempCtx.ResetResponseWriter(writer)
@@ -80,7 +80,7 @@ func newTransaction(from *context) *Transaction {
}
// Context returns the current context of the transaction.
func (t *Transaction) Context() Context {
func (t *Transaction) Context() *Context {
return t.context
}
@@ -138,7 +138,7 @@ func (t *Transaction) Complete(err error) {
// independent 'silent' scope, if transaction fails (if transaction.IsFailure() == true)
// then its response is not written to the real context no error is provided to the user.
// useful for the most cases.
var TransientTransactionScope = TransactionScopeFunc(func(maybeErr TransactionErrResult, ctx Context) bool {
var TransientTransactionScope = TransactionScopeFunc(func(maybeErr TransactionErrResult, ctx *Context) bool {
if maybeErr.IsFailure() {
ctx.Recorder().Reset() // this response is skipped because it's empty.
}
@@ -150,7 +150,7 @@ var TransientTransactionScope = TransactionScopeFunc(func(maybeErr TransactionEr
// if scope fails (if transaction.IsFailure() == true)
// then the rest of the context's response (transaction or normal flow)
// is not written to the client, and an error status code is written instead.
var RequestTransactionScope = TransactionScopeFunc(func(maybeErr TransactionErrResult, ctx Context) bool {
var RequestTransactionScope = TransactionScopeFunc(func(maybeErr TransactionErrResult, ctx *Context) bool {
if maybeErr.IsFailure() {
// we need to register a beforeResponseFlush event here in order