Publish the new version ✈️ | See the description, please!

# FAQ

### Looking for free support?

    http://support.iris-go.com
    https://kataras.rocket.chat/channel/iris

### Looking for previous versions?

    https://github.com/kataras/iris#version

### Should I upgrade my Iris?

Developers are not forced to upgrade if they don't really need it. Upgrade whenever you feel ready.
> Iris uses the [vendor directory](https://docs.google.com/document/d/1Bz5-UB7g2uPBdOx-rw5t9MxJwkfpx90cqG9AFL0JAYo) feature, so you get truly reproducible builds, as this method guards against upstream renames and deletes.

**How to upgrade**: Open your command-line and execute this command: `go get -u github.com/kataras/iris`.
For further installation support, please click [here](http://support.iris-go.com/d/16-how-to-install-iris-web-framework).

### About our new home page
    http://iris-go.com

Thanks to [Santosh Anand](https://github.com/santoshanand), http://iris-go.com has been upgraded and it's really awesome!

[Santosh](https://github.com/santoshanand) is a freelancer with great knowledge of Node.js and Express.js, Android, iOS, React Native, Vue.js and more. If you need a developer to find or build a solution for your problem or task, please contact him.

The next two or three donations you send will be transferred directly to his own account, so please be generous!

Read more at https://github.com/kataras/iris/blob/master/HISTORY.md


Author: kataras
Date:   2017-06-03 23:22:52 +03:00
Parent: 03bcadadec
Commit: 5e4b63acb2

330 changed files with 35786 additions and 17316 deletions

core/host/proxy.go (new file)

@@ -0,0 +1,66 @@
// Copyright 2017 Gerasimos Maropoulos, ΓΜ. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package host

import (
	"net/http"
	"net/http/httputil"
	"net/url"
	"strings"
)

func singleJoiningSlash(a, b string) string {
	aslash := strings.HasSuffix(a, "/")
	bslash := strings.HasPrefix(b, "/")
	switch {
	case aslash && bslash:
		return a + b[1:]
	case !aslash && !bslash:
		return a + "/" + b
	}
	return a + b
}

// ProxyHandler returns a new ReverseProxy that rewrites
// URLs to the scheme, host, and base path provided in target. If the
// target's path is "/base" and the incoming request was for "/dir",
// the target request will be for /base/dir.
//
// Based on httputil.NewSingleHostReverseProxy, with some additions.
// Used for the deprecated `LETSENCRYPT`.
func ProxyHandler(target *url.URL) *httputil.ReverseProxy {
	targetQuery := target.RawQuery
	director := func(req *http.Request) {
		req.URL.Scheme = target.Scheme
		req.URL.Host = target.Host
		req.Host = target.Host
		req.URL.Path = singleJoiningSlash(target.Path, req.URL.Path)
		if targetQuery == "" || req.URL.RawQuery == "" {
			req.URL.RawQuery = targetQuery + req.URL.RawQuery
		} else {
			req.URL.RawQuery = targetQuery + "&" + req.URL.RawQuery
		}
	}
	return &httputil.ReverseProxy{Director: director}
}

// NewProxy returns a new host (server supervisor) which
// redirects all requests to the target.
// It uses the httputil.NewSingleHostReverseProxy.
//
// Usage:
// target, _ := url.Parse("https://mydomain.com")
// proxy := NewProxy("mydomain.com:80", target)
// proxy.ListenAndServe() // use proxy.Shutdown to close the proxy server.
func NewProxy(hostAddr string, target *url.URL) *Supervisor {
	proxyHandler := ProxyHandler(target)
	proxy := New(&http.Server{
		Addr:    hostAddr,
		Handler: proxyHandler,
	})

	return proxy
}
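
Below is a minimal usage sketch for the proxy above, following the `NewProxy` doc comment; the domain, ports, and error handling are illustrative placeholders, not part of this commit:

package main

import (
	"net/url"

	"github.com/kataras/iris/core/host"
)

func main() {
	// redirect every plain-HTTP request to the HTTPS host.
	target, err := url.Parse("https://mydomain.com")
	if err != nil {
		panic(err)
	}

	proxy := host.NewProxy("mydomain.com:80", target)
	// blocks; call proxy.Shutdown(ctx) from elsewhere to stop it.
	if err := proxy.ListenAndServe(); err != nil {
		panic(err)
	}
}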

core/host/scheduler.go (new file)

@@ -0,0 +1,132 @@
// Copyright 2017 Gerasimos Maropoulos, ΓΜ. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package host

import (
	"sync/atomic"
)

type task struct {
	runner TaskRunner
	proc   TaskProcess
	// accessed atomically; non-zero means that the task was
	// canceled before it ever ran. This applies to interrupt handlers too.
	alreadyCanceled int32
	Cancel          func()
}

func (t *task) isCanceled() bool {
	return atomic.LoadInt32(&t.alreadyCanceled) != 0
}

// Scheduler keeps the tasks that should run
// when the server starts serving ("on serve") or
// when an OS interrupt/kill signal is received ("on interrupt").
type Scheduler struct {
	onServeTasks     []*task
	onInterruptTasks []*task
}

// TaskCancelFunc cancels a scheduled task;
// it is a no-op if the task has already run.
type TaskCancelFunc func()

// Schedule registers a runner to be executed when the host starts serving,
// or, if the runner implements OnInterrupt, when an interrupt signal arrives.
// It returns a function that cancels the task before it ever runs.
func (s *Scheduler) Schedule(runner TaskRunner) TaskCancelFunc {
	t := new(task)
	t.runner = runner
	t.Cancel = func() {
		// it's not running yet, so if canceled now,
		// mark it as already canceled to not run it at all.
		atomic.StoreInt32(&t.alreadyCanceled, 1)
	}

	if _, ok := runner.(OnInterrupt); ok {
		s.onInterruptTasks = append(s.onInterruptTasks, t)
	} else {
		s.onServeTasks = append(s.onServeTasks, t)
	}

	return func() {
		t.Cancel()
	}
}

// ScheduleFunc is Schedule's sugar for plain functions.
func (s *Scheduler) ScheduleFunc(runner func(TaskProcess)) TaskCancelFunc {
	return s.Schedule(TaskRunnerFunc(runner))
}

func cancelTasks(tasks []*task) {
	for _, t := range tasks {
		if t.isCanceled() {
			continue // already canceled, don't run it
		}
		go t.Cancel()
	}
}

// CancelOnServeTasks cancels all tasks that would run on serve.
func (s *Scheduler) CancelOnServeTasks() {
	cancelTasks(s.onServeTasks)
}

// CancelOnInterruptTasks cancels all tasks that would run on interrupt.
func (s *Scheduler) CancelOnInterruptTasks() {
	cancelTasks(s.onInterruptTasks)
}

func runTaskNow(task *task, host TaskHost) {
	proc := newTaskProcess(host)
	task.proc = proc
	task.Cancel = func() {
		proc.canceledChan <- struct{}{}
	}
	go task.runner.Run(proc)
}

func runTasks(tasks []*task, host TaskHost) {
	for _, t := range tasks {
		if t.isCanceled() {
			continue
		}
		runTaskNow(t, host)
	}
}

func (s *Scheduler) runOnServe(host TaskHost) {
	runTasks(s.onServeTasks, host)
}

func (s *Scheduler) runOnInterrupt(host TaskHost) {
	runTasks(s.onInterruptTasks, host)
}

func (s *Scheduler) visit(visitor func(*task)) {
	for _, t := range s.onServeTasks {
		visitor(t)
	}
	for _, t := range s.onInterruptTasks {
		visitor(t)
	}
}

func (s *Scheduler) notifyShutdown() {
	s.visit(func(t *task) {
		go func() {
			t.proc.Host().doneChan <- struct{}{}
		}()
	})
}

func (s *Scheduler) notifyErr(err error) {
	s.visit(func(t *task) {
		go func() {
			t.proc.Host().errChan <- err
		}()
	})
}

// CopyTo re-schedules all of this scheduler's tasks to "to".
func (s *Scheduler) CopyTo(to *Scheduler) {
	s.visit(func(t *task) {
		runner := t.runner
		to.Schedule(runner)
	})
}

@@ -0,0 +1,83 @@
// Copyright 2017 Gerasimos Maropoulos, ΓΜ. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package host

import (
	"context"
	"fmt"
	"log"
	"net"
	"net/http"
	"os"
	"time"
)

type myTestTask struct {
	delay  time.Duration
	logger *log.Logger
}

func (m myTestTask) Run(proc TaskProcess) {
	ticker := time.NewTicker(m.delay)
	defer ticker.Stop()
	runs := 0

	for {
		select {
		case _, ok := <-ticker.C:
			if !ok {
				m.logger.Println("ticker issue, closed channel, exiting from this task...")
				return
			}
			runs++
			m.logger.Println(fmt.Sprintf("%d", runs))
		case <-proc.Done():
			m.logger.Println("canceled, exiting from task AND SHUTDOWN the server...")
			proc.Host().Shutdown(context.TODO())
			return
		}
	}
}

func SchedulerSchedule() {
	h := New(&http.Server{
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		}),
	})

	logger := log.New(os.Stdout, "Supervisor: ", 0)
	delaySeconds := 2
	mytask := myTestTask{
		delay:  time.Duration(delaySeconds) * time.Second,
		logger: logger,
	}

	cancel := h.Schedule(mytask)

	ln, err := net.Listen("tcp4", ":9090")
	if err != nil {
		panic(err.Error())
	}

	logger.Println("server started...")
	logger.Println("we will cancel the task after 2 runs (the third will be canceled)")
	cancelAfterRuns := 2
	time.AfterFunc(time.Duration(delaySeconds*cancelAfterRuns+(delaySeconds/2))*time.Second, func() {
		cancel()
		logger.Println("cancel sent")
	})

	h.Serve(ln)

	// Output:
	// Supervisor: server started...
	// Supervisor: we will cancel the task after 2 runs (the third will be canceled)
	// Supervisor: 1
	// Supervisor: 2
	// Supervisor: cancel sent
	// Supervisor: canceled, exiting from task AND SHUTDOWN the server...
}

core/host/supervisor.go (new file)

@@ -0,0 +1,227 @@
// Copyright 2017 Gerasimos Maropoulos, ΓΜ. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package host

import (
	"context"
	"crypto/tls"
	"net"
	"net/http"
	"os"
	"os/signal"
	"sync"
	"sync/atomic"

	"github.com/kataras/iris/core/errors"
	"github.com/kataras/iris/core/nettools"

	"golang.org/x/crypto/acme/autocert"
)

// Supervisor is the wrapper and the manager for a compatible server
// and its relative actions, called Tasks.
//
// Interfaces are separated to return relative functionality to them.
type Supervisor struct {
	Scheduler
	server         *http.Server
	closedManually int32 // future use, accessed atomically (non-zero means we've called Shutdown)
	shouldWait     int32 // non-zero means that the host should wait for unblocking
	unblockChan    chan struct{}
	shutdownChan   chan struct{}
	errChan        chan error
	mu             sync.Mutex
}

// New returns a new Supervisor which manages the given http.Server.
func New(srv *http.Server) *Supervisor {
	return &Supervisor{
		server:       srv,
		unblockChan:  make(chan struct{}, 1),
		shutdownChan: make(chan struct{}),
		errChan:      make(chan error),
	}
}

// DeferFlow defers the execution flow: the blocking Serve call
// will not return after the underlying server is closed,
// until RestoreFlow is called.
func (su *Supervisor) DeferFlow() {
	atomic.StoreInt32(&su.shouldWait, 1)
}

// RestoreFlow restores the execution flow
// that was previously deferred by DeferFlow.
func (su *Supervisor) RestoreFlow() {
	if su.isWaiting() {
		atomic.StoreInt32(&su.shouldWait, 0)
		su.mu.Lock()
		su.unblockChan <- struct{}{}
		su.mu.Unlock()
	}
}

func (su *Supervisor) isWaiting() bool {
	return atomic.LoadInt32(&su.shouldWait) != 0
}

// Done is notified on server Shutdown.
// This can be used to gracefully shutdown connections that have
// undergone NPN/ALPN protocol upgrade or that have been hijacked.
// The receiver should start a protocol-specific graceful shutdown,
// but should not wait for the shutdown to complete.
func (su *Supervisor) Done() <-chan struct{} {
	return su.shutdownChan
}

// Err references the return value of the server's .Serve,
// not the server's specific error logger.
func (su *Supervisor) Err() <-chan error {
	return su.errChan
}

func (su *Supervisor) notifyShutdown() {
	go func() {
		su.shutdownChan <- struct{}{}
	}()
	su.Scheduler.notifyShutdown()
}

func (su *Supervisor) notifyErr(err error) {
	// if err == http.ErrServerClosed {
	// 	return
	// }
	go func() {
		su.errChan <- err
	}()
	su.Scheduler.notifyErr(err)
}

// TODO: Remove all channels and do it with events,
// or keep channels but use a different channel per task proc;
// the shared channels with `go func` carry a race risk here,
// so callbacks may be the safer option.
func (su *Supervisor) supervise(blockFunc func() error) error {
	// su.server: in order to Serve and Shutdown the underlying server and not re-run the supervisors on .Shutdown -> .Serve.
	// su.GetBlocker: sets Block() and Unblock(), which are checked after a shutdown or error.
	// su.GetNotifier: only one supervisor is allowed to be notified about Close/Shutdown and Err.
	// su.log: set this builder's logger so the supervisor can share a common logger.
	host := createTaskHost(su)

	// run the list of tasks in separate goroutines, by design.
	su.Scheduler.runOnServe(host)

	if len(su.Scheduler.onInterruptTasks) > 0 {
		// this can't be moved into the interrupt task's `Run` function
		// because it would not catch more than one ctrl/cmd+c,
		// so we do it here. These tasks may be canceled already, too.
		go func() {
			ch := make(chan os.Signal, 1)
			signal.Notify(ch, os.Interrupt, os.Kill)
			<-ch
			su.Scheduler.runOnInterrupt(host)
		}()
	}

	err := blockFunc()
	su.notifyErr(err)

	if su.isWaiting() {
		// flow was deferred; block until RestoreFlow unblocks it.
		<-su.unblockChan
	}

	return err
}

func (su *Supervisor) newListener() (net.Listener, error) {
	// this will not work with "unix" as the network
	// because UNIX doesn't support the kind of
	// restarts we may want for the server.
	//
	// The user is still able to call .Serve instead.
	l, err := nettools.TCPKeepAlive(su.server.Addr)
	if err != nil {
		return nil, err
	}

	if nettools.IsTLS(su.server) {
		// means tls
		tlsl := tls.NewListener(l, su.server.TLSConfig)
		return tlsl, nil
	}

	return l, nil
}

// Serve serves the underlying server through the supervisor,
// using the given listener; it blocks until the server is closed.
func (su *Supervisor) Serve(l net.Listener) error {
	return su.supervise(func() error { return su.server.Serve(l) })
}

// ListenAndServe creates a listener from the server's Addr and calls Serve.
func (su *Supervisor) ListenAndServe() error {
	l, err := su.newListener()
	if err != nil {
		return err
	}
	return su.Serve(l)
}

func setupHTTP2(cfg *tls.Config) {
	cfg.NextProtos = append(cfg.NextProtos, "h2") // HTTP2
}

// ListenAndServeTLS listens and serves using the given certificate and key file paths.
func (su *Supervisor) ListenAndServeTLS(certFile string, keyFile string) error {
	if certFile == "" || keyFile == "" {
		return errors.New("certFile or keyFile missing")
	}

	cfg := new(tls.Config)
	var err error
	cfg.Certificates = make([]tls.Certificate, 1)
	if cfg.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile); err != nil {
		return err
	}

	setupHTTP2(cfg)
	su.server.TLSConfig = cfg

	return su.ListenAndServe()
}

// ListenAndServeAutoTLS listens and serves using automatic certificates
// from the acme/autocert manager.
func (su *Supervisor) ListenAndServeAutoTLS() error {
	autoTLSManager := autocert.Manager{
		Prompt: autocert.AcceptTOS,
	}

	cfg := new(tls.Config)
	cfg.GetCertificate = autoTLSManager.GetCertificate
	setupHTTP2(cfg)
	su.server.TLSConfig = cfg

	return su.ListenAndServe()
}

// Shutdown gracefully shuts down the server without interrupting any
// active connections. Shutdown works by first closing all open
// listeners, then closing all idle connections, and then waiting
// indefinitely for connections to return to idle and then shut down.
// If the provided context expires before the shutdown is complete,
// then the context's error is returned.
//
// Shutdown does not attempt to close nor wait for hijacked
// connections such as WebSockets. The caller of Shutdown should
// separately notify such long-lived connections of shutdown and wait
// for them to close, if desired.
func (su *Supervisor) Shutdown(ctx context.Context) error {
	atomic.AddInt32(&su.closedManually, 1) // future-use
	su.notifyShutdown()
	return su.server.Shutdown(ctx)
}
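
As a rough illustration of the `DeferFlow`/`RestoreFlow` pair (the address and the timings are placeholders): `ListenAndServe` keeps blocking after a `Shutdown` until the flow is restored.

package main

import (
	"context"
	"net/http"
	"time"

	"github.com/kataras/iris/core/host"
)

func main() {
	su := host.New(&http.Server{Addr: ":8080", Handler: http.DefaultServeMux})

	// keep ListenAndServe blocked even after Shutdown,
	// e.g. while another component is still finishing its work.
	su.DeferFlow()

	go func() {
		time.Sleep(2 * time.Second)

		ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second)
		defer cancel()
		su.Shutdown(ctx) // the server closes, but ListenAndServe stays blocked...

		su.RestoreFlow() // ...until the flow is restored here.
	}()

	su.ListenAndServe()
}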

@@ -0,0 +1,112 @@
// Copyright 2017 Gerasimos Maropoulos, ΓΜ. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package host

import (
	"bytes"
	"context"
	"crypto/tls"
	"log"
	"net"
	"net/http"
	"strings"
	"testing"

	"github.com/iris-contrib/httpexpect"
)

const (
	debug = false
)

func newTester(t *testing.T, baseURL string, handler http.Handler) *httpexpect.Expect {
	var transporter http.RoundTripper

	if strings.HasPrefix(baseURL, "http") { // means we are testing real serve time
		transporter = &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		}
	} else { // means we are testing the handler itself
		transporter = httpexpect.NewBinder(handler)
	}

	testConfiguration := httpexpect.Config{
		BaseURL: baseURL,
		Client: &http.Client{
			Transport: transporter,
			Jar:       httpexpect.NewJar(),
		},
		Reporter: httpexpect.NewAssertReporter(t),
	}

	if debug {
		testConfiguration.Printers = []httpexpect.Printer{
			httpexpect.NewDebugPrinter(t, true),
		}
	}

	return httpexpect.WithConfig(testConfiguration)
}

func testSupervisor(t *testing.T, creator func(*http.Server, []TaskRunner) *Supervisor) {
	loggerOutput := &bytes.Buffer{}
	logger := log.New(loggerOutput, "", 0)
	const expectedHelloMessage = "Hello\n"

	// http routing
	expectedBody := "this is the response body\n"
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(expectedBody))
	})

	// host (server wrapper and supervisor) construction
	srv := &http.Server{Handler: mux, ErrorLog: logger}
	addr := "localhost:5525"

	// serving
	ln, err := net.Listen("tcp4", addr)
	if err != nil {
		t.Fatal(err)
	}

	helloMe := TaskRunnerFunc(func(proc TaskProcess) {
		logger.Print(expectedHelloMessage)
	})

	host := creator(srv, []TaskRunner{helloMe})
	defer host.Shutdown(context.TODO())
	go host.Serve(ln)

	// http testing and various calls;
	// no need for a time.Sleep because the following calls take some time by themselves
	tester := newTester(t, "http://"+addr, mux)
	tester.Request("GET", "/").Expect().Status(http.StatusOK).Body().Equal(expectedBody)

	// WARNING: data race here because we try to read the logs,
	// but it's "safe" enough for this test.
	// testing the Task's (recorded) message:
	if got := loggerOutput.String(); expectedHelloMessage != got {
		t.Fatalf("expected hello Task's message to be '%s' but got '%s'", expectedHelloMessage, got)
	}
}

func TestSupervisor(t *testing.T) {
	testSupervisor(t, func(srv *http.Server, tasks []TaskRunner) *Supervisor {
		su := New(srv)
		for _, task := range tasks {
			su.Schedule(task)
		}
		return su
	})
}

core/host/task.go (new file)

@@ -0,0 +1,131 @@
// Copyright 2017 Gerasimos Maropoulos, ΓΜ. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package host

// The name, until 24 hours ago, was "Supervisor", but that didn't cover
// its usage 100%; the best name is Task (or Thread), so Task it is,
// and the host is renamed to "Supervisor" because that is the real supervisor.

import (
	"context"
	"net/http"
	"os"

	"github.com/kataras/iris/core/nettools"
)

type (
	// FlowController exposes the supervisor's DeferFlow
	// and RestoreFlow capabilities to the tasks.
	FlowController interface {
		DeferFlow()
		RestoreFlow()
	}
)

// TaskHost is the task's view of a Supervisor whose server
// is running, active and open; it restricts access to the
// supervisor's fields, e.g. no access to "Schedule".
type TaskHost struct {
	su *Supervisor

	// Flow controller
	FlowController

	// Various
	pid      int
	doneChan chan struct{}
	errChan  chan error
}

// Done is notified when the underlying server is shut down.
func (h TaskHost) Done() <-chan struct{} {
	return h.doneChan
}

// Err is notified when the underlying server's Serve returns an error.
func (h TaskHost) Err() <-chan error {
	return h.errChan
}

// Serve runs the underlying server's Serve,
// using the "latest known" listener from the supervisor.
func (h TaskHost) Serve() error {
	l, err := h.su.newListener()
	if err != nil {
		return err
	}

	// ignore http.ErrServerClosed; it will carry this error
	// from the previous close.
	if err := h.su.server.Serve(l); err != http.ErrServerClosed {
		return err
	}
	return nil
}

// HostURL returns the full listening URL (scheme + host)
// based on the supervisor's server's address.
func (h TaskHost) HostURL() string {
	return nettools.ResolveURLFromServer(h.su.server)
}

// Hostname returns the underlying server's hostname.
func (h TaskHost) Hostname() string {
	return nettools.ResolveHostname(h.su.server.Addr)
}

// Shutdown calls the underlying server's Shutdown directly
// (otherwise we would cancel all tasks and run in cycles).
func (h TaskHost) Shutdown(ctx context.Context) error {
	return h.su.server.Shutdown(ctx)
}

// PID returns the host's current process id.
func (h TaskHost) PID() int {
	return h.pid
}

// TaskProcess is the process of a single task;
// it gives access to the task's host and its cancelation signal.
type TaskProcess struct {
	canceledChan chan struct{}
	host         TaskHost
}

// Done is notified when the task is canceled.
func (p TaskProcess) Done() <-chan struct{} {
	return p.canceledChan
}

// Host returns the task's host.
func (p TaskProcess) Host() TaskHost {
	return p.host
}

func createTaskHost(su *Supervisor) TaskHost {
	host := TaskHost{
		su:             su,
		FlowController: su,
		doneChan:       make(chan struct{}),
		errChan:        make(chan error),
		pid:            os.Getpid(),
	}

	return host
}

func newTaskProcess(host TaskHost) TaskProcess {
	return TaskProcess{
		host:         host,
		canceledChan: make(chan struct{}),
	}
}

// A TaskRunner is an independent stream of instructions inside a Supervisor.
// A task is similar to a sequential program;
// however, a task itself is not a program:
// it can't run on its own, it runs within a Supervisor's context.
//
// The real usage of a task is not a single sequential thread,
// but rather multiple tasks inside a single Supervisor.
// Multiple tasks running at the same time and performing various work
// is what is usually referred to as multithreading.
// A Task is considered a lightweight process because it runs within the context
// of a Supervisor and takes advantage of the resources allocated for that
// Supervisor and its Server.
type TaskRunner interface {
	Run(TaskProcess)
}

// TaskRunnerFunc is the functional adapter of TaskRunner.
type TaskRunnerFunc func(TaskProcess)

// Run implements TaskRunner.
func (s TaskRunnerFunc) Run(proc TaskProcess) {
	s(proc)
}
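
For illustration, here is a small sketch of the two ways to register work with the scheduler: a custom type implementing `TaskRunner`, and a plain function via `ScheduleFunc`. The `pidLogger` type is hypothetical:

package main

import (
	"log"
	"net/http"

	"github.com/kataras/iris/core/host"
)

// pidLogger is a hypothetical TaskRunner implementation:
// it logs the host's process id once the server starts serving.
type pidLogger struct{ prefix string }

func (p pidLogger) Run(proc host.TaskProcess) {
	log.Printf("%s pid: %d, listening on: %s\n",
		p.prefix, proc.Host().PID(), proc.Host().HostURL())
}

func main() {
	su := host.New(&http.Server{Addr: ":8080", Handler: http.DefaultServeMux})

	// a struct value satisfying TaskRunner...
	su.Schedule(pidLogger{prefix: "[iris]"})
	// ...or a plain function through ScheduleFunc.
	su.ScheduleFunc(func(proc host.TaskProcess) {
		log.Println("server is up")
	})

	su.ListenAndServe()
}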

core/host/task_banner.go (new file)

@@ -0,0 +1,23 @@
// Copyright 2017 Gerasimos Maropoulos, ΓΜ. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package host

import (
	"fmt"
	"io"
	"runtime"
)

// WriteBannerTask returns a task which prints the banner
// and the "Now listening on" message to the given writer
// once the server starts serving.
func WriteBannerTask(w io.Writer, banner string) TaskRunnerFunc {
	return func(proc TaskProcess) {
		listeningURI := proc.Host().HostURL()
		interruptkey := "CTRL"
		if runtime.GOOS == "darwin" {
			interruptkey = "CMD"
		}
		w.Write([]byte(fmt.Sprintf("%s\n\nNow listening on: %s\nApplication started. Press %s+C to shut down.\n",
			banner, listeningURI, interruptkey)))
	}
}
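
A possible wiring of the banner task into a supervisor (the banner text and the address are placeholders):

package main

import (
	"net/http"
	"os"

	"github.com/kataras/iris/core/host"
)

func main() {
	su := host.New(&http.Server{Addr: ":8080", Handler: http.DefaultServeMux})
	// print the banner to stdout when the server starts serving.
	su.Schedule(host.WriteBannerTask(os.Stdout, "My App v0.1"))
	su.ListenAndServe()
}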

@@ -0,0 +1,47 @@
// Copyright 2017 Gerasimos Maropoulos, ΓΜ. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package host

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

func TaskHostError() {
	su := New(&http.Server{Addr: ":8273", Handler: http.DefaultServeMux})

	su.ScheduleFunc(func(proc TaskProcess) {
		err := <-proc.Host().Err()
		fmt.Println(err.Error())
	})
	su.ScheduleFunc(func(proc TaskProcess) {
		err := <-proc.Host().Err()
		fmt.Println(err.Error())
	})
	su.ScheduleFunc(func(proc TaskProcess) {
		err := <-proc.Host().Err()
		fmt.Println(err.Error())
	})

	go su.ListenAndServe()
	time.Sleep(1 * time.Second)
	su.Shutdown(context.TODO())
	time.Sleep(1 * time.Second)

	// Output:
	// http: Server closed
	// http: Server closed
	// http: Server closed
}

core/host/task_gui_tray.go (new file)

@@ -0,0 +1,47 @@
// +build !linux

// Copyright 2017 Gerasimos Maropoulos, ΓΜ. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package host

import (
	"context"
	"time"

	"github.com/kataras/iris/core/gui"
)

// ShowTrayTask is a supervisor's built-in task which shows
// the iris tray icon in the taskbar (cross-platform).
//
// It's responsible for the server's status button.
func ShowTrayTask(version string, shutdownTimeout time.Duration) TaskRunnerFunc {
	return func(proc TaskProcess) {
		t := gui.Tray
		// set the label "Version" to the framework's current version.
		t.SetVersion(version)
		// activate the status button (online/offline).
		t.OnServerStatusChange(
			// the first callback (pressed when unchecked).
			func() {
				go proc.Host().Serve()
			},
			// the second callback (pressed when checked, the default status, with its label set to "Offline").
			func() {
				// when the server is shutting down it will send an "http closed" error;
				// DeferFlow stops that error from being returned and exiting the app.
				// It postpones the execution flow; the interrupt signal will restore the flow
				// when ctrl/cmd+C is pressed.
				proc.Host().DeferFlow()
				ctx, cancel := context.WithTimeout(context.TODO(), shutdownTimeout)
				defer cancel()
				proc.Host().Shutdown(ctx)
			})
		// render the tray icon and block this scheduled task (goroutine).
		t.Show()
	}
}

core/host/task_gui_tray_linux.go (new file)

@@ -0,0 +1,28 @@
// +build linux

// Copyright 2017 Gerasimos Maropoulos, ΓΜ. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package host

import (
	"os"
	"time"
)

// ShowTrayTask is a supervisor's built-in task which shows
// the iris tray icon in the taskbar (cross-platform).
//
// It's responsible for the server's status button.
func ShowTrayTask(version string, shutdownTimeout time.Duration) TaskRunnerFunc {
	return func(proc TaskProcess) {
		os.Stdout.WriteString("Tray icon is not enabled by default on linux systems;\nyou have to install a dependency first and re-get the Iris pkg:\n")
		os.Stdout.WriteString("$ sudo apt-get install libgtk-3-dev libappindicator3-dev\n")
		os.Stdout.WriteString("$ go get -u github.com/kataras/iris\n")
		// manually:
		// os.Stdout.WriteString("remove $GOPATH/src/github.com/kataras/iris/core/host/task_gui_tray_linux.go\n")
		// os.Stdout.WriteString("edit $GOPATH/src/github.com/kataras/iris/core/host/task_gui_tray.go and remove the // +build !linux\n")
		// os.Stdout.WriteString("edit $GOPATH/src/github.com/kataras/iris/core/gui/tray.go and remove the // +build !linux\n")
	}
}

@@ -0,0 +1,29 @@
// Copyright 2017 Gerasimos Maropoulos, ΓΜ. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package host

import (
	"context"
	"time"
)

// OnInterrupt is a built-in supervisor task type which fires its
// value (a Task) when an OS interrupt/kill signal is received.
type OnInterrupt TaskRunnerFunc

// Run implements TaskRunner.
func (t OnInterrupt) Run(proc TaskProcess) {
	t(proc)
}

// ShutdownOnInterruptTask returns a supervisor's built-in task which
// shuts down the server when the supervisor's interrupt signal handler fires it.
func ShutdownOnInterruptTask(shutdownTimeout time.Duration) TaskRunner {
	return OnInterrupt(func(proc TaskProcess) {
		ctx, cancel := context.WithTimeout(context.TODO(), shutdownTimeout)
		defer cancel()
		proc.Host().Shutdown(ctx)
		proc.Host().RestoreFlow()
	})
}
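
Finally, a minimal sketch of graceful shutdown on interrupt using the task above (the address and the timeout are placeholders):

package main

import (
	"net/http"
	"time"

	"github.com/kataras/iris/core/host"
)

func main() {
	su := host.New(&http.Server{Addr: ":8080", Handler: http.DefaultServeMux})

	// on ctrl/cmd+C: gracefully shut down, waiting up to 5 seconds
	// for active connections to finish.
	su.Schedule(host.ShutdownOnInterruptTask(5 * time.Second))

	// returns when the interrupt task has shut the server down.
	su.ListenAndServe()
}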