
Reorganize packages, closes #79

- All packages go into either cmd or pkg directories
- Most packages renamed
- Server packages moved into pkg/server
- sanitize moved into webui, as that's the only place it's used
- filestore moved into pkg/storage/file
- Makefile updated, and PKG variable use fixed
James Hillyerd
2018-03-09 19:32:45 -08:00
parent f00b9ddef0
commit f8c30a678a
55 changed files with 225 additions and 220 deletions

pkg/storage/datastore.go Normal file

@@ -0,0 +1,56 @@
// Package datastore contains implementation independent datastore logic
package datastore
import (
"errors"
"io"
"net/mail"
"sync"
"time"
"github.com/jhillyerd/enmime"
)
var (
// ErrNotExist indicates the requested message does not exist
ErrNotExist = errors.New("Message does not exist")
// ErrNotWritable indicates the message is closed; no longer writable
ErrNotWritable = errors.New("Message not writable")
)
// DataStore is an interface to get Mailboxes stored in Inbucket
type DataStore interface {
MailboxFor(emailAddress string) (Mailbox, error)
AllMailboxes() ([]Mailbox, error)
// LockFor is a temporary hack to fix #77 until Datastore revamp
LockFor(emailAddress string) (*sync.RWMutex, error)
}
// Mailbox is an interface to get and manipulate messages in a DataStore
type Mailbox interface {
GetMessages() ([]Message, error)
GetMessage(id string) (Message, error)
Purge() error
NewMessage() (Message, error)
Name() string
String() string
}
// Message is an interface for a single message in a Mailbox
type Message interface {
ID() string
From() string
To() []string
Date() time.Time
Subject() string
RawReader() (reader io.ReadCloser, err error)
ReadHeader() (msg *mail.Message, err error)
ReadBody() (body *enmime.Envelope, err error)
ReadRaw() (raw *string, err error)
Append(data []byte) error
Close() error
Delete() error
String() string
Size() int64
}
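
For orientation, here is a minimal sketch (not part of this commit) of how a caller outside the package might exercise these interfaces to deliver a message. The deliver helper and its error handling are hypothetical, and it omits the LockFor locking a real caller would need around mailbox mutation.

// deliver is an illustrative helper, not part of Inbucket.
func deliver(ds datastore.DataStore, recipient string, raw []byte) error {
	mb, err := ds.MailboxFor(recipient)
	if err != nil {
		return err
	}
	msg, err := mb.NewMessage()
	if err != nil {
		return err
	}
	if err := msg.Append(raw); err != nil {
		return err
	}
	// Close finalizes the message; afterwards it is visible via GetMessages.
	return msg.Close()
}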


@@ -0,0 +1,270 @@
package filestore
import (
"bufio"
"fmt"
"io"
"io/ioutil"
"net/mail"
"os"
"path/filepath"
"time"
"github.com/jhillyerd/enmime"
"github.com/jhillyerd/inbucket/pkg/storage"
"github.com/jhillyerd/inbucket/pkg/log"
)
// FileMessage implements Message and contains a little bit of data about a
// particular email message, and methods to retrieve the rest of it from disk.
type FileMessage struct {
mailbox *FileMailbox
// Stored in GOB
Fid string
Fdate time.Time
Ffrom string
Fto []string
Fsubject string
Fsize int64
// These are for creating new messages only
writable bool
writerFile *os.File
writer *bufio.Writer
}
// NewMessage creates a new FileMessage object and sets the Date and ID fields.
// It will also delete messages over messageCap if configured.
func (mb *FileMailbox) NewMessage() (datastore.Message, error) {
// Load index
if !mb.indexLoaded {
if err := mb.readIndex(); err != nil {
return nil, err
}
}
// Delete old messages over messageCap
if mb.store.messageCap > 0 {
for len(mb.messages) >= mb.store.messageCap {
log.Infof("Mailbox %q over configured message cap", mb.name)
if err := mb.messages[0].Delete(); err != nil {
log.Errorf("Error deleting message: %s", err)
}
}
}
date := time.Now()
id := generateID(date)
return &FileMessage{mailbox: mb, Fid: id, Fdate: date, writable: true}, nil
}
// ID gets the ID of the Message
func (m *FileMessage) ID() string {
return m.Fid
}
// Date returns the date/time this Message was received by Inbucket
func (m *FileMessage) Date() time.Time {
return m.Fdate
}
// From returns the value of the Message From header
func (m *FileMessage) From() string {
return m.Ffrom
}
// To returns the value of the Message To header
func (m *FileMessage) To() []string {
return m.Fto
}
// Subject returns the value of the Message Subject header
func (m *FileMessage) Subject() string {
return m.Fsubject
}
// String returns a string in the form: "Subject" from From
func (m *FileMessage) String() string {
return fmt.Sprintf("\"%v\" from %v", m.Fsubject, m.Ffrom)
}
// Size returns the size of the Message on disk in bytes
func (m *FileMessage) Size() int64 {
return m.Fsize
}
func (m *FileMessage) rawPath() string {
return filepath.Join(m.mailbox.path, m.Fid+".raw")
}
// ReadHeader opens the .raw portion of a Message and returns a standard Go mail.Message object
func (m *FileMessage) ReadHeader() (msg *mail.Message, err error) {
file, err := os.Open(m.rawPath())
if err != nil {
return nil, err
}
defer func() {
if err := file.Close(); err != nil {
log.Errorf("Failed to close %q: %v", m.rawPath(), err)
}
}()
reader := bufio.NewReader(file)
return mail.ReadMessage(reader)
}
// ReadBody opens the .raw portion of a Message and returns a MIMEBody object
func (m *FileMessage) ReadBody() (body *enmime.Envelope, err error) {
file, err := os.Open(m.rawPath())
if err != nil {
return nil, err
}
defer func() {
if err := file.Close(); err != nil {
log.Errorf("Failed to close %q: %v", m.rawPath(), err)
}
}()
reader := bufio.NewReader(file)
mime, err := enmime.ReadEnvelope(reader)
if err != nil {
return nil, err
}
return mime, nil
}
// RawReader opens the .raw portion of a Message as an io.ReadCloser
func (m *FileMessage) RawReader() (reader io.ReadCloser, err error) {
file, err := os.Open(m.rawPath())
if err != nil {
return nil, err
}
return file, nil
}
// ReadRaw opens the .raw portion of a Message and returns it as a string
func (m *FileMessage) ReadRaw() (raw *string, err error) {
reader, err := m.RawReader()
if err != nil {
return nil, err
}
defer func() {
if err := reader.Close(); err != nil {
log.Errorf("Failed to close %q: %v", m.rawPath(), err)
}
}()
bodyBytes, err := ioutil.ReadAll(bufio.NewReader(reader))
if err != nil {
return nil, err
}
bodyString := string(bodyBytes)
return &bodyString, nil
}
// Append writes data to a newly created Message; it will fail on a pre-existing Message
// or after Close() has been called.
func (m *FileMessage) Append(data []byte) error {
// Prevent Appending to a pre-existing Message
if !m.writable {
return datastore.ErrNotWritable
}
// Open file for writing if we haven't yet
if m.writer == nil {
// Ensure mailbox directory exists
if err := m.mailbox.createDir(); err != nil {
return err
}
file, err := os.Create(m.rawPath())
if err != nil {
// Set writable false just in case something calls me a million times
m.writable = false
return err
}
m.writerFile = file
m.writer = bufio.NewWriter(file)
}
_, err := m.writer.Write(data)
m.Fsize += int64(len(data))
return err
}
// Close this Message for writing - no more data may be Appended. Close() will also
// add the message to the mailbox index (index.gob).
func (m *FileMessage) Close() error {
// nil out the writer fields so they can't be used
writer := m.writer
writerFile := m.writerFile
m.writer = nil
m.writerFile = nil
if writer != nil {
if err := writer.Flush(); err != nil {
return err
}
}
if writerFile != nil {
if err := writerFile.Close(); err != nil {
return err
}
}
// Fetch headers
body, err := m.ReadBody()
if err != nil {
return err
}
// Only public fields are stored in gob, hence starting with capital F
// Parse From address
if address, err := mail.ParseAddress(body.GetHeader("From")); err == nil {
m.Ffrom = address.String()
} else {
m.Ffrom = body.GetHeader("From")
}
m.Fsubject = body.GetHeader("Subject")
// Turn the To header into a slice
if addresses, err := body.AddressList("To"); err == nil {
for _, a := range addresses {
m.Fto = append(m.Fto, a.String())
}
} else {
m.Fto = []string{body.GetHeader("To")}
}
// Refresh the index before adding our message
err = m.mailbox.readIndex()
if err != nil {
return err
}
// Made it this far without errors, add it to the index
m.mailbox.messages = append(m.mailbox.messages, m)
return m.mailbox.writeIndex()
}
// Delete this Message from disk by removing it from the index and deleting the
// raw file.
func (m *FileMessage) Delete() error {
messages := m.mailbox.messages
for i, mm := range messages {
if m == mm {
// Slice around message we are deleting
m.mailbox.messages = append(messages[:i], messages[i+1:]...)
break
}
}
if err := m.mailbox.writeIndex(); err != nil {
return err
}
if len(m.mailbox.messages) == 0 {
// This was the last message, thus writeIndex() has removed the entire
// directory; we don't need to delete the raw file.
return nil
}
// There are still messages in the index
log.Tracef("Deleting %v", m.rawPath())
return os.Remove(m.rawPath())
}
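
Reading a stored message back is symmetrical. A brief sketch, assuming a mailbox that already contains mail; the printLatest helper is hypothetical, and it relies on enmime's Envelope.Text field for the decoded text body.

// printLatest is an illustrative helper, not part of Inbucket.
func printLatest(ds datastore.DataStore) error {
	mb, err := ds.MailboxFor("james")
	if err != nil {
		return err
	}
	msg, err := mb.GetMessage("latest") // "latest" resolves to the newest message
	if err != nil {
		return err
	}
	env, err := msg.ReadBody() // parsed MIME envelope via enmime
	if err != nil {
		return err
	}
	fmt.Println(env.Text)
	return nil
}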

pkg/storage/file/fstore.go Normal file

@@ -0,0 +1,367 @@
package filestore
import (
"bufio"
"encoding/gob"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sync"
"time"
"github.com/jhillyerd/inbucket/pkg/config"
"github.com/jhillyerd/inbucket/pkg/storage"
"github.com/jhillyerd/inbucket/pkg/log"
"github.com/jhillyerd/inbucket/pkg/stringutil"
)
// Name of index file in each mailbox
const indexFileName = "index.gob"
var (
// indexMx is locked while reading/writing an index file
//
// NOTE: This is a bottleneck because it's a single lock even if we have a
// million index files
indexMx = new(sync.RWMutex)
// dirMx is locked while creating/removing directories
dirMx = new(sync.Mutex)
// countChannel is filled with sequential numbers (0000..9999), which are
// used by generateID() to generate unique message IDs. It's global
// because we only want one regardless of the number of DataStore objects
countChannel = make(chan int, 10)
)
func init() {
// Start generator
go countGenerator(countChannel)
}
// Populates the channel with numbers
func countGenerator(c chan int) {
for i := 0; true; i = (i + 1) % 10000 {
c <- i
}
}
// FileDataStore implements DataStore and is the root of the mail storage
// hierarchy. It provides access to Mailbox objects.
type FileDataStore struct {
hashLock datastore.HashLock
path string
mailPath string
messageCap int
}
// NewFileDataStore creates a new DataStore object using the specified path
func NewFileDataStore(cfg config.DataStoreConfig) datastore.DataStore {
path := cfg.Path
if path == "" {
log.Errorf("No value configured for datastore path")
return nil
}
mailPath := filepath.Join(path, "mail")
if _, err := os.Stat(mailPath); err != nil {
// Mail datastore does not yet exist
if err = os.MkdirAll(mailPath, 0770); err != nil {
log.Errorf("Error creating dir %q: %v", mailPath, err)
}
}
return &FileDataStore{path: path, mailPath: mailPath, messageCap: cfg.MailboxMsgCap}
}
// DefaultFileDataStore creates a new DataStore object. It uses the global datastore
// configuration to construct its path.
func DefaultFileDataStore() datastore.DataStore {
cfg := config.GetDataStoreConfig()
return NewFileDataStore(cfg)
}
// MailboxFor retrieves the Mailbox object for a specified email address; if the mailbox
// does not exist, it will attempt to create it.
func (ds *FileDataStore) MailboxFor(emailAddress string) (datastore.Mailbox, error) {
name, err := stringutil.ParseMailboxName(emailAddress)
if err != nil {
return nil, err
}
dir := stringutil.HashMailboxName(name)
s1 := dir[0:3]
s2 := dir[0:6]
path := filepath.Join(ds.mailPath, s1, s2, dir)
indexPath := filepath.Join(path, indexFileName)
return &FileMailbox{store: ds, name: name, dirName: dir, path: path,
indexPath: indexPath}, nil
}
// AllMailboxes returns a slice with all Mailboxes
func (ds *FileDataStore) AllMailboxes() ([]datastore.Mailbox, error) {
mailboxes := make([]datastore.Mailbox, 0, 100)
infos1, err := ioutil.ReadDir(ds.mailPath)
if err != nil {
return nil, err
}
// Loop over level 1 directories
for _, inf1 := range infos1 {
if inf1.IsDir() {
l1 := inf1.Name()
infos2, err := ioutil.ReadDir(filepath.Join(ds.mailPath, l1))
if err != nil {
return nil, err
}
// Loop over level 2 directories
for _, inf2 := range infos2 {
if inf2.IsDir() {
l2 := inf2.Name()
infos3, err := ioutil.ReadDir(filepath.Join(ds.mailPath, l1, l2))
if err != nil {
return nil, err
}
// Loop over mailboxes
for _, inf3 := range infos3 {
if inf3.IsDir() {
mbdir := inf3.Name()
mbpath := filepath.Join(ds.mailPath, l1, l2, mbdir)
idx := filepath.Join(mbpath, indexFileName)
mb := &FileMailbox{store: ds, dirName: mbdir, path: mbpath,
indexPath: idx}
mailboxes = append(mailboxes, mb)
}
}
}
}
}
}
return mailboxes, nil
}
func (ds *FileDataStore) LockFor(emailAddress string) (*sync.RWMutex, error) {
name, err := stringutil.ParseMailboxName(emailAddress)
if err != nil {
return nil, err
}
hash := stringutil.HashMailboxName(name)
return ds.hashLock.Get(hash), nil
}
// FileMailbox implements Mailbox, manages the mail for a specific user and
// correlates to a particular directory on disk.
type FileMailbox struct {
store *FileDataStore
name string
dirName string
path string
indexLoaded bool
indexPath string
messages []*FileMessage
}
// Name returns the name of the mailbox
func (mb *FileMailbox) Name() string {
return mb.name
}
// String renders the name and directory path of the mailbox
func (mb *FileMailbox) String() string {
return mb.name + "[" + mb.dirName + "]"
}
// GetMessages loads the mailbox index from disk (if needed) and returns its
// contents as a slice of Message objects.
func (mb *FileMailbox) GetMessages() ([]datastore.Message, error) {
if !mb.indexLoaded {
if err := mb.readIndex(); err != nil {
return nil, err
}
}
messages := make([]datastore.Message, len(mb.messages))
for i, m := range mb.messages {
messages[i] = m
}
return messages, nil
}
// GetMessage retrieves a single message by ID and returns a Message object
func (mb *FileMailbox) GetMessage(id string) (datastore.Message, error) {
if !mb.indexLoaded {
if err := mb.readIndex(); err != nil {
return nil, err
}
}
if id == "latest" && len(mb.messages) != 0 {
return mb.messages[len(mb.messages)-1], nil
}
for _, m := range mb.messages {
if m.Fid == id {
return m, nil
}
}
return nil, datastore.ErrNotExist
}
// Purge deletes all messages in this mailbox
func (mb *FileMailbox) Purge() error {
mb.messages = mb.messages[:0]
return mb.writeIndex()
}
// readIndex loads the mailbox index data from disk
func (mb *FileMailbox) readIndex() error {
// Clear message slice, open index
mb.messages = mb.messages[:0]
// Lock for reading
indexMx.RLock()
defer indexMx.RUnlock()
// Check if index exists
if _, err := os.Stat(mb.indexPath); err != nil {
// Does not exist, but that's not an error in our world
log.Tracef("Index %v does not exist (yet)", mb.indexPath)
mb.indexLoaded = true
return nil
}
file, err := os.Open(mb.indexPath)
if err != nil {
return err
}
defer func() {
if err := file.Close(); err != nil {
log.Errorf("Failed to close %q: %v", mb.indexPath, err)
}
}()
// Decode gob data
dec := gob.NewDecoder(bufio.NewReader(file))
for {
msg := new(FileMessage)
if err = dec.Decode(msg); err != nil {
if err == io.EOF {
// It's OK to get an EOF here
break
}
return fmt.Errorf("Corrupt mailbox %q: %v", mb.indexPath, err)
}
msg.mailbox = mb
mb.messages = append(mb.messages, msg)
}
mb.indexLoaded = true
return nil
}
// writeIndex overwrites the index on disk with the current mailbox data
func (mb *FileMailbox) writeIndex() error {
// Lock for writing
indexMx.Lock()
defer indexMx.Unlock()
if len(mb.messages) > 0 {
// Ensure mailbox directory exists
if err := mb.createDir(); err != nil {
return err
}
// Open index for writing
file, err := os.Create(mb.indexPath)
if err != nil {
return err
}
writer := bufio.NewWriter(file)
// Write each message and then flush
enc := gob.NewEncoder(writer)
for _, m := range mb.messages {
err = enc.Encode(m)
if err != nil {
_ = file.Close()
return err
}
}
if err := writer.Flush(); err != nil {
_ = file.Close()
return err
}
if err := file.Close(); err != nil {
log.Errorf("Failed to close %q: %v", mb.indexPath, err)
return err
}
} else {
// No messages, delete index+maildir
log.Tracef("Removing mailbox %v", mb.path)
return mb.removeDir()
}
return nil
}
// createDir checks for the presence of the path for this mailbox, creating it if needed
func (mb *FileMailbox) createDir() error {
dirMx.Lock()
defer dirMx.Unlock()
if _, err := os.Stat(mb.path); err != nil {
if err := os.MkdirAll(mb.path, 0770); err != nil {
log.Errorf("Failed to create directory %v, %v", mb.path, err)
return err
}
}
return nil
}
// removeDir removes the mailbox, plus empty higher level directories
func (mb *FileMailbox) removeDir() error {
dirMx.Lock()
defer dirMx.Unlock()
// remove mailbox dir, including index file
if err := os.RemoveAll(mb.path); err != nil {
return err
}
// remove parents if empty
dir := filepath.Dir(mb.path)
if removeDirIfEmpty(dir) {
removeDirIfEmpty(filepath.Dir(dir))
}
return nil
}
// removeDirIfEmpty will remove the specified directory if it contains no files or directories.
// Caller should hold dirMx. Returns true if dir was removed.
func removeDirIfEmpty(path string) (removed bool) {
f, err := os.Open(path)
if err != nil {
return false
}
files, err := f.Readdirnames(0)
_ = f.Close()
if err != nil {
return false
}
if len(files) > 0 {
// Dir not empty
return false
}
log.Tracef("Removing dir %v", path)
err = os.Remove(path)
if err != nil {
log.Errorf("Failed to remove %q: %v", path, err)
return false
}
return true
}
// generatePrefix converts a Time object into the ISO style format we use
// as a prefix for message files. Note: It is used directly by unit
// tests.
func generatePrefix(date time.Time) string {
return date.Format("20060102T150405")
}
// generateID adds a 4-digit unique number onto the end of the string
// returned by generatePrefix()
func generateID(date time.Time) string {
return generatePrefix(date) + "-" + fmt.Sprintf("%04d", <-countChannel)
}
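
To make the resulting on-disk layout concrete: combining the hash prefixes from MailboxFor with the prefix and counter from generateID, a mailbox for "james" (which the tests below note hashes to 474ba67bdb289c6263b36dfd8a7bed6c85b04943) would look roughly like this; the timestamp and counter values are illustrative.

mail/474/474ba6/474ba67bdb289c6263b36dfd8a7bed6c85b04943/index.gob
mail/474/474ba6/474ba67bdb289c6263b36dfd8a7bed6c85b04943/20180309T193245-0001.raw

Here index.gob holds the gob-encoded FileMessage metadata (the exported F-prefixed fields), and each .raw file holds one message exactly as it was appended.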


@@ -0,0 +1,583 @@
package filestore
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"testing"
"time"
"github.com/jhillyerd/inbucket/pkg/config"
"github.com/stretchr/testify/assert"
)
// Test directory structure created by filestore
func TestFSDirStructure(t *testing.T) {
ds, logbuf := setupDataStore(config.DataStoreConfig{})
defer teardownDataStore(ds)
root := ds.path
// james hashes to 474ba67bdb289c6263b36dfd8a7bed6c85b04943
mbName := "james"
// Check filestore root exists
assert.True(t, isDir(root), "Expected %q to be a directory", root)
// Check mail dir exists
expect := filepath.Join(root, "mail")
assert.True(t, isDir(expect), "Expected %q to be a directory", expect)
// Check first hash section does not exist
expect = filepath.Join(root, "mail", "474")
assert.False(t, isDir(expect), "Expected %q to not exist", expect)
// Deliver test message
id1, _ := deliverMessage(ds, mbName, "test", time.Now())
// Check path to message exists
assert.True(t, isDir(expect), "Expected %q to be a directory", expect)
expect = filepath.Join(expect, "474ba6")
assert.True(t, isDir(expect), "Expected %q to be a directory", expect)
expect = filepath.Join(expect, "474ba67bdb289c6263b36dfd8a7bed6c85b04943")
assert.True(t, isDir(expect), "Expected %q to be a directory", expect)
// Check files
mbPath := expect
expect = filepath.Join(mbPath, "index.gob")
assert.True(t, isFile(expect), "Expected %q to be a file", expect)
expect = filepath.Join(mbPath, id1+".raw")
assert.True(t, isFile(expect), "Expected %q to be a file", expect)
// Deliver second test message
id2, _ := deliverMessage(ds, mbName, "test 2", time.Now())
// Check files
expect = filepath.Join(mbPath, "index.gob")
assert.True(t, isFile(expect), "Expected %q to be a file", expect)
expect = filepath.Join(mbPath, id2+".raw")
assert.True(t, isFile(expect), "Expected %q to be a file", expect)
// Delete message
mb, err := ds.MailboxFor(mbName)
assert.Nil(t, err)
msg, err := mb.GetMessage(id1)
assert.Nil(t, err)
err = msg.Delete()
assert.Nil(t, err)
// Message should be removed
expect = filepath.Join(mbPath, id1+".raw")
assert.False(t, isPresent(expect), "Did not expect %q to exist", expect)
expect = filepath.Join(mbPath, "index.gob")
assert.True(t, isFile(expect), "Expected %q to be a file", expect)
// Delete message
msg, err = mb.GetMessage(id2)
assert.Nil(t, err)
err = msg.Delete()
assert.Nil(t, err)
// Message should be removed
expect = filepath.Join(mbPath, id2+".raw")
assert.False(t, isPresent(expect), "Did not expect %q to exist", expect)
// No messages, index & maildir should be removed
expect = filepath.Join(mbPath, "index.gob")
assert.False(t, isPresent(expect), "Did not expect %q to exist", expect)
expect = mbPath
assert.False(t, isPresent(expect), "Did not expect %q to exist", expect)
if t.Failed() {
// Wait for handler to finish logging
time.Sleep(2 * time.Second)
// Dump buffered log data if there was a failure
_, _ = io.Copy(os.Stderr, logbuf)
}
}
// Test FileDataStore.AllMailboxes()
func TestFSAllMailboxes(t *testing.T) {
ds, logbuf := setupDataStore(config.DataStoreConfig{})
defer teardownDataStore(ds)
for _, name := range []string{"abby", "bill", "christa", "donald", "evelyn"} {
// Create day old message
date := time.Now().Add(-24 * time.Hour)
deliverMessage(ds, name, "Old Message", date)
// Create current message
date = time.Now()
deliverMessage(ds, name, "New Message", date)
}
mboxes, err := ds.AllMailboxes()
assert.Nil(t, err)
assert.Equal(t, len(mboxes), 5)
if t.Failed() {
// Wait for handler to finish logging
time.Sleep(2 * time.Second)
// Dump buffered log data if there was a failure
_, _ = io.Copy(os.Stderr, logbuf)
}
}
// Test delivering several messages to the same mailbox, meanwhile querying its
// contents with a new mailbox object each time
func TestFSDeliverMany(t *testing.T) {
ds, logbuf := setupDataStore(config.DataStoreConfig{})
defer teardownDataStore(ds)
mbName := "fred"
subjects := []string{"alpha", "bravo", "charlie", "delta", "echo"}
for i, subj := range subjects {
// Check number of messages
mb, err := ds.MailboxFor(mbName)
if err != nil {
t.Fatalf("Failed to MailboxFor(%q): %v", mbName, err)
}
msgs, err := mb.GetMessages()
if err != nil {
t.Fatalf("Failed to GetMessages for %q: %v", mbName, err)
}
assert.Equal(t, i, len(msgs), "Expected %v message(s), but got %v", i, len(msgs))
// Add a message
deliverMessage(ds, mbName, subj, time.Now())
}
mb, err := ds.MailboxFor(mbName)
if err != nil {
t.Fatalf("Failed to MailboxFor(%q): %v", mbName, err)
}
msgs, err := mb.GetMessages()
if err != nil {
t.Fatalf("Failed to GetMessages for %q: %v", mbName, err)
}
assert.Equal(t, len(subjects), len(msgs), "Expected %v message(s), but got %v",
len(subjects), len(msgs))
// Confirm delivery order
for i, expect := range subjects {
subj := msgs[i].Subject()
assert.Equal(t, expect, subj, "Expected subject %q, got %q", expect, subj)
}
if t.Failed() {
// Wait for handler to finish logging
time.Sleep(2 * time.Second)
// Dump buffered log data if there was a failure
_, _ = io.Copy(os.Stderr, logbuf)
}
}
// Test deleting messages
func TestFSDelete(t *testing.T) {
ds, logbuf := setupDataStore(config.DataStoreConfig{})
defer teardownDataStore(ds)
mbName := "fred"
subjects := []string{"alpha", "bravo", "charlie", "delta", "echo"}
for _, subj := range subjects {
// Add a message
deliverMessage(ds, mbName, subj, time.Now())
}
mb, err := ds.MailboxFor(mbName)
if err != nil {
t.Fatalf("Failed to MailboxFor(%q): %v", mbName, err)
}
msgs, err := mb.GetMessages()
if err != nil {
t.Fatalf("Failed to GetMessages for %q: %v", mbName, err)
}
assert.Equal(t, len(subjects), len(msgs), "Expected %v message(s), but got %v",
len(subjects), len(msgs))
// Delete a couple messages
_ = msgs[1].Delete()
_ = msgs[3].Delete()
// Confirm deletion
mb, err = ds.MailboxFor(mbName)
if err != nil {
t.Fatalf("Failed to MailboxFor(%q): %v", mbName, err)
}
msgs, err = mb.GetMessages()
if err != nil {
t.Fatalf("Failed to GetMessages for %q: %v", mbName, err)
}
subjects = []string{"alpha", "charlie", "echo"}
assert.Equal(t, len(subjects), len(msgs), "Expected %v message(s), but got %v",
len(subjects), len(msgs))
for i, expect := range subjects {
subj := msgs[i].Subject()
assert.Equal(t, expect, subj, "Expected subject %q, got %q", expect, subj)
}
// Try appending one more
deliverMessage(ds, mbName, "foxtrot", time.Now())
mb, err = ds.MailboxFor(mbName)
if err != nil {
t.Fatalf("Failed to MailboxFor(%q): %v", mbName, err)
}
msgs, err = mb.GetMessages()
if err != nil {
t.Fatalf("Failed to GetMessages for %q: %v", mbName, err)
}
subjects = []string{"alpha", "charlie", "echo", "foxtrot"}
assert.Equal(t, len(subjects), len(msgs), "Expected %v message(s), but got %v",
len(subjects), len(msgs))
for i, expect := range subjects {
subj := msgs[i].Subject()
assert.Equal(t, expect, subj, "Expected subject %q, got %q", expect, subj)
}
if t.Failed() {
// Wait for handler to finish logging
time.Sleep(2 * time.Second)
// Dump buffered log data if there was a failure
_, _ = io.Copy(os.Stderr, logbuf)
}
}
// Test purging a mailbox
func TestFSPurge(t *testing.T) {
ds, logbuf := setupDataStore(config.DataStoreConfig{})
defer teardownDataStore(ds)
mbName := "fred"
subjects := []string{"alpha", "bravo", "charlie", "delta", "echo"}
for _, subj := range subjects {
// Add a message
deliverMessage(ds, mbName, subj, time.Now())
}
mb, err := ds.MailboxFor(mbName)
if err != nil {
t.Fatalf("Failed to MailboxFor(%q): %v", mbName, err)
}
msgs, err := mb.GetMessages()
if err != nil {
t.Fatalf("Failed to GetMessages for %q: %v", mbName, err)
}
assert.Equal(t, len(subjects), len(msgs), "Expected %v message(s), but got %v",
len(subjects), len(msgs))
// Purge mailbox
err = mb.Purge()
assert.Nil(t, err)
// Confirm deletion
mb, err = ds.MailboxFor(mbName)
if err != nil {
t.Fatalf("Failed to MailboxFor(%q): %v", mbName, err)
}
msgs, err = mb.GetMessages()
if err != nil {
t.Fatalf("Failed to GetMessages for %q: %v", mbName, err)
}
assert.Equal(t, len(msgs), 0, "Expected mailbox to have zero messages, got %v", len(msgs))
if t.Failed() {
// Wait for handler to finish logging
time.Sleep(2 * time.Second)
// Dump buffered log data if there was a failure
_, _ = io.Copy(os.Stderr, logbuf)
}
}
// Test message size calculation
func TestFSSize(t *testing.T) {
ds, logbuf := setupDataStore(config.DataStoreConfig{})
defer teardownDataStore(ds)
mbName := "fred"
subjects := []string{"a", "br", "much longer than the others"}
sentIds := make([]string, len(subjects))
sentSizes := make([]int64, len(subjects))
for i, subj := range subjects {
// Add a message
id, size := deliverMessage(ds, mbName, subj, time.Now())
sentIds[i] = id
sentSizes[i] = size
}
mb, err := ds.MailboxFor(mbName)
if err != nil {
t.Fatalf("Failed to MailboxFor(%q): %v", mbName, err)
}
for i, id := range sentIds {
msg, err := mb.GetMessage(id)
assert.Nil(t, err)
expect := sentSizes[i]
size := msg.Size()
assert.Equal(t, expect, size, "Expected size of %v, got %v", expect, size)
}
if t.Failed() {
// Wait for handler to finish logging
time.Sleep(2 * time.Second)
// Dump buffered log data if there was a failure
_, _ = io.Copy(os.Stderr, logbuf)
}
}
// Test missing files
func TestFSMissing(t *testing.T) {
ds, logbuf := setupDataStore(config.DataStoreConfig{})
defer teardownDataStore(ds)
mbName := "fred"
subjects := []string{"a", "b", "c"}
sentIds := make([]string, len(subjects))
for i, subj := range subjects {
// Add a message
id, _ := deliverMessage(ds, mbName, subj, time.Now())
sentIds[i] = id
}
mb, err := ds.MailboxFor(mbName)
if err != nil {
t.Fatalf("Failed to MailboxFor(%q): %v", mbName, err)
}
// Delete a message file without removing it from index
msg, err := mb.GetMessage(sentIds[1])
assert.Nil(t, err)
fmsg := msg.(*FileMessage)
_ = os.Remove(fmsg.rawPath())
msg, err = mb.GetMessage(sentIds[1])
assert.Nil(t, err)
// Try to read parts of message
_, err = msg.ReadHeader()
assert.Error(t, err)
_, err = msg.ReadBody()
assert.Error(t, err)
if t.Failed() {
// Wait for handler to finish logging
time.Sleep(2 * time.Second)
// Dump buffered log data if there was a failure
_, _ = io.Copy(os.Stderr, logbuf)
}
}
// Test delivering several messages to the same mailbox, see if message cap works
func TestFSMessageCap(t *testing.T) {
mbCap := 10
ds, logbuf := setupDataStore(config.DataStoreConfig{MailboxMsgCap: mbCap})
defer teardownDataStore(ds)
mbName := "captain"
for i := 0; i < 20; i++ {
// Add a message
subj := fmt.Sprintf("subject %v", i)
deliverMessage(ds, mbName, subj, time.Now())
t.Logf("Delivered %q", subj)
// Check number of messages
mb, err := ds.MailboxFor(mbName)
if err != nil {
t.Fatalf("Failed to MailboxFor(%q): %v", mbName, err)
}
msgs, err := mb.GetMessages()
if err != nil {
t.Fatalf("Failed to GetMessages for %q: %v", mbName, err)
}
if len(msgs) > mbCap {
t.Errorf("Mailbox should be capped at %v messages, but has %v", mbCap, len(msgs))
}
// Check that the first message is correct
first := i - mbCap + 1
if first < 0 {
first = 0
}
firstSubj := fmt.Sprintf("subject %v", first)
if firstSubj != msgs[0].Subject() {
t.Errorf("Expected first subject to be %q, got %q", firstSubj, msgs[0].Subject())
}
}
if t.Failed() {
// Wait for handler to finish logging
time.Sleep(2 * time.Second)
// Dump buffered log data if there was a failure
_, _ = io.Copy(os.Stderr, logbuf)
}
}
// Test delivering several messages to the same mailbox, see if no message cap works
func TestFSNoMessageCap(t *testing.T) {
mbCap := 0
ds, logbuf := setupDataStore(config.DataStoreConfig{MailboxMsgCap: mbCap})
defer teardownDataStore(ds)
mbName := "captain"
for i := 0; i < 20; i++ {
// Add a message
subj := fmt.Sprintf("subject %v", i)
deliverMessage(ds, mbName, subj, time.Now())
t.Logf("Delivered %q", subj)
// Check number of messages
mb, err := ds.MailboxFor(mbName)
if err != nil {
t.Fatalf("Failed to MailboxFor(%q): %v", mbName, err)
}
msgs, err := mb.GetMessages()
if err != nil {
t.Fatalf("Failed to GetMessages for %q: %v", mbName, err)
}
if len(msgs) != i+1 {
t.Errorf("Expected %v messages, got %v", i+1, len(msgs))
}
}
if t.Failed() {
// Wait for handler to finish logging
time.Sleep(2 * time.Second)
// Dump buffered log data if there was a failure
_, _ = io.Copy(os.Stderr, logbuf)
}
}
// Test getting the latest message
func TestGetLatestMessage(t *testing.T) {
ds, logbuf := setupDataStore(config.DataStoreConfig{})
defer teardownDataStore(ds)
// james hashes to 474ba67bdb289c6263b36dfd8a7bed6c85b04943
mbName := "james"
// Test empty mailbox
mb, err := ds.MailboxFor(mbName)
assert.Nil(t, err)
msg, err := mb.GetMessage("latest")
assert.Nil(t, msg)
assert.Error(t, err)
// Deliver test message
deliverMessage(ds, mbName, "test", time.Now())
// Deliver test message 2
id2, _ := deliverMessage(ds, mbName, "test 2", time.Now())
// Test get the latest message
mb, err = ds.MailboxFor(mbName)
assert.Nil(t, err)
msg, err = mb.GetMessage("latest")
assert.Nil(t, err)
assert.True(t, msg.ID() == id2, "Expected %q to be equal to %q", msg.ID(), id2)
// Deliver test message 3
id3, _ := deliverMessage(ds, mbName, "test 3", time.Now())
mb, err = ds.MailboxFor(mbName)
assert.Nil(t, err)
msg, err = mb.GetMessage("latest")
assert.Nil(t, err)
assert.True(t, msg.ID() == id3, "Expected %q to be equal to %q", msg.ID(), id3)
// Test wrong id
_, err = mb.GetMessage("wrongid")
assert.Error(t, err)
if t.Failed() {
// Wait for handler to finish logging
time.Sleep(2 * time.Second)
// Dump buffered log data if there was a failure
_, _ = io.Copy(os.Stderr, logbuf)
}
}
// setupDataStore creates a new FileDataStore in a temporary directory
func setupDataStore(cfg config.DataStoreConfig) (*FileDataStore, *bytes.Buffer) {
path, err := ioutil.TempDir("", "inbucket")
if err != nil {
panic(err)
}
// Capture log output
buf := new(bytes.Buffer)
log.SetOutput(buf)
cfg.Path = path
return NewFileDataStore(cfg).(*FileDataStore), buf
}
// deliverMessage creates and delivers a message to the specified mailbox, returning
// the ID and size of the generated message.
func deliverMessage(ds *FileDataStore, mbName string, subject string,
date time.Time) (id string, size int64) {
// Build fake SMTP message for delivery
testMsg := make([]byte, 0, 300)
testMsg = append(testMsg, []byte("To: somebody@host\r\n")...)
testMsg = append(testMsg, []byte("From: somebodyelse@host\r\n")...)
testMsg = append(testMsg, []byte(fmt.Sprintf("Subject: %s\r\n", subject))...)
testMsg = append(testMsg, []byte("\r\n")...)
testMsg = append(testMsg, []byte("Test Body\r\n")...)
mb, err := ds.MailboxFor(mbName)
if err != nil {
panic(err)
}
// Create message object
id = generateID(date)
msg, err := mb.NewMessage()
if err != nil {
panic(err)
}
fmsg := msg.(*FileMessage)
fmsg.Fdate = date
fmsg.Fid = id
if err = msg.Append(testMsg); err != nil {
panic(err)
}
if err = msg.Close(); err != nil {
panic(err)
}
return id, int64(len(testMsg))
}
func teardownDataStore(ds *FileDataStore) {
if err := os.RemoveAll(ds.path); err != nil {
panic(err)
}
}
func isPresent(path string) bool {
_, err := os.Lstat(path)
return err == nil
}
func isFile(path string) bool {
if fi, err := os.Lstat(path); err == nil {
return !fi.IsDir()
}
return false
}
func isDir(path string) bool {
if fi, err := os.Lstat(path); err == nil {
return fi.IsDir()
}
return false
}
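
Since these tests now live in the pkg/storage/file package, they should be runnable with the standard Go tooling (Makefile targets aside), for example:

go test github.com/jhillyerd/inbucket/pkg/storage/file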

pkg/storage/lock.go Normal file

@@ -0,0 +1,19 @@
package datastore
import (
"strconv"
"sync"
)
// HashLock is an array of read/write mutexes, sharded by the first three hex
// digits (12 bits) of a mailbox name hash.
type HashLock [4096]sync.RWMutex
// Get returns the RWMutex corresponding to the given hash, or nil if the hash
// is too short or not valid hexadecimal.
func (h *HashLock) Get(hash string) *sync.RWMutex {
if len(hash) < 3 {
return nil
}
i, err := strconv.ParseInt(hash[0:3], 16, 0)
if err != nil {
return nil
}
return &h[i]
}
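
A small sketch of the sharding behavior, mirroring the tests below: hashes that share their first three hex digits resolve to the same mutex. The explicit import alias is an assumption based on this commit's pairing of the pkg/storage path with the datastore package name.

package main

import (
	"fmt"

	datastore "github.com/jhillyerd/inbucket/pkg/storage"
)

func main() {
	var locks datastore.HashLock
	a := locks.Get("474ba67bdb289c6263b36dfd8a7bed6c85b04943")
	b := locks.Get("474bffff")   // same leading "474" -> same shard
	fmt.Println(a == b)          // true
	fmt.Println(locks.Get("zz")) // nil: hash too short
}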

pkg/storage/lock_test.go Normal file

@@ -0,0 +1,61 @@
package datastore_test
import (
"testing"
"github.com/jhillyerd/inbucket/pkg/storage"
)
func TestHashLock(t *testing.T) {
hl := &datastore.HashLock{}
// Invalid hashes
testCases := []struct {
name, input string
}{
{"empty", ""},
{"short", "a0"},
{"badhex", "zzzzzzzzzzzzzzzzzzzzzzz"},
}
for _, tc := range testCases {
t.Run(tc.input, func(t *testing.T) {
l := hl.Get(tc.input)
if l != nil {
t.Errorf("Expected nil lock for %s %q, got %v", tc.name, tc.input, l)
}
})
}
// Valid hashes
testStrings := []string{
"deadbeef",
"00000000",
"ffffffff",
}
for _, ts := range testStrings {
t.Run(ts, func(t *testing.T) {
l := hl.Get(ts)
if l == nil {
t.Errorf("Expeced non-nil lock for hex string %q", ts)
}
})
}
a := hl.Get("deadbeef")
b := hl.Get("deadbeef")
if a != b {
t.Errorf("Expected identical locks for identical hashes, got: %p != %p", a, b)
}
a = hl.Get("deadbeef")
b = hl.Get("d3adb33f")
if a == b {
t.Errorf("Expected different locks for different hashes, got: %p == %p", a, b)
}
a = hl.Get("deadbeef")
b = hl.Get("deadb33f")
if a != b {
t.Errorf("Expected identical locks for identical leading hashes, got: %p != %p", a, b)
}
}

pkg/storage/retention.go Normal file

@@ -0,0 +1,180 @@
package datastore
import (
"container/list"
"expvar"
"sync"
"time"
"github.com/jhillyerd/inbucket/pkg/config"
"github.com/jhillyerd/inbucket/pkg/log"
)
var (
retentionScanCompleted = time.Now()
retentionScanCompletedMu sync.RWMutex
// History counters
expRetentionDeletesTotal = new(expvar.Int)
expRetentionPeriod = new(expvar.Int)
expRetainedCurrent = new(expvar.Int)
// History of certain stats
retentionDeletesHist = list.New()
retainedHist = list.New()
// History rendered as comma delimited string
expRetentionDeletesHist = new(expvar.String)
expRetainedHist = new(expvar.String)
)
func init() {
rm := expvar.NewMap("retention")
rm.Set("SecondsSinceScanCompleted", expvar.Func(secondsSinceRetentionScanCompleted))
rm.Set("DeletesHist", expRetentionDeletesHist)
rm.Set("DeletesTotal", expRetentionDeletesTotal)
rm.Set("Period", expRetentionPeriod)
rm.Set("RetainedHist", expRetainedHist)
rm.Set("RetainedCurrent", expRetainedCurrent)
log.AddTickerFunc(func() {
expRetentionDeletesHist.Set(log.PushMetric(retentionDeletesHist, expRetentionDeletesTotal))
expRetainedHist.Set(log.PushMetric(retainedHist, expRetainedCurrent))
})
}
// RetentionScanner looks for messages older than the configured retention period and deletes them.
type RetentionScanner struct {
globalShutdown chan bool // Closes when Inbucket needs to shut down
retentionShutdown chan bool // Closed after the scanner has shut down
ds DataStore
retentionPeriod time.Duration
retentionSleep time.Duration
}
// NewRetentionScanner creates a scanner for expired messages, using the
// configured retention period and sleep interval
func NewRetentionScanner(ds DataStore, shutdownChannel chan bool) *RetentionScanner {
cfg := config.GetDataStoreConfig()
rs := &RetentionScanner{
globalShutdown: shutdownChannel,
retentionShutdown: make(chan bool),
ds: ds,
retentionPeriod: time.Duration(cfg.RetentionMinutes) * time.Minute,
retentionSleep: time.Duration(cfg.RetentionSleep) * time.Millisecond,
}
// expRetentionPeriod is displayed on the status page
expRetentionPeriod.Set(int64(cfg.RetentionMinutes * 60))
return rs
}
// Start launches the retention scanner goroutine if the retention period is > 0
func (rs *RetentionScanner) Start() {
if rs.retentionPeriod <= 0 {
log.Infof("Retention scanner disabled")
close(rs.retentionShutdown)
return
}
log.Infof("Retention configured for %v", rs.retentionPeriod)
go rs.run()
}
// run loops to kick off the scanner on the correct schedule
func (rs *RetentionScanner) run() {
start := time.Now()
retentionLoop:
for {
// Prevent scanner from starting more than once a minute
since := time.Since(start)
if since < time.Minute {
dur := time.Minute - since
log.Tracef("Retention scanner sleeping for %v", dur)
select {
case <-rs.globalShutdown:
break retentionLoop
case <-time.After(dur):
}
}
// Kickoff scan
start = time.Now()
if err := rs.doScan(); err != nil {
log.Errorf("Error during retention scan: %v", err)
}
// Check for global shutdown
select {
case <-rs.globalShutdown:
break retentionLoop
default:
}
}
log.Tracef("Retention scanner shut down")
close(rs.retentionShutdown)
}
// doScan does a single pass of all mailboxes looking for messages that can be purged
func (rs *RetentionScanner) doScan() error {
log.Tracef("Starting retention scan")
cutoff := time.Now().Add(-1 * rs.retentionPeriod)
mboxes, err := rs.ds.AllMailboxes()
if err != nil {
return err
}
retained := 0
// Loop over all mailboxes
for _, mb := range mboxes {
messages, err := mb.GetMessages()
if err != nil {
return err
}
// Loop over all messages in mailbox
for _, msg := range messages {
if msg.Date().Before(cutoff) {
log.Tracef("Purging expired message %v", msg.ID())
err = msg.Delete()
if err != nil {
// Log but don't abort
log.Errorf("Failed to purge message %v: %v", msg.ID(), err)
} else {
expRetentionDeletesTotal.Add(1)
}
} else {
retained++
}
}
// Sleep after completing a mailbox
select {
case <-rs.globalShutdown:
log.Tracef("Retention scan aborted due to shutdown")
return nil
case <-time.After(rs.retentionSleep):
// Reduce disk thrashing
}
}
// Update metrics
setRetentionScanCompleted(time.Now())
expRetainedCurrent.Set(int64(retained))
return nil
}
// Join does not return until the retention scanner has shut down
func (rs *RetentionScanner) Join() {
if rs.retentionShutdown != nil {
<-rs.retentionShutdown
}
}
func setRetentionScanCompleted(t time.Time) {
retentionScanCompletedMu.Lock()
defer retentionScanCompletedMu.Unlock()
retentionScanCompleted = t
}
func getRetentionScanCompleted() time.Time {
retentionScanCompletedMu.RLock()
defer retentionScanCompletedMu.RUnlock()
return retentionScanCompleted
}
func secondsSinceRetentionScanCompleted() interface{} {
return time.Since(getRetentionScanCompleted()) / time.Second
}
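
A minimal sketch of how the scanner is meant to be driven, based only on the API above; the shutdown-channel wiring shown is an assumption, not the server's actual startup code.

// runRetention is illustrative, not part of Inbucket.
func runRetention(ds datastore.DataStore) {
	shutdown := make(chan bool)
	rs := datastore.NewRetentionScanner(ds, shutdown)
	rs.Start() // no-op (scanner disabled) when the retention period is zero
	// ... later, during server shutdown:
	close(shutdown) // signal the scan loop to stop
	rs.Join()       // block until the scanner goroutine has exited
}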


@@ -0,0 +1,67 @@
package datastore
import (
"fmt"
"testing"
"time"
)
func TestDoRetentionScan(t *testing.T) {
// Create mock objects
mds := &MockDataStore{}
mb1 := &MockMailbox{}
mb2 := &MockMailbox{}
mb3 := &MockMailbox{}
// Mock up some messages of different ages (argument is age in hours)
new1 := mockMessage(0)
new2 := mockMessage(1)
new3 := mockMessage(2)
old1 := mockMessage(4)
old2 := mockMessage(12)
old3 := mockMessage(24)
// First it should ask for all mailboxes
mds.On("AllMailboxes").Return([]Mailbox{mb1, mb2, mb3}, nil)
// Then for all messages on each box
mb1.On("GetMessages").Return([]Message{new1, old1, old2}, nil)
mb2.On("GetMessages").Return([]Message{old3, new2}, nil)
mb3.On("GetMessages").Return([]Message{new3}, nil)
// Test 4 hour retention
rs := &RetentionScanner{
ds: mds,
retentionPeriod: 4*time.Hour - time.Minute,
retentionSleep: 0,
}
if err := rs.doScan(); err != nil {
t.Error(err)
}
// Check our assertions
mds.AssertExpectations(t)
mb1.AssertExpectations(t)
mb2.AssertExpectations(t)
mb3.AssertExpectations(t)
// Delete should not have been called on new messages
new1.AssertNotCalled(t, "Delete")
new2.AssertNotCalled(t, "Delete")
new3.AssertNotCalled(t, "Delete")
// Delete should have been called once on old messages
old1.AssertNumberOfCalls(t, "Delete", 1)
old2.AssertNumberOfCalls(t, "Delete", 1)
old3.AssertNumberOfCalls(t, "Delete", 1)
}
// Make a MockMessage of a specific age
func mockMessage(ageHours int) *MockMessage {
msg := &MockMessage{}
msg.On("ID").Return(fmt.Sprintf("MSG[age=%vh]", ageHours))
msg.On("Date").Return(time.Now().Add(time.Duration(ageHours*-1) * time.Hour))
msg.On("Delete").Return(nil)
return msg
}

pkg/storage/testing.go Normal file

@@ -0,0 +1,162 @@
package datastore
import (
"io"
"net/mail"
"sync"
"time"
"github.com/jhillyerd/enmime"
"github.com/stretchr/testify/mock"
)
// MockDataStore is a shared mock for unit testing
type MockDataStore struct {
mock.Mock
}
// MailboxFor mock function
func (m *MockDataStore) MailboxFor(name string) (Mailbox, error) {
args := m.Called(name)
return args.Get(0).(Mailbox), args.Error(1)
}
// AllMailboxes mock function
func (m *MockDataStore) AllMailboxes() ([]Mailbox, error) {
args := m.Called()
return args.Get(0).([]Mailbox), args.Error(1)
}
// LockFor mock function
func (m *MockDataStore) LockFor(name string) (*sync.RWMutex, error) {
return &sync.RWMutex{}, nil
}
// MockMailbox is a shared mock for unit testing
type MockMailbox struct {
mock.Mock
}
// GetMessages mock function
func (m *MockMailbox) GetMessages() ([]Message, error) {
args := m.Called()
return args.Get(0).([]Message), args.Error(1)
}
// GetMessage mock function
func (m *MockMailbox) GetMessage(id string) (Message, error) {
args := m.Called(id)
return args.Get(0).(Message), args.Error(1)
}
// Purge mock function
func (m *MockMailbox) Purge() error {
args := m.Called()
return args.Error(0)
}
// NewMessage mock function
func (m *MockMailbox) NewMessage() (Message, error) {
args := m.Called()
return args.Get(0).(Message), args.Error(1)
}
// Name mock function
func (m *MockMailbox) Name() string {
args := m.Called()
return args.String(0)
}
// String mock function
func (m *MockMailbox) String() string {
args := m.Called()
return args.String(0)
}
// MockMessage is a shared mock for unit testing
type MockMessage struct {
mock.Mock
}
// ID mock function
func (m *MockMessage) ID() string {
args := m.Called()
return args.String(0)
}
// From mock function
func (m *MockMessage) From() string {
args := m.Called()
return args.String(0)
}
// To mock function
func (m *MockMessage) To() []string {
args := m.Called()
return args.Get(0).([]string)
}
// Date mock function
func (m *MockMessage) Date() time.Time {
args := m.Called()
return args.Get(0).(time.Time)
}
// Subject mock function
func (m *MockMessage) Subject() string {
args := m.Called()
return args.String(0)
}
// ReadHeader mock function
func (m *MockMessage) ReadHeader() (msg *mail.Message, err error) {
args := m.Called()
return args.Get(0).(*mail.Message), args.Error(1)
}
// ReadBody mock function
func (m *MockMessage) ReadBody() (body *enmime.Envelope, err error) {
args := m.Called()
return args.Get(0).(*enmime.Envelope), args.Error(1)
}
// ReadRaw mock function
func (m *MockMessage) ReadRaw() (raw *string, err error) {
args := m.Called()
return args.Get(0).(*string), args.Error(1)
}
// RawReader mock function
func (m *MockMessage) RawReader() (reader io.ReadCloser, err error) {
args := m.Called()
return args.Get(0).(io.ReadCloser), args.Error(1)
}
// Size mock function
func (m *MockMessage) Size() int64 {
args := m.Called()
return int64(args.Int(0))
}
// Append mock function
func (m *MockMessage) Append(data []byte) error {
// []byte arg seems to mess up testify/mock
return nil
}
// Close mock function
func (m *MockMessage) Close() error {
args := m.Called()
return args.Error(0)
}
// Delete mock function
func (m *MockMessage) Delete() error {
args := m.Called()
return args.Error(0)
}
// String mock function
func (m *MockMessage) String() string {
args := m.Called()
return args.String(0)
}