paths: ['**.go', 'go.mod', '.github/workflows/*']
jobs:
+ # Test Windows and Linux with the latest Go version and the oldest we support.
test:
strategy:
fail-fast: false
matrix:
os:
- ubuntu-latest
- - macos-11
- - macos-12
- windows-latest
go:
- '1.16'
run: |
go test -race ./...
+ # Test only the latest Go version on macOS; we use the macOS builders for BSD
+ # and illumos, and GitHub doesn't allow many of them to run concurrently. If
+ # it works on Windows and Linux with Go 1.16, then it probably does on macOS
+ # too.
+ testMacOS:
+ name: test
+ strategy:
+ fail-fast: false
+ matrix:
+ os:
+ - macos-11
+ - macos-12
+ go:
+ - '1.19'
+ runs-on: ${{ matrix.os }}
+ steps:
+ - name: setup Go
+ uses: actions/setup-go@v3
+ with:
+ go-version: ${{ matrix.go }}
+
+ - name: checkout
+ uses: actions/checkout@v3
+
+ - name: test
+ run: |
+ go test -race ./...
+
+ # FreeBSD
testFreeBSD:
runs-on: macos-12
name: test (freebsd, 1.18)
pw user add -n action -m
su action -c 'go test -race ./...'
+ # OpenBSD; no -race as the VM doesn't include the comp set.
+ #
+ # TODO: should probably add this, but on my local machine the tests time out
+ # with -race as the waits aren't long enough (OpenBSD is kind of slow),
+ # so should probably look into that first. Go 1.19 is supposed to have a
+ # much faster race detector, so maybe waiting until we have that is
+ # enough.
testOpenBSD:
runs-on: macos-12
name: test (openbsd, 1.17)
uses: vmactions/openbsd-vm@v0.0.6
with:
prepare: pkg_add go
- # No -race as the VM doesn't include the comp set.
- #
- # TODO: should probably add this, but on my local machine the tests
- # time out with -race as the waits aren't long enough (OpenBSD
- # is kind of slow), so should probably look into that first.
- # Go 1.19 is supposed to have a much faster race detector, so
- # maybe waiting until we have that is enough.
run: |
- # Default of 512 leads to "too many open files".
- ulimit -n 1024
useradd -mG wheel action
su action -c 'go test ./...'
+ # NetBSD
testNetBSD:
runs-on: macos-12
- name: test (netbsd, 1.17)
+ name: test (netbsd, 1.18)
steps:
- uses: actions/checkout@v3
- - name: test (netbsd, 1.17)
+ - name: test (netbsd, 1.18)
id: test
uses: vmactions/netbsd-vm@v0.0.4
with:
# TODO: no -race for the same reason as OpenBSD (the timing; it does run).
run: |
useradd -mG wheel action
- su action -c 'go117 test ./...'
+ su action -c 'go118 test ./...'
+ # illumos
testillumos:
runs-on: macos-12
name: test (illumos, 1.17)
pkg install go-117
run: |
/opt/ooce/go-1.17/bin/go test ./...
+
+ # Debian 6, for testing against old Linux kernels.
+ testDebian6:
+ runs-on: macos-12
+ name: test (debian6, 1.19)
+ strategy:
+ fail-fast: false
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Cache Vagrant boxes
+ uses: actions/cache@v3
+ with:
+ path: ~/.vagrant.d/boxes
+ key: ${{ runner.os }}-vagrant-${{ hashFiles('Vagrantfile') }}
+ restore-keys: |
+ ${{ runner.os }}-vagrant-
+
+ - name: setup Go
+ uses: actions/setup-go@v3
+ with:
+ go-version: '1.19'
+
+ - name: test (debian6, 1.19)
+ id: test
+ run: |
+ cp -f .github/workflows/Vagrantfile.debian6 Vagrantfile
+ export GOOS=linux
+ export GOARCH=amd64
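+ # go list prints import paths; ${p//\//-} replaces the '/'s with '-' so
+ # every package gets a unique, flat .test filename in the shared folder.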
+ for p in $(go list ./...); do
+ go test -c -o ${p//\//-}.test $p
+ done
+ vagrant up
+ for t in *.test; do
+ vagrant ssh -c "/vagrant/$t"
+ done
+++ /dev/null
-name: 'test-vagrant'
-on:
- push:
- paths: ['**.go', 'go.mod', '.github/workflows/*']
-
-jobs:
- test-vagrant:
- strategy:
- fail-fast: false
- matrix:
- image:
- - debian6
- runs-on: macos-12
- steps:
- - name: checkout
- uses: actions/checkout@v3
-
- - name: Cache Vagrant boxes
- uses: actions/cache@v3
- with:
- path: ~/.vagrant.d/boxes
- key: ${{ runner.os }}-vagrant-${{ hashFiles('Vagrantfile') }}
- restore-keys: |
- ${{ runner.os }}-vagrant-
-
- - name: setup Go
- uses: actions/setup-go@v3
- with:
- go-version: '1.19'
-
- - name: test
- run: |
- cp -f .github/workflows/Vagrantfile.${{ matrix.image }} Vagrantfile
- export GOOS=linux
- export GOARCH=amd64
- for p in $(go list ./...); do
- go test -c -o ${p//\//-}.test $p
- done
- vagrant up
- for t in *.test; do
- vagrant ssh -c "/vagrant/$t"
- done
# go test -c output
*.test
+*.test.exe
# Output of go build ./cmd/fsnotify
/fsnotify
platforms. Testing different platforms locally can be done with something like
[goon] or [Vagrant], but this isn't super-easy to set up at the moment.
-The main tests are in [integration_test.go].
+Use the `-short` flag to make the "stress test" run faster.
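+For example: `go test -short ./...`.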
+
[goon]: https://github.com/arp242/goon
[Vagrant]: https://www.vagrantup.com/
case w.Errors <- err:
return true
case <-w.done:
+ return false
}
- return false
}
func (w *Watcher) isClosed() bool {
close(w.done)
w.mu.Unlock()
- // Causes any blocking reads to return with an error, provided the file still supports deadline operations
+ // Causes any blocking reads to return with an error, provided the file
+ // still supports deadline operations.
err := w.inotifyFile.Close()
if err != nil {
return err
// by another thread and we have not received IN_IGNORE event.
success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
if success == -1 {
- // TODO: Perhaps it's not helpful to return an error here in every case.
- // the only two possible errors are:
- // EBADF, which happens when w.fd is not a valid file descriptor of any kind.
- // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
- // Watch descriptors are invalidated when they are removed explicitly or implicitly;
- // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
+ // TODO: Perhaps it's not helpful to return an error here in every case;
+ // the only two possible errors are:
+ //
+ // - EBADF, which happens when w.fd is not a valid file descriptor
+ // of any kind.
+ // - EINVAL, which is when fd is not an inotify descriptor or wd
+ // is not a valid watch descriptor. Watch descriptors are
+ // invalidated when they are removed explicitly or implicitly;
+ // explicitly by inotify_rm_watch, implicitly when the file they
+ // are watching is deleted.
return errno
}
// readEvents reads from the inotify file descriptor, converts the
// received events into Event objects and sends them via the Events channel
func (w *Watcher) readEvents() {
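+ // On exit: closing doneResp lets Close() know readEvents has returned, and
+ // closing Events/Errors unblocks anyone still reading from them.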
+ defer func() {
+ close(w.doneResp)
+ close(w.Errors)
+ close(w.Events)
+ }()
+
var (
buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
errno error // Syscall errno
)
-
- defer close(w.doneResp)
- defer close(w.Errors)
- defer close(w.Events)
-
for {
// See if we have been closed.
if w.isClosed() {
"fmt"
"os"
"path/filepath"
+ "strconv"
"strings"
"sync"
"testing"
"time"
)
-// TODO: I'm not sure if these tests are still needed; I think they've become
-// redundant after epoll was removed. Keep them for now to be sure.
-func TestInotifyClose(t *testing.T) {
- isWatcherReallyClosed := func(t *testing.T, w *Watcher) {
- select {
- case err, ok := <-w.Errors:
- if ok {
- t.Fatalf("w.Errors is not closed; readEvents is still alive after closing (error: %v)", err)
- }
- default:
- t.Fatalf("w.Errors would have blocked; readEvents is still alive!")
- }
- select {
- case _, ok := <-w.Events:
- if ok {
- t.Fatalf("w.Events is not closed; readEvents is still alive after closing")
- }
- default:
- t.Fatalf("w.Events would have blocked; readEvents is still alive!")
- }
- }
-
- t.Run("close immediately", func(t *testing.T) {
- t.Parallel()
- w := newWatcher(t)
-
- w.Close() // Close immediately; it won't even reach the first unix.Read.
- <-time.After(50 * time.Millisecond) // Wait for the close to complete.
- isWatcherReallyClosed(t, w)
- })
-
- t.Run("close slightly later", func(t *testing.T) {
- t.Parallel()
- w := newWatcher(t)
-
- <-time.After(50 * time.Millisecond) // Wait until readEvents has reached unix.Read, and Close.
- w.Close()
- <-time.After(50 * time.Millisecond) // Wait for the close to complete.
- isWatcherReallyClosed(t, w)
- })
-
- t.Run("close slightly later with watch", func(t *testing.T) {
- t.Parallel()
- w := newWatcher(t)
- addWatch(t, w, t.TempDir())
-
- <-time.After(50 * time.Millisecond) // Wait until readEvents has reached unix.Read, and Close.
- w.Close()
- <-time.After(50 * time.Millisecond) // Wait for the close to complete.
- isWatcherReallyClosed(t, w)
- })
-
- t.Run("close after read", func(t *testing.T) {
- t.Parallel()
- tmp := t.TempDir()
- w := newWatcher(t)
- addWatch(t, w, tmp)
-
- touch(t, tmp, "somethingSOMETHINGsomethingSOMETHING") // Generate an event.
-
- <-time.After(50 * time.Millisecond) // Wait for readEvents to read the event, then close the watcher.
- w.Close()
- <-time.After(50 * time.Millisecond) // Wait for the close to complete.
- isWatcherReallyClosed(t, w)
- })
-
- t.Run("replace after close", func(t *testing.T) {
- t.Parallel()
-
- tmp := t.TempDir()
- w := newWatcher(t)
- defer w.Close()
-
- addWatch(t, w, tmp)
- touch(t, tmp, "testfile")
- select {
- case <-w.Events:
- case err := <-w.Errors:
- t.Fatalf("Error from watcher: %v", err)
- case <-time.After(50 * time.Millisecond):
- t.Fatalf("Took too long to wait for event")
- }
-
- // At this point, we've received one event, so the goroutine is ready and it
- // blocking on unix.Read. Now try to swap the file descriptor under its
- // nose.
- w.Close()
- w, err := NewWatcher()
- defer func() { _ = w.Close() }()
- if err != nil {
- t.Fatalf("Failed to create second watcher: %v", err)
- }
-
- <-time.After(50 * time.Millisecond)
- err = w.Add(tmp)
- if err != nil {
- t.Fatalf("Error adding tmp dir again: %v", err)
- }
- })
-}
-
-// Verify the watcher can keep up with file creations/deletions when under load.
+// Make sure there are no additional threads being created.
//
-// TODO: should probably be in integrations_test.
-func TestInotifyStress(t *testing.T) {
- maxNumToCreate := 1000
-
- tmp := t.TempDir()
- testFilePrefix := filepath.Join(tmp, "testfile")
-
- w := newWatcher(t)
- defer w.Close()
- addWatch(t, w, tmp)
-
- doneChan := make(chan struct{})
- // The buffer ensures that the file generation goroutine is never blocked.
- errChan := make(chan error, 2*maxNumToCreate)
-
- go func() {
- for i := 0; i < maxNumToCreate; i++ {
- testFile := fmt.Sprintf("%s%d", testFilePrefix, i)
-
- handle, err := os.Create(testFile)
- if err != nil {
- errChan <- fmt.Errorf("Create failed: %v", err)
- continue
- }
-
- err = handle.Close()
- if err != nil {
- errChan <- fmt.Errorf("Close failed: %v", err)
- continue
- }
- }
-
- // If we delete a newly created file too quickly, inotify will skip the
- // create event and only send the delete event.
- time.Sleep(100 * time.Millisecond)
-
- for i := 0; i < maxNumToCreate; i++ {
- testFile := fmt.Sprintf("%s%d", testFilePrefix, i)
- err := os.Remove(testFile)
- if err != nil {
- errChan <- fmt.Errorf("Remove failed: %v", err)
- }
- }
-
- close(doneChan)
- }()
-
- creates := 0
- removes := 0
-
- finished := false
- after := time.After(10 * time.Second)
- for !finished {
- select {
- case <-after:
- t.Fatalf("Not done")
- case <-doneChan:
- finished = true
- case err := <-errChan:
- t.Fatalf("Got an error from file creator goroutine: %v", err)
- case err := <-w.Errors:
- t.Fatalf("Got an error from watcher: %v", err)
- case evt := <-w.Events:
- if !strings.HasPrefix(evt.Name, testFilePrefix) {
- t.Fatalf("Got an event for an unknown file: %s", evt.Name)
- }
- if evt.Op == Create {
- creates++
- }
- if evt.Op == Remove {
- removes++
- }
- }
- }
-
- // Drain remaining events from channels
- count := 0
- for count < 10 {
- select {
- case err := <-errChan:
- t.Fatalf("Got an error from file creator goroutine: %v", err)
- case err := <-w.Errors:
- t.Fatalf("Got an error from watcher: %v", err)
- case evt := <-w.Events:
- if !strings.HasPrefix(evt.Name, testFilePrefix) {
- t.Fatalf("Got an event for an unknown file: %s", evt.Name)
- }
- if evt.Op == Create {
- creates++
- }
- if evt.Op == Remove {
- removes++
- }
- count = 0
- default:
- count++
- // Give the watcher chances to fill the channels.
- time.Sleep(time.Millisecond)
- }
- }
-
- if creates-removes > 1 || creates-removes < -1 {
- t.Fatalf("Creates and removes should not be off by more than one: %d creates, %d removes", creates, removes)
- }
- if creates < 50 {
- t.Fatalf("Expected at least 50 creates, got %d", creates)
- }
-}
-
-func TestInotifyInnerMapLength(t *testing.T) {
- t.Parallel()
-
- tmp := t.TempDir()
- file := filepath.Join(tmp, "testfile")
-
- touch(t, file)
-
- w := newWatcher(t)
- addWatch(t, w, file)
-
- var wg sync.WaitGroup
- wg.Add(1)
- go func() {
- defer wg.Done()
- for err := range w.Errors {
- t.Errorf("error received: %s", err)
- }
- }()
-
- rm(t, file)
- <-w.Events // consume Remove event
- <-time.After(50 * time.Millisecond) // wait IN_IGNORE propagated
-
- func() {
- w.mu.Lock()
- defer w.mu.Unlock()
- if len(w.watches) != 0 {
- t.Fatalf("Expected watches len is 0, but got: %d, %v", len(w.watches), w.watches)
- }
- if len(w.paths) != 0 {
- t.Fatalf("Expected paths len is 0, but got: %d, %v", len(w.paths), w.paths)
- }
- }()
-
- w.Close()
- wg.Wait()
-}
-
-func TestInotifyOverflow(t *testing.T) {
- t.Parallel()
-
- // We need to generate many more events than the
- // fs.inotify.max_queued_events sysctl setting. We use multiple goroutines
- // (one per directory) to speed up file creation.
- numDirs := 128
- numFiles := 1024
-
- tmp := t.TempDir()
- w := newWatcher(t)
- defer w.Close()
-
- for dn := 0; dn < numDirs; dn++ {
- dir := fmt.Sprintf("%s/%d", tmp, dn)
- mkdir(t, dir, noWait)
- addWatch(t, w, dir)
- }
-
- errChan := make(chan error, numDirs*numFiles)
-
- // All events need to be in the inotify queue before pulling events off it
- // to trigger this error.
- wg := sync.WaitGroup{}
- for dn := 0; dn < numDirs; dn++ {
- dir := fmt.Sprintf("%s/%d", tmp, dn)
-
- wg.Add(1)
- go func() {
- for fn := 0; fn < numFiles; fn++ {
- testFile := fmt.Sprintf("%s/%d", dir, fn)
-
- handle, err := os.Create(testFile)
- if err != nil {
- errChan <- fmt.Errorf("Create failed: %v", err)
- continue
- }
-
- err = handle.Close()
- if err != nil {
- errChan <- fmt.Errorf("Close failed: %v", err)
- continue
- }
- }
- wg.Done()
- }()
- }
- wg.Wait()
-
- creates := 0
- overflows := 0
-
- after := time.After(10 * time.Second)
- for overflows == 0 && creates < numDirs*numFiles {
- select {
- case <-after:
- t.Fatalf("Not done")
- case err := <-errChan:
- t.Fatalf("Got an error from file creator goroutine: %v", err)
- case err := <-w.Errors:
- if err == ErrEventOverflow {
- overflows++
- } else {
- t.Fatalf("Got an error from watcher: %v", err)
- }
- case evt := <-w.Events:
- if !strings.HasPrefix(evt.Name, tmp) {
- t.Fatalf("Got an event for an unknown file: %s", evt.Name)
- }
- if evt.Op == Create {
- creates++
- }
- }
- }
-
- if creates == numDirs*numFiles {
- t.Fatalf("Could not trigger overflow")
- }
-
- if overflows == 0 {
- t.Fatalf("No overflow and not enough creates (expected %d, got %d)",
- numDirs*numFiles, creates)
- }
-}
-
+// TODO: should generalize this and run for all backends.
func TestInotifyNoBlockingSyscalls(t *testing.T) {
test := func() error {
getThreads := func() (int, error) {
}
}
-// TODO: the below should probably be in integration_test, as they're not really
-// inotify-specific as far as I can see.
-
-func TestInotifyRemoveTwice(t *testing.T) {
+// Ensure that the correct error is returned on overflows.
+func TestInotifyOverflow(t *testing.T) {
t.Parallel()
tmp := t.TempDir()
- testFile := filepath.Join(tmp, "testfile")
-
- touch(t, testFile)
-
w := newWatcher(t)
defer w.Close()
- addWatch(t, w, testFile)
- err := w.Remove(testFile)
- if err != nil {
- t.Fatal(err)
+ // We need to generate many more events than the
+ // fs.inotify.max_queued_events sysctl setting.
+ numDirs, numFiles := 128, 1024
+
+ // All events need to be in the inotify queue before pulling events off it
+ // to trigger this error.
+ var wg sync.WaitGroup
+ for i := 0; i < numDirs; i++ {
+ wg.Add(1)
+ go func(i int) {
+ defer wg.Done()
+
+ dir := filepath.Join(tmp, strconv.Itoa(i))
+ mkdir(t, dir, noWait)
+ addWatch(t, w, dir)
+
+ createFiles(t, dir, "", numFiles, 10*time.Second)
+ }(i)
}
+ wg.Wait()
- err = w.Remove(testFile)
- if err == nil {
- t.Fatalf("no error on removing invalid file")
- } else if !errors.Is(err, ErrNonExistentWatch) {
- t.Fatalf("remove %q: %s", testFile, err)
+ var (
+ creates = 0
+ overflows = 0
+ )
+ for overflows == 0 && creates < numDirs*numFiles {
+ select {
+ case <-time.After(10 * time.Second):
+ t.Fatalf("Not done")
+ case err := <-w.Errors:
+ if !errors.Is(err, ErrEventOverflow) {
+ t.Fatalf("unexpected error from watcher: %v", err)
+ }
+ overflows++
+ case e := <-w.Events:
+ if !strings.HasPrefix(e.Name, tmp) {
+ t.Fatalf("Event for unknown file: %s", e.Name)
+ }
+ if e.Op == Create {
+ creates++
+ }
+ }
}
- w.mu.Lock()
- defer w.mu.Unlock()
- if len(w.watches) != 0 {
- t.Fatalf("Expected watches len is 0, but got: %d, %v", len(w.watches), w.watches)
+ if creates == numDirs*numFiles {
+ t.Fatalf("could not trigger overflow")
}
- if len(w.paths) != 0 {
- t.Fatalf("Expected paths len is 0, but got: %d, %v", len(w.paths), w.paths)
+ if overflows == 0 {
+ t.Fatalf("no overflow and not enough CREATE events (expected %d, got %d)",
+ numDirs*numFiles, creates)
}
}
-func TestInotifyWatchList(t *testing.T) {
+// Test inotify's "we don't send REMOVE until all file descriptors are closed"
+// behaviour.
+func TestInotifyDeleteOpenFile(t *testing.T) {
t.Parallel()
tmp := t.TempDir()
- testFile := filepath.Join(tmp, "testfile")
+ file := filepath.Join(tmp, "file")
- touch(t, testFile)
+ touch(t, file)
+ fp, err := os.Open(file)
+ if err != nil {
+ t.Fatalf("Create failed: %v", err)
+ }
+ defer fp.Close()
- w := newWatcher(t)
- defer w.Close()
- addWatch(t, w, testFile)
- addWatch(t, w, tmp)
+ w := newCollector(t, file)
+ w.collect(t)
- value := w.WatchList()
+ rm(t, file)
+ e := w.events(t)
+ cmpEvents(t, tmp, e, newEvents(t, `chmod /file`))
- w.mu.Lock()
- defer w.mu.Unlock()
- for _, entry := range value {
- if _, ok := w.watches[entry]; !ok {
- t.Fatal("return value of WatchList is not same as the expected")
- }
- }
+ fp.Close()
+ e = w.stop(t)
+ cmpEvents(t, tmp, e, newEvents(t, `remove /file`))
}
-func TestInotifyDeleteOpenedFile(t *testing.T) {
- t.Parallel()
+func TestRemoveState(t *testing.T) {
+ var (
+ tmp = t.TempDir()
+ dir = filepath.Join(tmp, "dir")
+ file = filepath.Join(dir, "file")
+ )
+ mkdir(t, dir)
+ touch(t, file)
- tmp := t.TempDir()
- testFile := filepath.Join(tmp, "testfile")
+ w := newWatcher(t, tmp)
+ addWatch(t, w, tmp)
+ addWatch(t, w, file)
- fd, err := os.Create(testFile)
- if err != nil {
- t.Fatalf("Create failed: %v", err)
+ check := func(want int) {
+ t.Helper()
+ if len(w.watches) != want {
+ t.Error(w.watches)
+ }
+ if len(w.paths) != want {
+ t.Error(w.paths)
+ }
}
- defer fd.Close()
- w := newWatcher(t)
- defer w.Close()
- addWatch(t, w, testFile)
+ check(2)
- checkEvent := func(exp Op) {
- select {
- case event := <-w.Events:
- t.Logf("Event received: %s", event.Op)
- if event.Op != exp {
- t.Fatalf("Event expected: %s, got: %s", exp, event.Op)
- }
- case <-time.After(100 * time.Millisecond):
- t.Fatalf("Expected %s event not received", exp)
- }
+ if err := w.Remove(file); err != nil {
+ t.Fatal(err)
}
+ check(1)
- // Remove the (opened) file, check Chmod event (notifying about file link
- // count change) is received
- rm(t, testFile)
- checkEvent(Chmod)
-
- // Close the file, check Remove event is received
- fd.Close()
- checkEvent(Remove)
+ if err := w.Remove(tmp); err != nil {
+ t.Fatal(err)
+ }
+ check(0)
}
for name := range w.watches {
pathsToRemove = append(pathsToRemove, name)
}
- w.mu.Unlock()
- // unlock before calling Remove, which also locks
-
+ w.mu.Unlock() // Unlock before calling Remove, which also locks
for _, name := range pathsToRemove {
w.Remove(name)
}
// Send "quit" message to the reader goroutine.
unix.Close(w.closepipe[1])
+ close(w.done)
return nil
}
w.mu.Lock()
isDir := w.paths[watchfd].isDir
delete(w.watches, name)
+ delete(w.userWatches, name)
parentName := filepath.Dir(name)
delete(w.watchesByDir[parentName], watchfd)
delete(w.paths, watchfd)
delete(w.dirFlags, name)
+ delete(w.fileExists, name)
w.mu.Unlock()
// Find all watched paths that are in this directory that are not external.
w.mu.Lock()
defer w.mu.Unlock()
- entries := make([]string, 0, len(w.watches))
- for pathname := range w.watches {
+ entries := make([]string, 0, len(w.userWatches))
+ for pathname := range w.userWatches {
entries = append(entries, pathname)
}
// readEvents reads from kqueue and converts the received kevents into
// Event values that it sends down the Events channel.
func (w *Watcher) readEvents() {
- eventBuffer := make([]unix.Kevent_t, 10)
defer func() {
err := unix.Close(w.kq)
if err != nil {
w.Errors <- err
}
unix.Close(w.closepipe[0])
- close(w.done)
close(w.Events)
close(w.Errors)
}()
+ eventBuffer := make([]unix.Kevent_t, 10)
for closed := false; !closed; {
kevents, err := w.read(eventBuffer)
// EINTR is okay, the syscall was interrupted before timeout expired.
if err != nil && err != unix.EINTR {
- if !w.sendError(err) {
+ if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) {
closed = true
}
continue
return nil
}
-// sendDirectoryEvents searches the directory for newly created files
-// and sends them over the event channel. This functionality is to have
-// the BSD version of fsnotify match Linux inotify which provides a
-// create event for files created in a watched directory.
-func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
+// Search the directory for new files and send an event for them.
+//
+// This functionality is to have the BSD watcher match inotify, which sends
+// a create event for files created in a watched directory.
+func (w *Watcher) sendDirectoryChangeEvents(dir string) {
// Get all files
- files, err := ioutil.ReadDir(dirPath)
+ files, err := ioutil.ReadDir(dir)
if err != nil {
- if !w.sendError(err) {
+ if !w.sendError(fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)) {
return
}
}
// Search for new files
- for _, fileInfo := range files {
- filePath := filepath.Join(dirPath, fileInfo.Name())
- err := w.sendFileCreatedEventIfNew(filePath, fileInfo)
+ for _, fi := range files {
+ err := w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi)
if err != nil {
return
}
_, doesExist := w.fileExists[filePath]
w.mu.Unlock()
if !doesExist {
- // Send create event
if !w.sendEvent(Event{Name: filePath, Op: Create}) {
return
}
--- /dev/null
+//go:build freebsd || openbsd || netbsd || dragonfly || darwin
+// +build freebsd openbsd netbsd dragonfly darwin
+
+package fsnotify
+
+import (
+ "fmt"
+ "path/filepath"
+ "strings"
+ "testing"
+)
+
+func TestRemoveState(t *testing.T) {
+ var (
+ tmp = t.TempDir()
+ dir = filepath.Join(tmp, "dir")
+ file = filepath.Join(dir, "file")
+ )
+ mkdir(t, dir)
+ touch(t, file)
+
+ w := newWatcher(t, tmp)
+ addWatch(t, w, tmp)
+ addWatch(t, w, file)
+
+ check := func(wantUser, wantTotal int) {
+ t.Helper()
+
+ if len(w.watches) != wantTotal {
+ var d []string
+ for k, v := range w.watches {
+ d = append(d, fmt.Sprintf("%#v = %#v", k, v))
+ }
+ t.Errorf("unexpected number of entries in w.watches (have %d, want %d):\n%v",
+ len(w.watches), wantTotal, strings.Join(d, "\n"))
+ }
+ if len(w.paths) != wantTotal {
+ var d []string
+ for k, v := range w.paths {
+ d = append(d, fmt.Sprintf("%#v = %#v", k, v))
+ }
+ t.Errorf("unexpected number of entries in w.paths (have %d, want %d):\n%v",
+ len(w.paths), wantTotal, strings.Join(d, "\n"))
+ }
+ if len(w.userWatches) != wantUser {
+ var d []string
+ for k, v := range w.userWatches {
+ d = append(d, fmt.Sprintf("%#v = %#v", k, v))
+ }
+ t.Errorf("unexpected number of entries in w.userWatches (have %d, want %d):\n%v",
+ len(w.userWatches), wantUser, strings.Join(d, "\n"))
+ }
+ }
+
+ check(2, 3)
+
+ if err := w.Remove(file); err != nil {
+ t.Fatal(err)
+ }
+ check(1, 2)
+
+ if err := w.Remove(tmp); err != nil {
+ t.Fatal(err)
+ }
+ check(0, 0)
+
+ // Don't check these after every remove, since they don't map easily to the
+ // number of file watches. Just make sure they're 0 after everything is removed.
+ {
+ want := 0
+ if len(w.watchesByDir) != want {
+ var d []string
+ for k, v := range w.watchesByDir {
+ d = append(d, fmt.Sprintf("%#v = %#v", k, v))
+ }
+ t.Errorf("unexpected number of entries in w.watchesByDir (have %d, want %d):\n%v",
+ len(w.watchesByDir), want, strings.Join(d, "\n"))
+ }
+ if len(w.dirFlags) != want {
+ var d []string
+ for k, v := range w.dirFlags {
+ d = append(d, fmt.Sprintf("%#v = %#v", k, v))
+ }
+ t.Errorf("unexpected number of entries in w.dirFlags (have %d, want %d):\n%v",
+ len(w.dirFlags), want, strings.Join(d, "\n"))
+ }
+
+ if len(w.fileExists) != want {
+ var d []string
+ for k, v := range w.fileExists {
+ d = append(d, fmt.Sprintf("%#v = %#v", k, v))
+ }
+ t.Errorf("unexpected number of entries in w.fileExists (have %d, want %d):\n%v",
+ len(w.fileExists), want, strings.Join(d, "\n"))
+ }
+ }
+}
w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
delete(watch.names, name)
}
+
return w.startRead(watch)
}
--- /dev/null
+//go:build windows
+// +build windows
+
+package fsnotify
+
+import (
+ "fmt"
+ "path/filepath"
+ "strings"
+ "testing"
+)
+
+func TestRemoveState(t *testing.T) {
+ // TODO: the Windows backend is too confusing; needs some serious attention.
+ return
+
+ var (
+ tmp = t.TempDir()
+ dir = filepath.Join(tmp, "dir")
+ file = filepath.Join(dir, "file")
+ )
+ mkdir(t, dir)
+ touch(t, file)
+
+ w := newWatcher(t, tmp)
+ addWatch(t, w, tmp)
+ addWatch(t, w, file)
+
+ check := func(want int) {
+ t.Helper()
+ if len(w.watches) != want {
+ var d []string
+ for k, v := range w.watches {
+ d = append(d, fmt.Sprintf("%#v = %#v", k, v))
+ }
+ t.Errorf("unexpected number of entries in w.watches (have %d, want %d):\n%v",
+ len(w.watches), want, strings.Join(d, "\n"))
+ }
+ }
+
+ check(2)
+
+ if err := w.Remove(file); err != nil {
+ t.Fatal(err)
+ }
+ check(1)
+
+ if err := w.Remove(tmp); err != nil {
+ t.Fatal(err)
+ }
+ check(0)
+}
import (
"errors"
"fmt"
+ "os"
"path/filepath"
+ "reflect"
"runtime"
+ "sort"
"strings"
"sync/atomic"
"testing"
"github.com/fsnotify/fsnotify/internal"
)
+// Set soft open file limit to the maximum; on e.g. OpenBSD it's 512/1024.
+//
+// Go 1.19 will always do this when the os package is imported.
+//
+// https://go-review.googlesource.com/c/go/+/393354/
+func init() {
+ internal.SetRlimit()
+}
+
func TestWatch(t *testing.T) {
tests := []testCase{
{"multiple creates", func(t *testing.T, w *Watcher, tmp string) {
remove /sub
remove /file
+ # TODO: not sure why the REMOVE /sub is dropped.
+ dragonfly:
+ create /sub
+ create /file
+ remove /file
+
# Windows includes a write for the /sub dir too, two of them even(?)
windows:
create /sub
WRITE "/file"
REMOVE "/file"
`},
+
+ {"watch same dir twice", func(t *testing.T, w *Watcher, tmp string) {
+ addWatch(t, w, tmp)
+ addWatch(t, w, tmp)
+
+ touch(t, tmp, "file")
+ cat(t, "hello", tmp, "file")
+ rm(t, tmp, "file")
+ mkdir(t, tmp, "dir")
+ }, `
+ create /file
+ write /file
+ remove /file
+ create /dir
+ `},
+
+ {"watch same file twice", func(t *testing.T, w *Watcher, tmp string) {
+ file := filepath.Join(tmp, "file")
+ touch(t, file)
+
+ addWatch(t, w, file)
+ addWatch(t, w, file)
+
+ cat(t, "hello", tmp, "file")
+ }, `
+ write /file
+ `},
}
for _, tt := range tests {
# No remove event for inotify; inotify just sends MOVE_SELF.
linux:
create /renamed
+
+ # TODO: this is broken.
+ dragonfly:
+ REMOVE|WRITE "/"
`},
{"rename watched directory", func(t *testing.T, w *Watcher, tmp string) {
windows:
create /link
write /link
+
+ # No events at all on Dragonfly
+ # TODO: should fix this.
+ dragonfly:
+ empty
`},
{"cyclic symlink", func(t *testing.T, w *Watcher, tmp string) {
func TestWatchRm(t *testing.T) {
tests := []testCase{
+ {"remove watched file", func(t *testing.T, w *Watcher, tmp string) {
+ file := filepath.Join(tmp, "file")
+ touch(t, file)
+
+ addWatch(t, w, file)
+ rm(t, file)
+ }, `
+ REMOVE "/file"
+
+ # unlink always emits a CHMOD on Linux (inotify reports the link count change as IN_ATTRIB).
+ linux:
+ CHMOD "/file"
+ REMOVE "/file"
+ `},
+
+ {"remove watched file with open fd", func(t *testing.T, w *Watcher, tmp string) {
+ if runtime.GOOS == "windows" {
+ t.Skip("Windows hard-locks open files so this will never work")
+ }
+
+ file := filepath.Join(tmp, "file")
+ touch(t, file)
+
+ // Intentionally don't close the descriptor here so it stays around.
+ _, err := os.Open(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ addWatch(t, w, file)
+ rm(t, file)
+ }, `
+ REMOVE "/file"
+
+ # inotify will just emit a CHMOD for the unlink, but won't actually
+ # emit a REMOVE until the descriptor is closed. Bit odd, but not much
+ # we can do about it. The REMOVE is tested in TestInotifyDeleteOpenFile()
+ linux:
+ CHMOD "/file"
+ `},
+
{"remove watched directory", func(t *testing.T, w *Watcher, tmp string) {
if runtime.GOOS == "openbsd" || runtime.GOOS == "netbsd" {
t.Skip("behaviour is inconsistent on OpenBSD and NetBSD, and this test is flaky")
}
func TestClose(t *testing.T) {
+ chanClosed := func(t *testing.T, w *Watcher) {
+ t.Helper()
+
+ // Need a small sleep as Close() on kqueue does all sorts of things,
+ // which may take a little bit.
+ switch runtime.GOOS {
+ case "freebsd", "openbsd", "netbsd", "dragonfly", "darwin":
+ time.Sleep(5 * time.Millisecond)
+ }
+
+ select {
+ default:
+ t.Fatal("blocking on Events")
+ case _, ok := <-w.Events:
+ if ok {
+ t.Fatal("Events not closed")
+ }
+ }
+ select {
+ default:
+ t.Fatal("blocking on Errors")
+ case _, ok := <-w.Errors:
+ if ok {
+ t.Fatal("Errors not closed")
+ }
+ }
+ }
+
t.Run("close", func(t *testing.T) {
t.Parallel()
if err := w.Close(); err != nil {
t.Fatal(err)
}
+ chanClosed(t, w)
var done int32
go func() {
if err := w.Close(); err != nil {
t.Fatal(err)
}
+
+ // TODO: windows backend doesn't work well here; can't easily fix it.
+ // Need to rewrite things a bit.
+ if runtime.GOOS != "windows" {
+ chanClosed(t, w)
+ }
})
// Make sure that calling Close() while REMOVE events are emitted doesn't race.
go w.Close()
}
})
+
+ t.Run("closes channels after read", func(t *testing.T) {
+ t.Parallel()
+
+ tmp := t.TempDir()
+
+ w := newCollector(t, tmp)
+ w.collect(t)
+ touch(t, tmp, "qwe")
+ touch(t, tmp, "asd")
+
+ if err := w.w.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+ chanClosed(t, w.w)
+ })
}
func TestAdd(t *testing.T) {
})
t.Run("remove same dir twice", func(t *testing.T) {
+ t.Parallel()
+
tmp := t.TempDir()
touch(t, tmp, "file")
if err := w.Remove(tmp); err != nil {
t.Fatal(err)
}
- if err := w.Remove(tmp); err == nil {
+ err := w.Remove(tmp)
+ if err == nil {
t.Fatal("no error")
}
+ if !errors.Is(err, ErrNonExistentWatch) {
+ t.Fatalf("wrong error: %T", err)
+ }
})
// Make sure that concurrent calls to Remove() don't race.
})
}
}
+
+func isKqueue() bool {
+ switch runtime.GOOS {
+ case "linux", "windows":
+ return false
+ }
+ return true
+}
+
+// Verify the watcher can keep up with file creations/deletions when under load.
+func TestWatchStress(t *testing.T) {
+ // On NetBSD ioutil.ReadDir in sendDirectoryChangeEvents() returns EINVAL
+ // ~80% of the time:
+ //
+ // readdirent /tmp/TestWatchStress3584363325/001: invalid argument
+ //
+ // This ends up calling getdents(), the manpage says:
+ //
+ // [EINVAL] A directory was being read on NFS, but it was modified on the
+ // server while it was being read.
+ //
+ // Which is, eh, odd? Maybe I read the code wrong and it's calling another
+ // function too(?)
+ //
+ // Because this happens on the Errors channel we can't "skip" it like with
+ // other kqueue platforms, so just skip the entire test for now.
+ //
+ // TODO: fix this.
+ if runtime.GOOS == "netbsd" {
+ t.Skip("broken on NetBSD")
+ }
+
+ Errorf := func(t *testing.T, msg string, args ...interface{}) {
+ if !isKqueue() {
+ t.Errorf(msg, args...)
+ return
+ }
+
+ // On kqueue platforms it doesn't seem to sync properly; see comment for
+ // the sleep below.
+ //
+ // TODO: fix this.
+ t.Logf(msg, args...)
+ t.Skip("flaky on kqueue; allowed to fail")
+ }
+
+ tmp := t.TempDir()
+ w := newCollector(t, tmp)
+ w.collect(t)
+
+ fmtNum := func(n int) string {
+ s := fmt.Sprintf("%09d", n)
+ return s[:3] + "_" + s[3:6] + "_" + s[6:]
+ }
+
+ var (
+ numFiles = 1_500_000
+ runFor = 30 * time.Second
+ )
+ if testing.Short() {
+ runFor = time.Second
+ }
+
+ // Otherwise platforms with low limits such as OpenBSD and NetBSD will
+ // fail, since every watched file uses a file descriptor. Need superuser
+ // permissions and twiddling with /etc/login.conf to adjust them, so we
+ // can't "just increase it".
+ if isKqueue() && uint64(numFiles) > internal.Maxfiles() {
+ numFiles = int(internal.Maxfiles()) - 100
+ t.Logf("limiting files to %d due to max open files limit", numFiles)
+ }
+
+ var (
+ prefix = "xyz-prefix-"
+ done = make(chan struct{})
+ )
+ go func() {
+ numFiles = createFiles(t, tmp, prefix, numFiles, runFor)
+
+ // TODO: this shouldn't be needed; and if this is too short some very
+ // odd events happen:
+ //
+ // fsnotify_test.go:837: saw 42 unexpected events:
+ // REMOVE ""
+ // CREATE "."
+ // REMOVE ""
+ // CREATE "."
+ // REMOVE ""
+ // ...
+ //
+ // fsnotify_test.go:848: expected the following 3175 events, but didn't see them (showing first 100 only)
+ // REMOVE "/xyz-prefix-000_015_080"
+ // REMOVE "/xyz-prefix-000_014_536"
+ // CREATE "/xyz-prefix-000_015_416"
+ // CREATE "/xyz-prefix-000_015_406"
+ // ...
+ //
+ // Should really add a Sync() method which processes all outstanding
+ // events.
+ if isKqueue() {
+ time.Sleep(1000 * time.Millisecond)
+ if !testing.Short() {
+ time.Sleep(1000 * time.Millisecond)
+ }
+ }
+
+ for i := 0; i < numFiles; i++ {
+ rm(t, tmp, prefix+fmtNum(i), noWait)
+ }
+ close(done)
+ }()
+ <-done
+
+ have := w.stopWait(t, 10*time.Second)
+
+ // Do some work to get reasonably nice error reports; what cmpEvents() gives
+ // us is nice if you have just a few events, but with thousands it quickly
+ // gets unwieldy.
+
+ want := make(map[Event]struct{})
+ for i := 0; i < numFiles; i++ {
+ n := "/" + prefix + fmtNum(i)
+ want[Event{Name: n, Op: Remove}] = struct{}{}
+ want[Event{Name: n, Op: Create}] = struct{}{}
+ }
+
+ var extra Events
+ for _, h := range have {
+ h.Name = filepath.ToSlash(strings.TrimPrefix(h.Name, tmp))
+ _, ok := want[h]
+ if ok {
+ delete(want, h)
+ } else {
+ extra = append(extra, h)
+ }
+ }
+
+ if len(extra) > 0 {
+ if len(extra) > 100 {
+ Errorf(t, "saw %d unexpected events (showing first 100 only):\n%s", len(extra), extra[:100])
+ } else {
+ Errorf(t, "saw %d unexpected events:\n%s", len(extra), extra)
+ }
+ }
+
+ if len(want) != 0 {
+ wantE := make(Events, 0, len(want))
+ for k := range want {
+ wantE = append(wantE, k)
+ }
+
+ if len(wantE) > 100 {
+ Errorf(t, "expected the following %d events, but didn't see them (showing first 100 only)\n%s", len(wantE), wantE[:100])
+ } else {
+ Errorf(t, "expected the following %d events, but didn't see them\n%s", len(wantE), wantE)
+ }
+ }
+}
+
+func TestWatchList(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ // TODO: probably should I guess...
+ t.Skip("WatchList has always beek broken on Windows and I don't feel like fixing it")
+ }
+
+ t.Parallel()
+
+ tmp := t.TempDir()
+ file := filepath.Join(tmp, "file")
+ other := filepath.Join(tmp, "other")
+
+ touch(t, file)
+ touch(t, other)
+
+ w := newWatcher(t, file, tmp)
+ defer w.Close()
+
+ have := w.WatchList()
+ sort.Strings(have)
+ want := []string{tmp, file}
+ if !reflect.DeepEqual(have, want) {
+ t.Errorf("\nhave: %s\nwant: %s", have, want)
+ }
+}
}
// addWatch adds a watch for a directory
-func addWatch(t *testing.T, watcher *Watcher, path ...string) {
+func addWatch(t *testing.T, w *Watcher, path ...string) {
t.Helper()
if len(path) < 1 {
t.Fatalf("addWatch: path must have at least one element: %s", path)
}
- err := watcher.Add(filepath.Join(path...))
+ err := w.Add(filepath.Join(path...))
if err != nil {
t.Fatalf("addWatch(%q): %s", filepath.Join(path...), err)
}
return true
}
+// Create n empty files with the prefix in the directory dir.
+func createFiles(t *testing.T, dir, prefix string, n int, d time.Duration) int {
+ t.Helper()
+
+ if d == 0 {
+ d = 9 * time.Minute
+ }
+
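+ // Zero-pad to 9 digits and group with underscores ("000_001_500" for 1500)
+ // so the names sort lexicographically and stay readable in logs.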
+ fmtNum := func(n int) string {
+ s := fmt.Sprintf("%09d", n)
+ return s[:3] + "_" + s[3:6] + "_" + s[6:]
+ }
+
+ var (
+ max = time.After(d)
+ created int
+ )
+ for i := 0; i < n; i++ {
+ select {
+ case <-max:
+ t.Logf("createFiles: stopped at %s files because it took longer than %s", fmtNum(created), d)
+ return created
+ default:
+ fp, err := os.Create(filepath.Join(dir, prefix+fmtNum(i)))
+ if err != nil {
+ t.Errorf("create failed for %s: %s", fmtNum(i), err)
+ continue
+ }
+ if err := fp.Close(); err != nil {
+ t.Errorf("close failed for %s: %s", fmtNum(i), err)
+ }
+ if i%10_000 == 0 {
+ t.Logf("createFiles: %s", fmtNum(i))
+ }
+ created++
+ }
+ }
+ return created
+}
+
// mkdir
func mkdir(t *testing.T, path ...string) {
t.Helper()
//
// events := w.stop(t)
type eventCollector struct {
- w *Watcher
- events Events
- mu sync.Mutex
- done chan struct{}
+ w *Watcher
+ e Events
+ mu sync.Mutex
+ done chan struct{}
}
-func newCollector(t *testing.T) *eventCollector {
- return &eventCollector{w: newWatcher(t), done: make(chan struct{})}
+func newCollector(t *testing.T, add ...string) *eventCollector {
+ return &eventCollector{
+ w: newWatcher(t, add...),
+ done: make(chan struct{}),
+ e: make(Events, 0, 8),
+ }
}
+// stop collecting events and return what we've got.
func (w *eventCollector) stop(t *testing.T) Events {
+ return w.stopWait(t, time.Second)
+}
+
+func (w *eventCollector) stopWait(t *testing.T, waitFor time.Duration) Events {
waitForEvents()
go func() {
}()
select {
- case <-time.After(1 * time.Second):
- t.Fatal("event stream was not closed after 1 second")
+ case <-time.After(waitFor):
+ t.Fatalf("event stream was not closed after %s", waitFor)
case <-w.done:
}
w.mu.Lock()
defer w.mu.Unlock()
- return w.events
+ return w.e
+}
+
+// Get all events we've found up to now and clear the event buffer.
+func (w *eventCollector) events(t *testing.T) Events {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ e := make(Events, len(w.e))
+ copy(e, w.e)
+ w.e = make(Events, 0, 16)
+ return e
}
+// Start collecting events.
func (w *eventCollector) collect(t *testing.T) {
go func() {
for {
return
}
w.mu.Lock()
- w.events = append(w.events, e)
+ w.e = append(w.e, e)
w.mu.Unlock()
}
}
fields := strings.Fields(line)
if len(fields) < 2 {
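+ // A line with the single word "empty" means these groups should see no events at all.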
+ if strings.ToUpper(fields[0]) == "EMPTY" {
+ for _, g := range groups {
+ events[g] = Events{}
+ }
+ continue
+ }
+
t.Fatalf("newEvents: line %d has less than 2 fields: %s", no, line)
}
})
if haveSort.String() != wantSort.String() {
+ //t.Error("\n" + ztest.Diff(indent(haveSort), indent(wantSort)))
t.Errorf("\nhave:\n%s\nwant:\n%s", indent(have), indent(want))
}
}
--- /dev/null
+//go:build darwin
+// +build darwin
+
+package internal
+
+import (
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+var (
+ SyscallEACCES = syscall.EACCES
+ UnixEACCES = unix.EACCES
+)
+
+var maxfiles uint64
+
+// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/
+func SetRlimit() {
+ var l syscall.Rlimit
+ err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l)
+ if err == nil && l.Cur != l.Max {
+ l.Cur = l.Max
+ syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l)
+ }
+ maxfiles = l.Cur
+
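+ // The rlimit alone isn't the whole story on macOS: kern.maxfiles and
+ // kern.maxfilesperproc seem to be the effective caps, so clamp to those.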
+ if n, err := syscall.SysctlUint32("kern.maxfiles"); err == nil && uint64(n) < maxfiles {
+ maxfiles = uint64(n)
+ }
+
+ if n, err := syscall.SysctlUint32("kern.maxfilesperproc"); err == nil && uint64(n) < maxfiles {
+ maxfiles = uint64(n)
+ }
+}
+
+func Maxfiles() uint64 { return maxfiles }
-//go:build !windows
-// +build !windows
+//go:build !windows && !darwin
+// +build !windows,!darwin
package internal
SyscallEACCES = syscall.EACCES
UnixEACCES = unix.EACCES
)
+
+var maxfiles uint64
+
+// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/
+func SetRlimit() {
+ var l syscall.Rlimit
+ err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l)
+ if err == nil && l.Cur != l.Max {
+ l.Cur = l.Max
+ syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l)
+ }
+ maxfiles = uint64(l.Cur)
+}
+
+func Maxfiles() uint64 { return maxfiles }
SyscallEACCES = errors.New("dummy")
UnixEACCES = errors.New("dummy")
)
+
+func SetRlimit() {}
+
+func Maxfiles() uint64 { return 1<<64 - 1 }