all: Bunch of more linter fixes (#5500)

Jakob Borg 2019-02-02 11:02:28 +01:00 committed by GitHub
parent 2111386ee4
commit df5c1eaf01
GPG Key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
30 changed files with 118 additions and 117 deletions

View File

@@ -126,9 +126,9 @@ func (w *broadcastWriter) Serve() {
     for _, ip := range dsts {
         dst := &net.UDPAddr{IP: ip, Port: w.port}
-        conn.SetWriteDeadline(time.Now().Add(time.Second))
+        _ = conn.SetWriteDeadline(time.Now().Add(time.Second))
         _, err := conn.WriteTo(bs, dst)
-        conn.SetWriteDeadline(time.Time{})
+        _ = conn.SetWriteDeadline(time.Time{})
         if err, ok := err.(net.Error); ok && err.Timeout() {
             // Write timeouts should not happen. We treat it as a fatal

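The change pattern in this commit, here and in the hunks below, is to make deliberately ignored error returns explicit: a bare call whose error we accept becomes _ = call(...), and a deferred call becomes defer func() { _ = call() }(), which keeps errcheck-style linters quiet without changing behaviour. A minimal standalone sketch of both idioms (the file name and calls are illustrative, not taken from this commit):

package main

import (
    "log"
    "os"
    "time"
)

func main() {
    // Hypothetical file, only here to demonstrate the two idioms.
    f, err := os.Create("example.txt")
    if err != nil {
        log.Fatal(err)
    }
    // Deferred call whose error we deliberately ignore: wrap it so the
    // discard is explicit instead of silently dropping f.Close()'s result.
    defer func() { _ = f.Close() }()

    // Best-effort call where failure is acceptable: discard explicitly so
    // the linter sees a deliberate decision rather than an oversight.
    _ = os.Chtimes("example.txt", time.Now(), time.Now())

    if _, err := f.WriteString("hello\n"); err != nil {
        log.Fatal(err)
    }
}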
View File

@@ -117,9 +117,9 @@ func (w *multicastWriter) Serve() {
     success := 0
     for _, intf := range intfs {
         wcm.IfIndex = intf.Index
-        pconn.SetWriteDeadline(time.Now().Add(time.Second))
+        _ = pconn.SetWriteDeadline(time.Now().Add(time.Second))
         _, err = pconn.WriteTo(bs, wcm, gaddr)
-        pconn.SetWriteDeadline(time.Time{})
+        _ = pconn.SetWriteDeadline(time.Time{})
         if err != nil {
             l.Debugln(err, "on write to", gaddr, intf.Name)

View File

@@ -71,9 +71,9 @@ func New(myID protocol.DeviceID) Configuration {
     cfg.Version = CurrentVersion
     cfg.OriginalVersion = CurrentVersion
-    util.SetDefaults(&cfg)
-    util.SetDefaults(&cfg.Options)
-    util.SetDefaults(&cfg.GUI)
+    _ = util.SetDefaults(&cfg)
+    _ = util.SetDefaults(&cfg.Options)
+    _ = util.SetDefaults(&cfg.GUI)
     // Can't happen.
     if err := cfg.prepare(myID); err != nil {
@@ -86,9 +86,9 @@ func New(myID protocol.DeviceID) Configuration {
 func ReadXML(r io.Reader, myID protocol.DeviceID) (Configuration, error) {
     var cfg Configuration
-    util.SetDefaults(&cfg)
-    util.SetDefaults(&cfg.Options)
-    util.SetDefaults(&cfg.GUI)
+    _ = util.SetDefaults(&cfg)
+    _ = util.SetDefaults(&cfg.Options)
+    _ = util.SetDefaults(&cfg.GUI)
     if err := xml.NewDecoder(r).Decode(&cfg); err != nil {
         return Configuration{}, err
@@ -104,9 +104,9 @@ func ReadXML(r io.Reader, myID protocol.DeviceID) (Configuration, error) {
 func ReadJSON(r io.Reader, myID protocol.DeviceID) (Configuration, error) {
     var cfg Configuration
-    util.SetDefaults(&cfg)
-    util.SetDefaults(&cfg.Options)
-    util.SetDefaults(&cfg.GUI)
+    _ = util.SetDefaults(&cfg)
+    _ = util.SetDefaults(&cfg.Options)
+    _ = util.SetDefaults(&cfg.GUI)
     bs, err := ioutil.ReadAll(r)
     if err != nil {
@@ -211,7 +211,7 @@ found:
 }
 func (cfg *Configuration) clean() error {
-    util.FillNilSlices(&cfg.Options)
+    _ = util.FillNilSlices(&cfg.Options)
     // Prepare folders and check for duplicates. Duplicates are bad and
     // dangerous, can't currently be resolved in the GUI, and shouldn't
@@ -477,7 +477,7 @@ func convertV22V23(cfg *Configuration) {
     err = fs.Remove(DefaultMarkerName)
     if err == nil {
         err = fs.Mkdir(DefaultMarkerName, permBits)
-        fs.Hide(DefaultMarkerName) // ignore error
+        _ = fs.Hide(DefaultMarkerName) // ignore error
     }
     if err != nil {
         l.Infoln("Failed to upgrade folder marker:", err)
@@ -810,13 +810,13 @@ func cleanSymlinks(filesystem fs.Filesystem, dir string) {
         // should leave alone. Deduplicated files, for example.
         return
     }
-    filesystem.Walk(dir, func(path string, info fs.FileInfo, err error) error {
+    _ = filesystem.Walk(dir, func(path string, info fs.FileInfo, err error) error {
         if err != nil {
             return err
         }
         if info.IsSymlink() {
             l.Infoln("Removing incorrectly versioned symlink", path)
-            filesystem.Remove(path)
+            _ = filesystem.Remove(path)
             return fs.SkipDir
         }
         return nil

View File

@@ -548,7 +548,7 @@ func TestPrepare(t *testing.T) {
         t.Error("Expected nil")
     }
-    cfg.prepare(device1)
+    _ = cfg.prepare(device1)
     if cfg.Folders == nil || cfg.Devices == nil || cfg.Options.ListenAddresses == nil {
         t.Error("Unexpected nil")
@@ -627,7 +627,7 @@ func TestPullOrder(t *testing.T) {
     buf := new(bytes.Buffer)
     cfg := wrapper.RawCopy()
-    cfg.WriteXML(buf)
+    _ = cfg.WriteXML(buf)
     t.Logf("%s", buf.Bytes())
@@ -1080,7 +1080,7 @@ func TestDeviceConfigObservedNotNil(t *testing.T) {
         },
     }
-    cfg.prepare(device1)
+    _ = cfg.prepare(device1)
     for _, dev := range cfg.Devices {
         if dev.IgnoredFolders == nil {

View File

@@ -142,7 +142,7 @@ func (f *FolderConfiguration) CreateMarker() error {
     } else if err := dir.Sync(); err != nil {
         l.Debugln("folder marker: fsync . failed:", err)
     }
-    fs.Hide(DefaultMarkerName)
+    _ = fs.Hide(DefaultMarkerName)
     return nil
 }

View File

@@ -448,7 +448,7 @@ func (w *Wrapper) MyName() string {
 }
 func (w *Wrapper) AddOrUpdatePendingDevice(device protocol.DeviceID, name, address string) {
-    defer w.Save()
+    defer func() { _ = w.Save() }()
     w.mut.Lock()
     defer w.mut.Unlock()
@@ -471,7 +471,7 @@ func (w *Wrapper) AddOrUpdatePendingDevice(device protocol.DeviceID, name, addre
 }
 func (w *Wrapper) AddOrUpdatePendingFolder(id, label string, device protocol.DeviceID) {
-    defer w.Save()
+    defer func() { _ = w.Save() }()
     w.mut.Lock()
     defer w.mut.Unlock()

View File

@@ -257,17 +257,17 @@ func take(waiter waiter, tokens int) {
     if tokens < limiterBurstSize {
         // This is the by far more common case so we get it out of the way
         // early.
-        waiter.WaitN(context.TODO(), tokens)
+        _ = waiter.WaitN(context.TODO(), tokens)
         return
     }
     for tokens > 0 {
         // Consume limiterBurstSize tokens at a time until we're done.
         if tokens > limiterBurstSize {
-            waiter.WaitN(context.TODO(), limiterBurstSize)
+            _ = waiter.WaitN(context.TODO(), limiterBurstSize)
             tokens -= limiterBurstSize
         } else {
-            waiter.WaitN(context.TODO(), tokens)
+            _ = waiter.WaitN(context.TODO(), tokens)
             tokens = 0
         }
     }

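For context on the loop above: rate.Limiter.WaitN returns an error, rather than waiting, when asked for more tokens than the configured burst, which is why take() consumes large requests in limiterBurstSize chunks. A standalone sketch of the same chunking against golang.org/x/time/rate, with an assumed burst size (the real constant lives in this package):

package main

import (
    "context"
    "fmt"

    "golang.org/x/time/rate"
)

const burstSize = 64 // assumed; stands in for limiterBurstSize

// take waits for the given number of tokens, never asking the limiter for
// more than its burst in a single WaitN call.
func take(ctx context.Context, lim *rate.Limiter, tokens int) {
    if tokens < burstSize {
        _ = lim.WaitN(ctx, tokens)
        return
    }
    for tokens > 0 {
        if tokens > burstSize {
            _ = lim.WaitN(ctx, burstSize)
            tokens -= burstSize
        } else {
            _ = lim.WaitN(ctx, tokens)
            tokens = 0
        }
    }
}

func main() {
    lim := rate.NewLimiter(rate.Limit(1<<20), burstSize) // ~1 MiB/s, small burst
    take(context.TODO(), lim, 1000)
    fmt.Println("rate-limited 1000 tokens")
}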
View File

@@ -190,7 +190,7 @@ next:
             continue
         }
-        c.SetDeadline(time.Now().Add(20 * time.Second))
+        _ = c.SetDeadline(time.Now().Add(20 * time.Second))
         hello, err := protocol.ExchangeHello(c, s.model.GetHello(remoteID))
         if err != nil {
             if protocol.IsVersionMismatch(err) {
@@ -214,7 +214,7 @@ next:
                 c.Close()
                 continue
             }
-        c.SetDeadline(time.Time{})
+        _ = c.SetDeadline(time.Time{})
         // The Model will return an error for devices that we don't want to
         // have a connection with for whatever reason, for example unknown devices.
@@ -569,7 +569,7 @@ func (s *Service) CommitConfiguration(from, to config.Configuration) bool {
     for addr, listener := range s.listeners {
         if _, ok := seen[addr]; !ok || listener.Factory().Valid(to) != nil {
             l.Debugln("Stopping listener", addr)
-            s.listenerSupervisor.Remove(s.listenerTokens[addr])
+            _ = s.listenerSupervisor.Remove(s.listenerTokens[addr])
             delete(s.listenerTokens, addr)
             delete(s.listeners, addr)
         }
@@ -582,7 +582,7 @@ func (s *Service) CommitConfiguration(from, to config.Configuration) bool {
         s.natServiceToken = &token
     } else if !to.Options.NATEnabled && s.natServiceToken != nil {
         l.Debugln("Stopping NAT service")
-        s.Remove(*s.natServiceToken)
+        _ = s.Remove(*s.natServiceToken)
         s.natServiceToken = nil
     }
@@ -717,8 +717,8 @@ func warningFor(dev protocol.DeviceID, msg string) {
 }
 func tlsTimedHandshake(tc *tls.Conn) error {
-    tc.SetDeadline(time.Now().Add(tlsHandshakeTimeout))
-    defer tc.SetDeadline(time.Time{})
+    _ = tc.SetDeadline(time.Now().Add(tlsHandshakeTimeout))
+    defer func() { _ = tc.SetDeadline(time.Time{}) }()
     return tc.Handshake()
 }

View File

@@ -89,8 +89,8 @@ func (c internalConn) Close() {
     // *tls.Conn.Close() does more than it says on the tin. Specifically, it
     // sends a TLS alert message, which might block forever if the
     // connection is dead and we don't have a deadline set.
-    c.SetWriteDeadline(time.Now().Add(250 * time.Millisecond))
-    c.Conn.Close()
+    _ = c.SetWriteDeadline(time.Now().Add(250 * time.Millisecond))
+    _ = c.Conn.Close()
 }
 func (c internalConn) Type() string {

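The comment above explains the trick: give the potentially blocking close (the TLS close-notify write) a short write deadline so it cannot hang on a dead connection. A generic sketch of the same pattern for any net.Conn; the helper name is made up for illustration:

package main

import (
    "net"
    "time"
)

// closeWithDeadline bounds how long Close may block, e.g. when closing a TLS
// connection whose peer has gone away and the close-notify write would stall.
func closeWithDeadline(c net.Conn, d time.Duration) {
    _ = c.SetWriteDeadline(time.Now().Add(d))
    _ = c.Close()
}

func main() {
    a, b := net.Pipe()
    defer func() { _ = b.Close() }()
    closeWithDeadline(a, 250*time.Millisecond)
}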
View File

@@ -83,7 +83,7 @@ func (t *tcpListener) Serve() {
     const maxAcceptFailures = 10
     for {
-        listener.SetDeadline(time.Now().Add(time.Second))
+        _ = listener.SetDeadline(time.Now().Add(time.Second))
         conn, err := listener.Accept()
         select {
         case <-t.stop:

View File

@@ -19,7 +19,7 @@ func TestIgnoredFiles(t *testing.T) {
         t.Fatal(err)
     }
     db := NewLowlevel(ldb, "<memory>")
-    UpdateSchema(db)
+    _ = UpdateSchema(db)
     fs := NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), db)
@@ -204,7 +204,7 @@ func TestUpdate0to3(t *testing.T) {
 func TestDowngrade(t *testing.T) {
     db := OpenMemory()
-    UpdateSchema(db) // sets the min version etc
+    _ = UpdateSchema(db) // sets the min version etc
     // Bump the database version to something newer than we actually support
     miscDB := NewMiscDataNamespace(db)

View File

@@ -13,12 +13,9 @@ import (
     "github.com/syncthing/syncthing/lib/protocol"
     "github.com/syndtr/goleveldb/leveldb"
-    "github.com/syndtr/goleveldb/leveldb/iterator"
     "github.com/syndtr/goleveldb/leveldb/util"
 )
-type deletionHandler func(t readWriteTransaction, folder, device, name []byte, dbi iterator.Iterator)
 type instance struct {
     *Lowlevel
     keyer keyer

View File

@@ -61,7 +61,7 @@ func (n *NamespacedKV) Reset() {
 func (n *NamespacedKV) PutInt64(key string, val int64) {
     var valBs [8]byte
     binary.BigEndian.PutUint64(valBs[:], uint64(val))
-    n.db.Put(n.prefixedKey(key), valBs[:], nil)
+    _ = n.db.Put(n.prefixedKey(key), valBs[:], nil)
 }
 // Int64 returns the stored value interpreted as an int64 and a boolean that
@@ -79,7 +79,7 @@ func (n *NamespacedKV) Int64(key string) (int64, bool) {
 // type) is overwritten.
 func (n *NamespacedKV) PutTime(key string, val time.Time) {
     valBs, _ := val.MarshalBinary() // never returns an error
-    n.db.Put(n.prefixedKey(key), valBs, nil)
+    _ = n.db.Put(n.prefixedKey(key), valBs, nil)
 }
 // Time returns the stored value interpreted as a time.Time and a boolean
@@ -97,7 +97,7 @@ func (n NamespacedKV) Time(key string) (time.Time, bool) {
 // PutString stores a new string. Any existing value (even if of another type)
 // is overwritten.
 func (n *NamespacedKV) PutString(key, val string) {
-    n.db.Put(n.prefixedKey(key), []byte(val), nil)
+    _ = n.db.Put(n.prefixedKey(key), []byte(val), nil)
 }
 // String returns the stored value interpreted as a string and a boolean that
@@ -113,7 +113,7 @@ func (n NamespacedKV) String(key string) (string, bool) {
 // PutBytes stores a new byte slice. Any existing value (even if of another type)
 // is overwritten.
 func (n *NamespacedKV) PutBytes(key string, val []byte) {
-    n.db.Put(n.prefixedKey(key), val, nil)
+    _ = n.db.Put(n.prefixedKey(key), val, nil)
 }
 // Bytes returns the stored value as a raw byte slice and a boolean that
@@ -130,9 +130,9 @@ func (n NamespacedKV) Bytes(key string) ([]byte, bool) {
 // is overwritten.
 func (n *NamespacedKV) PutBool(key string, val bool) {
     if val {
-        n.db.Put(n.prefixedKey(key), []byte{0x0}, nil)
+        _ = n.db.Put(n.prefixedKey(key), []byte{0x0}, nil)
     } else {
-        n.db.Put(n.prefixedKey(key), []byte{0x1}, nil)
+        _ = n.db.Put(n.prefixedKey(key), []byte{0x1}, nil)
     }
 }
@@ -149,7 +149,7 @@ func (n NamespacedKV) Bool(key string) (bool, bool) {
 // Delete deletes the specified key. It is allowed to delete a nonexistent
 // key.
 func (n NamespacedKV) Delete(key string) {
-    n.db.Delete(n.prefixedKey(key), nil)
+    _ = n.db.Delete(n.prefixedKey(key), nil)
 }
 func (n NamespacedKV) prefixedKey(key string) []byte {

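As the Put* methods above show, NamespacedKV stores plain encodings under prefixed keys; int64 values, for instance, go in as 8 big-endian bytes. A small sketch of just that int64 round trip, independent of the database (the prefix and key name here are invented):

package main

import (
    "encoding/binary"
    "fmt"
)

// encodeInt64 matches the big-endian layout used by PutInt64 above.
func encodeInt64(val int64) []byte {
    var buf [8]byte
    binary.BigEndian.PutUint64(buf[:], uint64(val))
    return buf[:]
}

// decodeInt64 is the reverse operation; it rejects values of the wrong length.
func decodeInt64(bs []byte) (int64, bool) {
    if len(bs) != 8 {
        return 0, false
    }
    return int64(binary.BigEndian.Uint64(bs)), true
}

func main() {
    key := append([]byte("ns/"), "lastSeen"...) // hypothetical prefixed key
    val := encodeInt64(-42)                     // negative values survive the uint64 round trip
    got, ok := decodeInt64(val)
    fmt.Printf("%s -> %d (%v)\n", key, got, ok)
}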
View File

@@ -101,7 +101,7 @@ func (s *FileSet) recalcCounts() {
     })
     s.meta.SetCreated()
-    s.meta.toDB(s.db, []byte(s.folder))
+    _ = s.meta.toDB(s.db, []byte(s.folder))
 }
 func (s *FileSet) Drop(device protocol.DeviceID) {
@@ -127,7 +127,7 @@ func (s *FileSet) Drop(device protocol.DeviceID) {
         s.meta.resetAll(device)
     }
-    s.meta.toDB(s.db, []byte(s.folder))
+    _ = s.meta.toDB(s.db, []byte(s.folder))
 }
 func (s *FileSet) Update(device protocol.DeviceID, fs []protocol.FileInfo) {
@@ -141,7 +141,7 @@ func (s *FileSet) Update(device protocol.DeviceID, fs []protocol.FileInfo) {
     s.updateMutex.Lock()
     defer s.updateMutex.Unlock()
-    defer s.meta.toDB(s.db, []byte(s.folder))
+    defer func() { _ = s.meta.toDB(s.db, []byte(s.folder)) }()
     if device == protocol.LocalDeviceID {
         // For the local device we have a bunch of metadata to track.
@@ -295,7 +295,7 @@ func DropDeltaIndexIDs(db *Lowlevel) {
     dbi := db.NewIterator(util.BytesPrefix([]byte{KeyTypeIndexID}), nil)
     defer dbi.Release()
     for dbi.Next() {
-        db.Delete(dbi.Key(), nil)
+        _ = db.Delete(dbi.Key(), nil)
     }
 }

View File

@@ -82,7 +82,7 @@ func (i *smallIndex) ID(val []byte) uint32 {
     key := make([]byte, len(i.prefix)+8) // prefix plus uint32 id
     copy(key, i.prefix)
     binary.BigEndian.PutUint32(key[len(i.prefix):], id)
-    i.db.Put(key, val, nil)
+    _ = i.db.Put(key, val, nil)
     i.mut.Unlock()
     return id
@@ -115,7 +115,7 @@ func (i *smallIndex) Delete(val []byte) {
         // Put an empty value into the database. This indicates that the
         // entry does not exist any more and prevents the ID from being
         // reused in the future.
-        i.db.Put(key, []byte{}, nil)
+        _ = i.db.Put(key, []byte{}, nil)
         // Delete reverse mapping.
         delete(i.id2val, id)

View File

@@ -127,7 +127,7 @@ func (t readWriteTransaction) updateGlobal(gk, keyBuf, folder, device []byte, fi
     var fl VersionList
     if svl, err := t.Get(gk, nil); err == nil {
-        fl.Unmarshal(svl) // Ignore error, continue with empty fl
+        _ = fl.Unmarshal(svl) // Ignore error, continue with empty fl
     }
     fl, removedFV, removedAt, insertedAt := fl.update(folder, device, file, t.readOnlyTransaction)
     if insertedAt == -1 {

View File

@@ -23,13 +23,17 @@ func writeJSONS(w io.Writer, db *leveldb.DB) {
     defer it.Release()
     enc := json.NewEncoder(w)
     for it.Next() {
-        enc.Encode(map[string][]byte{
+        _ = enc.Encode(map[string][]byte{
             "k": it.Key(),
             "v": it.Value(),
         })
     }
 }
+// we know this function isn't generally used, nonetheless we want it in
+// here and the linter to not complain.
+var _ = writeJSONS
 // openJSONS reads a JSON stream file into a leveldb.DB
@@ -50,7 +54,7 @@ func openJSONS(file string) (*leveldb.DB, error) {
             return nil, err
         }
-        db.Put(row["k"], row["v"], nil)
+        _ = db.Put(row["k"], row["v"], nil)
     }
     return db, nil

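The var _ = writeJSONS line added above is the usual way to keep an intentionally unused function without tripping unused/deadcode linters: the blank assignment references the identifier without calling it. A standalone illustration with invented names:

package main

import "fmt"

// debugDump is kept around for ad-hoc debugging and is not called from
// normal code paths.
func debugDump() {
    fmt.Println("dump")
}

// The blank assignment counts as a use of debugDump, so unused-symbol
// linters stay quiet while the function remains available.
var _ = debugDump

func main() {
    fmt.Println("ok")
}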
View File

@@ -62,7 +62,7 @@ func dialWithFallback(proxyDialFunc dialFunc, fallbackDialFunc dialFunc, network
     conn, err := proxyDialFunc(network, addr)
     if err == nil {
         l.Debugf("Dialing %s address %s via proxy - success, %s -> %s", network, addr, conn.LocalAddr(), conn.RemoteAddr())
-        SetTCPOptions(conn)
+        _ = SetTCPOptions(conn)
         return dialerConn{
             conn, newDialerAddr(network, addr),
         }, nil
@@ -76,7 +76,7 @@ func dialWithFallback(proxyDialFunc dialFunc, fallbackDialFunc dialFunc, network
     conn, err = fallbackDialFunc(network, addr)
     if err == nil {
         l.Debugf("Dialing %s address %s via fallback - success, %s -> %s", network, addr, conn.LocalAddr(), conn.RemoteAddr())
-        SetTCPOptions(conn)
+        _ = SetTCPOptions(conn)
     } else {
         l.Debugf("Dialing %s address %s via fallback - error %s", network, addr, err)
     }

View File

@@ -96,7 +96,7 @@ func TestCacheSlowLookup(t *testing.T) {
     // Start a lookup, which will take at least a second
     t0 := time.Now()
-    go c.Lookup(protocol.LocalDeviceID)
+    go func() { _, _ = c.Lookup(protocol.LocalDeviceID) }()
     <-started // The slow lookup method has been called so we're inside the lock
     // It should be possible to get ChildErrors while it's running

View File

@@ -77,7 +77,7 @@ func TestGlobalOverHTTP(t *testing.T) {
     s := new(fakeDiscoveryServer)
     mux := http.NewServeMux()
     mux.HandleFunc("/", s.handler)
-    go http.Serve(list, mux)
+    go func() { _ = http.Serve(list, mux) }()
     // This should succeed
     addresses, err := testLookup("http://" + list.Addr().String() + "?insecure&noannounce")
@@ -125,7 +125,7 @@ func TestGlobalOverHTTPS(t *testing.T) {
     s := new(fakeDiscoveryServer)
     mux := http.NewServeMux()
     mux.HandleFunc("/", s.handler)
-    go http.Serve(list, mux)
+    go func() { _ = http.Serve(list, mux) }()
     // With default options the lookup code expects the server certificate to
     // check out according to the usual CA chains etc. That won't be the case
@@ -190,7 +190,7 @@ func TestGlobalAnnounce(t *testing.T) {
     s := new(fakeDiscoveryServer)
     mux := http.NewServeMux()
     mux.HandleFunc("/", s.handler)
-    go http.Serve(list, mux)
+    go func() { _ = http.Serve(list, mux) }()
     url := "https://" + list.Addr().String() + "?insecure"
     disco, err := NewGlobal(url, cert, new(fakeAddressLister))
@@ -242,7 +242,7 @@ func (s *fakeDiscoveryServer) handler(w http.ResponseWriter, r *http.Request) {
         w.WriteHeader(204)
     } else {
         w.Header().Set("Content-Type", "application/json")
-        w.Write([]byte(`{"addresses":["tcp://192.0.2.42::22000"], "relays":[{"url": "relay://192.0.2.43:443", "latency": 42}]}`))
+        _, _ = w.Write([]byte(`{"addresses":["tcp://192.0.2.42::22000"], "relays":[{"url": "relay://192.0.2.43:443", "latency": 42}]}`))
     }
 }

View File

@@ -33,7 +33,7 @@ func TestChmodFile(t *testing.T) {
     path := filepath.Join(dir, "file")
     defer os.RemoveAll(dir)
-    defer os.Chmod(path, 0666)
+    defer func() { _ = os.Chmod(path, 0666) }()
     fd, err := os.Create(path)
     if err != nil {
@@ -74,7 +74,7 @@ func TestChownFile(t *testing.T) {
     path := filepath.Join(dir, "file")
     defer os.RemoveAll(dir)
-    defer os.Chmod(path, 0666)
+    defer func() { _ = os.Chmod(path, 0666) }()
     fd, err := os.Create(path)
     if err != nil {
@@ -116,7 +116,7 @@ func TestChmodDir(t *testing.T) {
         mode = os.FileMode(0777)
     }
-    defer os.Chmod(path, mode)
+    defer func() { _ = os.Chmod(path, mode) }()
     if err := os.Mkdir(path, mode); err != nil {
         t.Error(err)
@@ -147,7 +147,7 @@ func TestChtimes(t *testing.T) {
     mtime := time.Now().Add(-time.Hour)
-    fs.Chtimes("file", mtime, mtime)
+    _ = fs.Chtimes("file", mtime, mtime)
     stat, err := os.Stat(path)
     if err != nil {

View File

@@ -98,7 +98,7 @@ func TestWatchInclude(t *testing.T) {
     file := "file"
     ignored := "ignored"
-    testFs.MkdirAll(filepath.Join(name, ignored), 0777)
+    _ = testFs.MkdirAll(filepath.Join(name, ignored), 0777)
     included := filepath.Join(ignored, "included")
     testCase := func() {
@@ -274,7 +274,7 @@ func TestWatchSymlinkedRoot(t *testing.T) {
     if err := testFs.MkdirAll(name, 0755); err != nil {
         panic(fmt.Sprintf("Failed to create directory %s: %s", name, err))
     }
-    defer testFs.RemoveAll(name)
+    defer func() { _ = testFs.RemoveAll(name) }()
     root := filepath.Join(name, "root")
     if err := testFs.MkdirAll(root, 0777); err != nil {
@@ -376,7 +376,7 @@ func testScenario(t *testing.T, name string, testCase func(), expectedEvents, al
     if err := testFs.MkdirAll(name, 0755); err != nil {
         panic(fmt.Sprintf("Failed to create directory %s: %s", name, err))
     }
-    defer testFs.RemoveAll(name)
+    defer func() { _ = testFs.RemoveAll(name) }()
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()

View File

@@ -105,22 +105,22 @@ func newFakeFilesystem(root string) *fakefs {
         for (files == 0 || createdFiles < files) && (maxsize == 0 || writtenData>>20 < int64(maxsize)) {
             dir := filepath.Join(fmt.Sprintf("%02x", rng.Intn(255)), fmt.Sprintf("%02x", rng.Intn(255)))
             file := fmt.Sprintf("%016x", rng.Int63())
-            fs.MkdirAll(dir, 0755)
+            _ = fs.MkdirAll(dir, 0755)
             fd, _ := fs.Create(filepath.Join(dir, file))
             createdFiles++
             fsize := int64(sizeavg/2 + rng.Intn(sizeavg))
-            fd.Truncate(fsize)
+            _ = fd.Truncate(fsize)
             writtenData += fsize
             ftime := time.Unix(1000000000+rng.Int63n(10*365*86400), 0)
-            fs.Chtimes(filepath.Join(dir, file), ftime, ftime)
+            _ = fs.Chtimes(filepath.Join(dir, file), ftime, ftime)
         }
     }
     // Also create a default folder marker for good measure
-    fs.Mkdir(".stfolder", 0700)
+    _ = fs.Mkdir(".stfolder", 0700)
     fakefsFs[root] = fs
     return fs
@@ -583,7 +583,7 @@ func (f *fakeFile) readShortAt(p []byte, offs int64) (int, error) {
     // name.
     if f.seed == 0 {
         hf := fnv.New64()
-        hf.Write([]byte(f.name))
+        _, _ = hf.Write([]byte(f.name))
         f.seed = int64(hf.Sum64())
     }
@@ -601,7 +601,7 @@ func (f *fakeFile) readShortAt(p []byte, offs int64) (int, error) {
     diff := offs - minOffs
     if diff > 0 {
         lr := io.LimitReader(f.rng, diff)
-        io.Copy(ioutil.Discard, lr)
+        _, _ = io.Copy(ioutil.Discard, lr)
     }
     f.offset = offs

View File

@@ -130,10 +130,10 @@ func TestFakeFSRead(t *testing.T) {
     // Create
     fd, _ := fs.Create("test")
-    fd.Truncate(3 * 1 << randomBlockShift)
+    _ = fd.Truncate(3 * 1 << randomBlockShift)
     // Read
-    fd.Seek(0, 0)
+    _, _ = fd.Seek(0, 0)
     bs0, err := ioutil.ReadAll(fd)
     if err != nil {
         t.Fatal(err)
@@ -143,7 +143,7 @@ func TestFakeFSRead(t *testing.T) {
     }
     // Read again, starting at an odd offset
-    fd.Seek(0, 0)
+    _, _ = fd.Seek(0, 0)
     buf0 := make([]byte, 12345)
     n, _ := fd.Read(buf0)
     if n != len(buf0) {

View File

@@ -53,7 +53,7 @@ func (f *MtimeFS) Chtimes(name string, atime, mtime time.Time) error {
     }
     // Do a normal Chtimes call, don't care if it succeeds or not.
-    f.chtimes(name, atime, mtime)
+    _ = f.chtimes(name, atime, mtime)
     // Stat the file to see what happened. Here we *do* return an error,
     // because it might be "does not exist" or similar.

View File

@@ -18,10 +18,10 @@ import (
 func TestMtimeFS(t *testing.T) {
     os.RemoveAll("testdata")
     defer os.RemoveAll("testdata")
-    os.Mkdir("testdata", 0755)
-    ioutil.WriteFile("testdata/exists0", []byte("hello"), 0644)
-    ioutil.WriteFile("testdata/exists1", []byte("hello"), 0644)
-    ioutil.WriteFile("testdata/exists2", []byte("hello"), 0644)
+    _ = os.Mkdir("testdata", 0755)
+    _ = ioutil.WriteFile("testdata/exists0", []byte("hello"), 0644)
+    _ = ioutil.WriteFile("testdata/exists1", []byte("hello"), 0644)
+    _ = ioutil.WriteFile("testdata/exists2", []byte("hello"), 0644)
     // a random time with nanosecond precision
     testTime := time.Unix(1234567890, 123456789)
@@ -73,7 +73,7 @@ func TestMtimeFS(t *testing.T) {
     // filesystems.
     testTime = time.Now().Add(5 * time.Hour).Truncate(time.Minute)
-    os.Chtimes("testdata/exists0", testTime, testTime)
+    _ = os.Chtimes("testdata/exists0", testTime, testTime)
     if info, err := mtimefs.Lstat("testdata/exists0"); err != nil {
         t.Error("Lstat shouldn't fail:", err)
     } else if !info.ModTime().Equal(testTime) {
@@ -93,8 +93,8 @@ func TestMtimeFSInsensitive(t *testing.T) {
     theTest := func(t *testing.T, fs *MtimeFS, shouldSucceed bool) {
         os.RemoveAll("testdata")
         defer os.RemoveAll("testdata")
-        os.Mkdir("testdata", 0755)
-        ioutil.WriteFile("testdata/FiLe", []byte("hello"), 0644)
+        _ = os.Mkdir("testdata", 0755)
+        _ = ioutil.WriteFile("testdata/FiLe", []byte("hello"), 0644)
         // a random time with nanosecond precision
         testTime := time.Unix(1234567890, 123456789)

View File

@@ -51,7 +51,7 @@ func TempNameWithPrefix(name, prefix string) string {
     tbase := filepath.Base(name)
     if len(tbase) > maxFilenameLength {
         hash := md5.New()
-        hash.Write([]byte(name))
+        _, _ = hash.Write([]byte(name))
         tbase = fmt.Sprintf("%x", hash.Sum(nil))
     }
     tname := fmt.Sprintf("%s%s.tmp", prefix, tbase)

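The hunk above covers the long-name fallback in TempNameWithPrefix: when the base name would make the temporary file name too long, the hex MD5 of the full name is substituted. A sketch of the same idea with an assumed length limit and a made-up prefix (the real constants are defined in the package):

package main

import (
    "crypto/md5"
    "fmt"
    "path/filepath"
)

const maxFilenameLength = 160 // assumed limit, for illustration only

// tempName builds a temporary file name, falling back to an MD5 digest of the
// full path when the base name alone would be too long.
func tempName(name, prefix string) string {
    tbase := filepath.Base(name)
    if len(tbase) > maxFilenameLength {
        hash := md5.New()
        _, _ = hash.Write([]byte(name))
        tbase = fmt.Sprintf("%x", hash.Sum(nil))
    }
    return fmt.Sprintf("%s%s.tmp", prefix, tbase)
}

func main() {
    fmt.Println(tempName("some/dir/document.txt", ".tmp-")) // ".tmp-" is a hypothetical prefix
}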
View File

@@ -136,7 +136,7 @@ func (m *Matcher) Load(file string) error {
     fd, info, err := loadIgnoreFile(m.fs, file, m.changeDetector)
     if err != nil {
-        m.parseLocked(&bytes.Buffer{}, file)
+        _ = m.parseLocked(&bytes.Buffer{}, file)
         return err
     }
     defer fd.Close()
@@ -310,8 +310,8 @@ func (m *Matcher) SkipIgnoredDirs() bool {
 func hashPatterns(patterns []Pattern) string {
     h := md5.New()
     for _, pat := range patterns {
-        h.Write([]byte(pat.String()))
-        h.Write([]byte("\n"))
+        _, _ = h.Write([]byte(pat.String()))
+        _, _ = h.Write([]byte("\n"))
     }
     return fmt.Sprintf("%x", h.Sum(nil))
 }
@@ -505,7 +505,7 @@ func WriteIgnores(filesystem fs.Filesystem, path string, content []string) error
     if err := fd.Close(); err != nil {
         return err
     }
-    filesystem.Hide(path)
+    _ = filesystem.Hide(path)
     return nil
 }

View File

@@ -246,15 +246,15 @@ func TestCaching(t *testing.T) {
     defer fd1.Close()
     defer fd2.Close()
-    defer fs.Remove(fd1.Name())
-    defer fs.Remove(fd2.Name())
+    defer func() { _ = fs.Remove(fd1.Name()) }()
+    defer func() { _ = fs.Remove(fd2.Name()) }()
     _, err = fd1.Write([]byte("/x/\n#include " + filepath.Base(fd2.Name()) + "\n"))
     if err != nil {
         t.Fatal(err)
     }
-    fd2.Write([]byte("/y/\n"))
+    _, _ = fd2.Write([]byte("/y/\n"))
     pats := New(fs, WithCache(true))
     err = pats.Load(fd1.Name())
@@ -289,10 +289,10 @@ func TestCaching(t *testing.T) {
     // Modify the include file, expect empty cache. Ensure the timestamp on
     // the file changes.
-    fd2.Write([]byte("/z/\n"))
-    fd2.Sync()
+    _, _ = fd2.Write([]byte("/z/\n"))
+    _ = fd2.Sync()
     fakeTime := time.Now().Add(5 * time.Second)
-    fs.Chtimes(fd2.Name(), fakeTime, fakeTime)
+    _ = fs.Chtimes(fd2.Name(), fakeTime, fakeTime)
     err = pats.Load(fd1.Name())
     if err != nil {
@@ -321,10 +321,10 @@ func TestCaching(t *testing.T) {
     // Modify the root file, expect cache to be invalidated
-    fd1.Write([]byte("/a/\n"))
-    fd1.Sync()
+    _, _ = fd1.Write([]byte("/a/\n"))
+    _ = fd1.Sync()
     fakeTime = time.Now().Add(5 * time.Second)
-    fs.Chtimes(fd1.Name(), fakeTime, fakeTime)
+    _ = fs.Chtimes(fd1.Name(), fakeTime, fakeTime)
     err = pats.Load(fd1.Name())
     if err != nil {
@@ -434,7 +434,7 @@ flamingo
     _, err = fd.Write([]byte(stignore))
     defer fd.Close()
-    defer fs.Remove(fd.Name())
+    defer func() { _ = fs.Remove(fd.Name()) }()
     if err != nil {
         b.Fatal(err)
     }
@@ -474,7 +474,7 @@ func TestCacheReload(t *testing.T) {
     }
     defer fd.Close()
-    defer fs.Remove(fd.Name())
+    defer func() { _ = fs.Remove(fd.Name()) }()
     // Ignore file matches f1 and f2
@@ -515,9 +515,9 @@ func TestCacheReload(t *testing.T) {
     if err != nil {
         t.Fatal(err)
     }
-    fd.Sync()
+    _ = fd.Sync()
     fakeTime := time.Now().Add(5 * time.Second)
-    fs.Chtimes(fd.Name(), fakeTime, fakeTime)
+    _ = fs.Chtimes(fd.Name(), fakeTime, fakeTime)
     err = pats.Load(fd.Name())
     if err != nil {
@@ -605,7 +605,7 @@ func TestHashOfEmpty(t *testing.T) {
     // recalculate the hash. d41d8cd98f00b204e9800998ecf8427e is the md5 of
     // nothing.
-    p1.Load("file/does/not/exist")
+    _ = p1.Load("file/does/not/exist")
     secondHash := p1.Hash()
     if firstHash == secondHash {

View File

@@ -120,7 +120,7 @@ func (l *logger) debugln(level int, vals ...interface{}) {
     s := fmt.Sprintln(vals...)
     l.mut.Lock()
     defer l.mut.Unlock()
-    l.logger.Output(level, "DEBUG: "+s)
+    _ = l.logger.Output(level, "DEBUG: "+s)
     l.callHandlers(LevelDebug, s)
 }
@@ -132,7 +132,7 @@ func (l *logger) debugf(level int, format string, vals ...interface{}) {
     s := fmt.Sprintf(format, vals...)
     l.mut.Lock()
     defer l.mut.Unlock()
-    l.logger.Output(level, "DEBUG: "+s)
+    _ = l.logger.Output(level, "DEBUG: "+s)
     l.callHandlers(LevelDebug, s)
 }
@@ -141,7 +141,7 @@ func (l *logger) Verboseln(vals ...interface{}) {
     s := fmt.Sprintln(vals...)
     l.mut.Lock()
     defer l.mut.Unlock()
-    l.logger.Output(2, "VERBOSE: "+s)
+    _ = l.logger.Output(2, "VERBOSE: "+s)
     l.callHandlers(LevelVerbose, s)
 }
@@ -150,7 +150,7 @@ func (l *logger) Verbosef(format string, vals ...interface{}) {
     s := fmt.Sprintf(format, vals...)
     l.mut.Lock()
     defer l.mut.Unlock()
-    l.logger.Output(2, "VERBOSE: "+s)
+    _ = l.logger.Output(2, "VERBOSE: "+s)
     l.callHandlers(LevelVerbose, s)
 }
@@ -159,7 +159,7 @@ func (l *logger) Infoln(vals ...interface{}) {
     s := fmt.Sprintln(vals...)
     l.mut.Lock()
     defer l.mut.Unlock()
-    l.logger.Output(2, "INFO: "+s)
+    _ = l.logger.Output(2, "INFO: "+s)
     l.callHandlers(LevelInfo, s)
 }
@@ -168,7 +168,7 @@ func (l *logger) Infof(format string, vals ...interface{}) {
     s := fmt.Sprintf(format, vals...)
     l.mut.Lock()
     defer l.mut.Unlock()
-    l.logger.Output(2, "INFO: "+s)
+    _ = l.logger.Output(2, "INFO: "+s)
     l.callHandlers(LevelInfo, s)
 }
@@ -177,7 +177,7 @@ func (l *logger) Warnln(vals ...interface{}) {
     s := fmt.Sprintln(vals...)
     l.mut.Lock()
     defer l.mut.Unlock()
-    l.logger.Output(2, "WARNING: "+s)
+    _ = l.logger.Output(2, "WARNING: "+s)
     l.callHandlers(LevelWarn, s)
 }
@@ -186,7 +186,7 @@ func (l *logger) Warnf(format string, vals ...interface{}) {
     s := fmt.Sprintf(format, vals...)
     l.mut.Lock()
     defer l.mut.Unlock()
-    l.logger.Output(2, "WARNING: "+s)
+    _ = l.logger.Output(2, "WARNING: "+s)
     l.callHandlers(LevelWarn, s)
 }
@@ -196,7 +196,7 @@ func (l *logger) Fatalln(vals ...interface{}) {
     s := fmt.Sprintln(vals...)
     l.mut.Lock()
     defer l.mut.Unlock()
-    l.logger.Output(2, "FATAL: "+s)
+    _ = l.logger.Output(2, "FATAL: "+s)
     l.callHandlers(LevelFatal, s)
     os.Exit(1)
 }
@@ -207,7 +207,7 @@ func (l *logger) Fatalf(format string, vals ...interface{}) {
     s := fmt.Sprintf(format, vals...)
     l.mut.Lock()
     defer l.mut.Unlock()
-    l.logger.Output(2, "FATAL: "+s)
+    _ = l.logger.Output(2, "FATAL: "+s)
     l.callHandlers(LevelFatal, s)
     os.Exit(1)
 }