diff --git a/bench/bench_writer/bench_writer.go b/bench/bench_writer/bench_writer.go
index 231c99084..8fb72946c 100644
--- a/bench/bench_writer/bench_writer.go
+++ b/bench/bench_writer/bench_writer.go
@@ -31,9 +31,9 @@ func main() {
 	log.SetPrefix("[bench_writer] ")
 
 	msg := make([]byte, *size)
-	batch := make([][]byte, 0)
-	for i := 0; i < *batchSize; i++ {
-		batch = append(batch, msg)
+	batch := make([][]byte, *batchSize)
+	for i := range batch {
+		batch[i] = msg
 	}
 
 	goChan := make(chan int)
diff --git a/nsqd/http_test.go b/nsqd/http_test.go
index dbe1ea342..5359c1508 100644
--- a/nsqd/http_test.go
+++ b/nsqd/http_test.go
@@ -72,9 +72,9 @@ func TestHTTPmput(t *testing.T) {
 	topic := nsqd.GetTopic(topicName)
 
 	msg := []byte("test message")
-	msgs := make([][]byte, 0)
-	for i := 0; i < 4; i++ {
-		msgs = append(msgs, msg)
+	msgs := make([][]byte, 4)
+	for i := range msgs {
+		msgs[i] = msg
 	}
 	buf := bytes.NewBuffer(bytes.Join(msgs, []byte("\n")))
 
@@ -100,9 +100,9 @@ func TestHTTPmputEmpty(t *testing.T) {
 	topic := nsqd.GetTopic(topicName)
 
 	msg := []byte("test message")
-	msgs := make([][]byte, 0)
-	for i := 0; i < 4; i++ {
-		msgs = append(msgs, msg)
+	msgs := make([][]byte, 4)
+	for i := range msgs {
+		msgs[i] = msg
 	}
 	buf := bytes.NewBuffer(bytes.Join(msgs, []byte("\n")))
 	_, err := buf.Write([]byte("\n"))
@@ -129,9 +129,9 @@ func TestHTTPmputBinary(t *testing.T) {
 	topicName := "test_http_mput_bin" + strconv.Itoa(int(time.Now().Unix()))
 	topic := nsqd.GetTopic(topicName)
 
-	mpub := make([][]byte, 0)
-	for i := 0; i < 5; i++ {
-		mpub = append(mpub, make([]byte, 100))
+	mpub := make([][]byte, 5)
+	for i := range mpub {
+		mpub[i] = make([]byte, 100)
 	}
 	cmd, _ := nsq.MultiPublish(topicName, mpub)
 	buf := bytes.NewBuffer(cmd.Body)
diff --git a/nsqd/lookup.go b/nsqd/lookup.go
index fb29030b7..109865d4a 100644
--- a/nsqd/lookup.go
+++ b/nsqd/lookup.go
@@ -105,7 +105,7 @@ func (n *NSQD) lookupLoop() {
 				}
 			}
 		case lookupPeer := <-syncTopicChan:
-			commands := make([]*nsq.Command, 0)
+			var commands []*nsq.Command
 			// build all the commands first so we exit the lock(s) as fast as possible
 			n.RLock()
 			for _, topic := range n.topicMap {
diff --git a/nsqd/nsqd.go b/nsqd/nsqd.go
index d61081b50..dd75c23cf 100644
--- a/nsqd/nsqd.go
+++ b/nsqd/nsqd.go
@@ -307,7 +307,7 @@ func (n *NSQD) PersistMetadata() error {
 	n.logf("NSQ: persisting topic/channel metadata to %s", fileName)
 
 	js := make(map[string]interface{})
-	topics := make([]interface{}, 0)
+	topics := []interface{}{}
 	for _, topic := range n.topicMap {
 		if topic.ephemeral {
 			continue
@@ -315,7 +315,7 @@ func (n *NSQD) PersistMetadata() error {
 		topicData := make(map[string]interface{})
 		topicData["name"] = topic.name
 		topicData["paused"] = topic.IsPaused()
-		channels := make([]interface{}, 0)
+		channels := []interface{}{}
 		topic.Lock()
 		for _, channel := range topic.channelMap {
 			channel.Lock()
diff --git a/nsqd/protocol_v2_test.go b/nsqd/protocol_v2_test.go
index 1f454607e..ce8a76fb0 100644
--- a/nsqd/protocol_v2_test.go
+++ b/nsqd/protocol_v2_test.go
@@ -460,7 +460,7 @@ func TestSizeLimits(t *testing.T) {
 	defer conn.Close()
 
 	// PUB thats empty
-	nsq.Publish(topicName, make([]byte, 0)).WriteTo(conn)
+	nsq.Publish(topicName, []byte{}).WriteTo(conn)
 	resp, _ = nsq.ReadResponse(conn)
 	frameType, data, _ = nsq.UnpackResponse(resp)
 	t.Logf("frameType: %d, data: %s", frameType, data)
@@ -473,9 +473,9 @@ func TestSizeLimits(t *testing.T) {
 	defer conn.Close()
 
 	// MPUB body that's valid
-	mpub := make([][]byte, 0)
-	for i := 0; i < 5; i++ {
-		mpub = append(mpub, make([]byte, 100))
+	mpub := make([][]byte, 5)
+	for i := range mpub {
+		mpub[i] = make([]byte, 100)
 	}
 	cmd, _ := nsq.MultiPublish(topicName, mpub)
 	cmd.WriteTo(conn)
@@ -486,9 +486,9 @@ func TestSizeLimits(t *testing.T) {
 	equal(t, data, []byte("OK"))
 
 	// MPUB body that's invalid (body too big)
-	mpub = make([][]byte, 0)
-	for i := 0; i < 11; i++ {
-		mpub = append(mpub, make([]byte, 100))
+	mpub = make([][]byte, 11)
+	for i := range mpub {
+		mpub[i] = make([]byte, 100)
 	}
 	cmd, _ = nsq.MultiPublish(topicName, mpub)
 	cmd.WriteTo(conn)
@@ -504,11 +504,11 @@ func TestSizeLimits(t *testing.T) {
 	defer conn.Close()
 
 	// MPUB that's invalid (one message empty)
-	mpub = make([][]byte, 0)
-	for i := 0; i < 5; i++ {
-		mpub = append(mpub, make([]byte, 100))
+	mpub = make([][]byte, 5)
+	for i := range mpub {
+		mpub[i] = make([]byte, 100)
 	}
-	mpub = append(mpub, make([]byte, 0))
+	mpub = append(mpub, []byte{})
 	cmd, _ = nsq.MultiPublish(topicName, mpub)
 	cmd.WriteTo(conn)
 	resp, _ = nsq.ReadResponse(conn)
@@ -523,9 +523,9 @@ func TestSizeLimits(t *testing.T) {
 	defer conn.Close()
 
 	// MPUB body that's invalid (one of the messages is too big)
-	mpub = make([][]byte, 0)
-	for i := 0; i < 5; i++ {
-		mpub = append(mpub, make([]byte, 101))
+	mpub = make([][]byte, 5)
+	for i := range mpub {
+		mpub[i] = make([]byte, 101)
 	}
 	cmd, _ = nsq.MultiPublish(topicName, mpub)
 	cmd.WriteTo(conn)
@@ -1402,9 +1402,9 @@ func benchmarkProtocolV2Pub(b *testing.B, size int) {
 	tcpAddr, _, nsqd := mustStartNSQD(opts)
 	msg := make([]byte, size)
 	batchSize := 200
-	batch := make([][]byte, 0)
-	for i := 0; i < batchSize; i++ {
-		batch = append(batch, msg)
+	batch := make([][]byte, batchSize)
+	for i := range batch {
+		batch[i] = msg
 	}
 	topicName := "bench_v2_pub" + strconv.Itoa(int(time.Now().Unix()))
 	b.SetBytes(int64(len(msg)))
diff --git a/nsqd/statsd.go b/nsqd/statsd.go
index 8546eba9f..6adcab5c9 100644
--- a/nsqd/statsd.go
+++ b/nsqd/statsd.go
@@ -26,7 +26,7 @@ func (s Uint64Slice) Less(i, j int) bool {
 
 func (n *NSQD) statsdLoop() {
 	var lastMemStats runtime.MemStats
-	lastStats := make([]TopicStats, 0)
+	var lastStats []TopicStats
 	ticker := time.NewTicker(n.opts.StatsdInterval)
 	for {
 		select {
diff --git a/nsqd/topic.go b/nsqd/topic.go
index 48331bd81..29294a336 100644
--- a/nsqd/topic.go
+++ b/nsqd/topic.go
@@ -233,7 +233,7 @@ func (t *Topic) messagePump() {
 				continue
 			}
 		case <-t.channelUpdateChan:
-			chans = make([]*Channel, 0)
+			chans = chans[:0]
 			t.RLock()
 			for _, c := range t.channelMap {
 				chans = append(chans, c)
diff --git a/nsqlookupd/registration_db.go b/nsqlookupd/registration_db.go
index 5438d450c..90ad86ba4 100644
--- a/nsqlookupd/registration_db.go
+++ b/nsqlookupd/registration_db.go
@@ -63,7 +63,7 @@ func (r *RegistrationDB) AddRegistration(k Registration) {
 	defer r.Unlock()
 	_, ok := r.registrationMap[k]
 	if !ok {
-		r.registrationMap[k] = make(Producers, 0)
+		r.registrationMap[k] = Producers{}
 	}
 }
 
@@ -93,7 +93,7 @@ func (r *RegistrationDB) RemoveProducer(k Registration, id string) (bool, int) {
 		return false, 0
 	}
 	removed := false
-	cleaned := make(Producers, 0)
+	cleaned := Producers{}
 	for _, producer := range producers {
 		if producer.peerInfo.id != id {
 			cleaned = append(cleaned, producer)
@@ -116,7 +116,7 @@ func (r *RegistrationDB) RemoveRegistration(k Registration) {
 func (r *RegistrationDB) FindRegistrations(category string, key string, subkey string) Registrations {
 	r.RLock()
 	defer r.RUnlock()
-	results := make(Registrations, 0)
+	results := Registrations{}
 	for k := range r.registrationMap {
 		if !k.IsMatch(category, key, subkey) {
 			continue
@@ -129,7 +129,7 @@ func (r *RegistrationDB) FindRegistrations(category string, key string, subkey s
 func (r *RegistrationDB) FindProducers(category string, key string, subkey string) Producers {
 	r.RLock()
 	defer r.RUnlock()
-	results := make(Producers, 0)
+	results := Producers{}
 	for k, producers := range r.registrationMap {
 		if !k.IsMatch(category, key, subkey) {
 			continue
@@ -152,7 +152,7 @@ func (r *RegistrationDB) FindProducers(category string, key string, subkey strin
 func (r *RegistrationDB) LookupRegistrations(id string) Registrations {
 	r.RLock()
 	defer r.RUnlock()
-	results := make(Registrations, 0)
+	results := Registrations{}
 	for k, producers := range r.registrationMap {
 		for _, p := range producers {
 			if p.peerInfo.id == id {
@@ -178,7 +178,7 @@ func (k Registration) IsMatch(category string, key string, subkey string) bool {
 }
 
 func (rr Registrations) Filter(category string, key string, subkey string) Registrations {
-	output := make(Registrations, 0)
+	output := Registrations{}
 	for _, k := range rr {
 		if k.IsMatch(category, key, subkey) {
 			output = append(output, k)
@@ -205,7 +205,7 @@ func (rr Registrations) SubKeys() []string {
 
 func (pp Producers) FilterByActive(inactivityTimeout time.Duration, tombstoneLifetime time.Duration) Producers {
 	now := time.Now()
-	results := make(Producers, 0)
+	results := Producers{}
 	for _, p := range pp {
 		cur := time.Unix(0, atomic.LoadInt64(&p.peerInfo.lastUpdate))
 		if now.Sub(cur) > inactivityTimeout || p.IsTombstoned(tombstoneLifetime) {
@@ -217,7 +217,7 @@ func (pp Producers) FilterByActive(inactivityTimeout time.Duration, tombstoneLif
 }
 
 func (pp Producers) PeerInfo() []*PeerInfo {
-	results := make([]*PeerInfo, 0)
+	results := []*PeerInfo{}
 	for _, p := range pp {
 		results = append(results, p.peerInfo)
 	}
diff --git a/util/lookupd/lookupd.go b/util/lookupd/lookupd.go
index 91fe8532c..684f245dd 100644
--- a/util/lookupd/lookupd.go
+++ b/util/lookupd/lookupd.go
@@ -33,7 +33,7 @@ func GetVersion(addr string) (*semver.Version, error) {
 // from all the given lookupd
 func GetLookupdTopics(lookupdHTTPAddrs []string) ([]string, error) {
 	success := false
-	allTopics := make([]string, 0)
+	var allTopics []string
 	var lock sync.Mutex
 	var wg sync.WaitGroup
 	for _, addr := range lookupdHTTPAddrs {
@@ -67,7 +67,7 @@ func GetLookupdTopics(lookupdHTTPAddrs []string) ([]string, error) {
 // from all the given lookupd for the given topic
 func GetLookupdTopicChannels(topic string, lookupdHTTPAddrs []string) ([]string, error) {
 	success := false
-	allChannels := make([]string, 0)
+	var allChannels []string
 	var lock sync.Mutex
 	var wg sync.WaitGroup
 	for _, addr := range lookupdHTTPAddrs {
@@ -101,8 +101,8 @@ func GetLookupdTopicChannels(topic string, lookupdHTTPAddrs []string) ([]string,
 // containing metadata for each node connected to given lookupds
 func GetLookupdProducers(lookupdHTTPAddrs []string) ([]*Producer, error) {
 	success := false
-	allProducers := make(map[string]*Producer, 0)
-	output := make([]*Producer, 0)
+	allProducers := make(map[string]*Producer)
+	var output []*Producer
 	maxVersion, _ := semver.Parse("0.0.0")
 	var lock sync.Mutex
 	var wg sync.WaitGroup
@@ -202,7 +202,7 @@ func GetLookupdProducers(lookupdHTTPAddrs []string) ([]*Producer, error) {
 // producers for a given topic by unioning the results returned from the given lookupd
 func GetLookupdTopicProducers(topic string, lookupdHTTPAddrs []string) ([]string, error) {
 	success := false
-	allSources := make([]string, 0)
+	var allSources []string
 	var lock sync.Mutex
 	var wg sync.WaitGroup
 
@@ -243,7 +243,7 @@ func GetLookupdTopicProducers(topic string, lookupdHTTPAddrs []string) ([]string
 // GetNSQDTopics returns a []string containing all the topics
 // produced by the given nsqd
 func GetNSQDTopics(nsqdHTTPAddrs []string) ([]string, error) {
-	topics := make([]string, 0)
+	var topics []string
 	var lock sync.Mutex
 	var wg sync.WaitGroup
 	success := false
@@ -280,7 +280,7 @@ func GetNSQDTopics(nsqdHTTPAddrs []string) ([]string, error) {
 // GetNSQDTopicProducers returns a []string containing the addresses of all the nsqd
 // that produce the given topic out of the given nsqd
 func GetNSQDTopicProducers(topic string, nsqdHTTPAddrs []string) ([]string, error) {
-	addresses := make([]string, 0)
+	var addresses []string
 	var lock sync.Mutex
 	var wg sync.WaitGroup
 	success := false
@@ -324,7 +324,7 @@ func GetNSQDStats(nsqdHTTPAddrs []string, selectedTopic string) ([]*TopicStats,
 	var lock sync.Mutex
 	var wg sync.WaitGroup
 
-	topicStatsList := make(TopicStatsList, 0)
+	var topicStatsList TopicStatsList
 	channelStatsMap := make(map[string]*ChannelStats)
 
 	success := false
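
Not part of the patch: a minimal, self-contained Go sketch contrasting the three initialization styles this diff standardizes on (pre-sized make with index assignment, a nil slice via var, and an empty composite literal). The variable names and values here are illustrative only.

// sketch.go: illustrative only, not taken from the nsq repository
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Pre-size the slice when the final length is known up front; assigning by
	// index avoids the repeated reallocation of appending to a zero-length slice.
	batch := make([][]byte, 4)
	for i := range batch {
		batch[i] = []byte("msg")
	}

	// A plain var declaration yields a nil slice, which is fine when the result
	// may stay empty: append, len, and range all work on nil slices.
	var topics []string
	topics = append(topics, "clicks")

	// An empty composite literal yields a non-nil, zero-length slice. The
	// difference only shows where nil is observable, e.g. in JSON encoding.
	channels := []interface{}{}
	var nilChannels []interface{}

	a, _ := json.Marshal(channels)    // encodes as []
	b, _ := json.Marshal(nilChannels) // encodes as null
	fmt.Println(len(batch), topics, string(a), string(b))
}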