Get rid of make(X, 0)
This can be replaced by one of several different, shorter constructs
depending on context.

This fixes some lint output.
cespare committed Feb 26, 2015
1 parent 47b2d37 commit cefb1b2
Showing 9 changed files with 50 additions and 50 deletions.
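
The shorter constructs in question fall into a few idioms, depending on how each slice is used afterward. A minimal standalone sketch of the patterns (variable names here are illustrative, not taken from any one of the changed files):

package main

import "fmt"

func main() {
	msg := []byte("x")
	batchSize := 4

	// 1) Final length known up front: allocate it and assign by index
	//    instead of growing from zero with append.
	batch := make([][]byte, batchSize)
	for i := range batch {
		batch[i] = msg
	}

	// 2) Only ever appended to: a nil slice declared with var is enough,
	//    since append works on a nil slice.
	var topics []string
	topics = append(topics, "a", "b")

	// 3) An empty but non-nil slice is wanted (for example so that
	//    encoding/json emits [] rather than null): use a composite literal.
	channels := []interface{}{}

	// 4) An existing slice is rebuilt inside a loop: re-slice to length zero
	//    to reuse the backing array instead of reallocating.
	batch = batch[:0]
	batch = append(batch, msg)

	fmt.Println(len(batch), len(topics), len(channels))
}

The diffs below apply whichever of these fits the surrounding code.
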
6 changes: 3 additions & 3 deletions bench/bench_writer/bench_writer.go
@@ -31,9 +31,9 @@ func main() {
 	log.SetPrefix("[bench_writer] ")
 
 	msg := make([]byte, *size)
-	batch := make([][]byte, 0)
-	for i := 0; i < *batchSize; i++ {
-		batch = append(batch, msg)
+	batch := make([][]byte, *batchSize)
+	for i := range batch {
+		batch[i] = msg
 	}
 
 	goChan := make(chan int)
18 changes: 9 additions & 9 deletions nsqd/http_test.go
@@ -72,9 +72,9 @@ func TestHTTPmput(t *testing.T) {
 	topic := nsqd.GetTopic(topicName)
 
 	msg := []byte("test message")
-	msgs := make([][]byte, 0)
-	for i := 0; i < 4; i++ {
-		msgs = append(msgs, msg)
+	msgs := make([][]byte, 4)
+	for i := range msgs {
+		msgs[i] = msg
 	}
 	buf := bytes.NewBuffer(bytes.Join(msgs, []byte("\n")))
 
@@ -100,9 +100,9 @@ func TestHTTPmputEmpty(t *testing.T) {
 	topic := nsqd.GetTopic(topicName)
 
 	msg := []byte("test message")
-	msgs := make([][]byte, 0)
-	for i := 0; i < 4; i++ {
-		msgs = append(msgs, msg)
+	msgs := make([][]byte, 4)
+	for i := range msgs {
+		msgs[i] = msg
 	}
 	buf := bytes.NewBuffer(bytes.Join(msgs, []byte("\n")))
 	_, err := buf.Write([]byte("\n"))
@@ -129,9 +129,9 @@ func TestHTTPmputBinary(t *testing.T) {
 	topicName := "test_http_mput_bin" + strconv.Itoa(int(time.Now().Unix()))
 	topic := nsqd.GetTopic(topicName)
 
-	mpub := make([][]byte, 0)
-	for i := 0; i < 5; i++ {
-		mpub = append(mpub, make([]byte, 100))
+	mpub := make([][]byte, 5)
+	for i := range mpub {
+		mpub[i] = make([]byte, 100)
 	}
	cmd, _ := nsq.MultiPublish(topicName, mpub)
 	buf := bytes.NewBuffer(cmd.Body)
2 changes: 1 addition & 1 deletion nsqd/lookup.go
@@ -105,7 +105,7 @@ func (n *NSQD) lookupLoop() {
 				}
 			}
 		case lookupPeer := <-syncTopicChan:
-			commands := make([]*nsq.Command, 0)
+			var commands []*nsq.Command
 			// build all the commands first so we exit the lock(s) as fast as possible
 			n.RLock()
 			for _, topic := range n.topicMap {
4 changes: 2 additions & 2 deletions nsqd/nsqd.go
@@ -307,15 +307,15 @@ func (n *NSQD) PersistMetadata() error {
 	n.logf("NSQ: persisting topic/channel metadata to %s", fileName)
 
 	js := make(map[string]interface{})
-	topics := make([]interface{}, 0)
+	topics := []interface{}{}
 	for _, topic := range n.topicMap {
 		if topic.ephemeral {
 			continue
 		}
 		topicData := make(map[string]interface{})
 		topicData["name"] = topic.name
 		topicData["paused"] = topic.IsPaused()
-		channels := make([]interface{}, 0)
+		channels := []interface{}{}
 		topic.Lock()
 		for _, channel := range topic.channelMap {
 			channel.Lock()
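
One detail worth noting about PersistMetadata: these slices end up marshaled by encoding/json, where a nil slice encodes as null while an empty literal encodes as [], which is presumably why this file uses []interface{}{} rather than a plain var declaration. A quick standalone check of that behavior (not part of the commit):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var nilSlice []interface{}    // nil slice, e.g. from a plain var declaration
	emptySlice := []interface{}{} // empty but non-nil

	a, _ := json.Marshal(map[string]interface{}{"topics": nilSlice})
	b, _ := json.Marshal(map[string]interface{}{"topics": emptySlice})
	fmt.Println(string(a)) // {"topics":null}
	fmt.Println(string(b)) // {"topics":[]}
}
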
34 changes: 17 additions & 17 deletions nsqd/protocol_v2_test.go
@@ -460,7 +460,7 @@ func TestSizeLimits(t *testing.T) {
 	defer conn.Close()
 
 	// PUB thats empty
-	nsq.Publish(topicName, make([]byte, 0)).WriteTo(conn)
+	nsq.Publish(topicName, []byte{}).WriteTo(conn)
 	resp, _ = nsq.ReadResponse(conn)
 	frameType, data, _ = nsq.UnpackResponse(resp)
 	t.Logf("frameType: %d, data: %s", frameType, data)
@@ -473,9 +473,9 @@ func TestSizeLimits(t *testing.T) {
 	defer conn.Close()
 
 	// MPUB body that's valid
-	mpub := make([][]byte, 0)
-	for i := 0; i < 5; i++ {
-		mpub = append(mpub, make([]byte, 100))
+	mpub := make([][]byte, 5)
+	for i := range mpub {
+		mpub[i] = make([]byte, 100)
 	}
 	cmd, _ := nsq.MultiPublish(topicName, mpub)
 	cmd.WriteTo(conn)
@@ -486,9 +486,9 @@ func TestSizeLimits(t *testing.T) {
 	equal(t, data, []byte("OK"))
 
 	// MPUB body that's invalid (body too big)
-	mpub = make([][]byte, 0)
-	for i := 0; i < 11; i++ {
-		mpub = append(mpub, make([]byte, 100))
+	mpub = make([][]byte, 11)
+	for i := range mpub {
+		mpub[i] = make([]byte, 100)
 	}
 	cmd, _ = nsq.MultiPublish(topicName, mpub)
 	cmd.WriteTo(conn)
@@ -504,11 +504,11 @@ func TestSizeLimits(t *testing.T) {
 	defer conn.Close()
 
 	// MPUB that's invalid (one message empty)
-	mpub = make([][]byte, 0)
-	for i := 0; i < 5; i++ {
-		mpub = append(mpub, make([]byte, 100))
+	mpub = make([][]byte, 5)
+	for i := range mpub {
+		mpub[i] = make([]byte, 100)
 	}
-	mpub = append(mpub, make([]byte, 0))
+	mpub = append(mpub, []byte{})
 	cmd, _ = nsq.MultiPublish(topicName, mpub)
 	cmd.WriteTo(conn)
 	resp, _ = nsq.ReadResponse(conn)
@@ -523,9 +523,9 @@ func TestSizeLimits(t *testing.T) {
 	defer conn.Close()
 
 	// MPUB body that's invalid (one of the messages is too big)
-	mpub = make([][]byte, 0)
-	for i := 0; i < 5; i++ {
-		mpub = append(mpub, make([]byte, 101))
+	mpub = make([][]byte, 5)
+	for i := range mpub {
+		mpub[i] = make([]byte, 101)
 	}
 	cmd, _ = nsq.MultiPublish(topicName, mpub)
 	cmd.WriteTo(conn)
@@ -1402,9 +1402,9 @@ func benchmarkProtocolV2Pub(b *testing.B, size int) {
 	tcpAddr, _, nsqd := mustStartNSQD(opts)
 	msg := make([]byte, size)
 	batchSize := 200
-	batch := make([][]byte, 0)
-	for i := 0; i < batchSize; i++ {
-		batch = append(batch, msg)
+	batch := make([][]byte, batchSize)
+	for i := range batch {
+		batch[i] = msg
 	}
 	topicName := "bench_v2_pub" + strconv.Itoa(int(time.Now().Unix()))
 	b.SetBytes(int64(len(msg)))
2 changes: 1 addition & 1 deletion nsqd/statsd.go
@@ -26,7 +26,7 @@ func (s Uint64Slice) Less(i, j int) bool {
 
 func (n *NSQD) statsdLoop() {
 	var lastMemStats runtime.MemStats
-	lastStats := make([]TopicStats, 0)
+	var lastStats []TopicStats
 	ticker := time.NewTicker(n.opts.StatsdInterval)
 	for {
 		select {
2 changes: 1 addition & 1 deletion nsqd/topic.go
@@ -233,7 +233,7 @@ func (t *Topic) messagePump() {
 				continue
 			}
 		case <-t.channelUpdateChan:
-			chans = make([]*Channel, 0)
+			chans = chans[:0]
 			t.RLock()
 			for _, c := range t.channelMap {
 				chans = append(chans, c)
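
In messagePump the chans slice already exists outside the select loop, so re-slicing it to zero length reuses the backing array on each channel-map update instead of allocating a fresh slice; the subsequent appends overwrite the old entries. A small illustration of the same pattern, with made-up names:

package main

import "fmt"

func main() {
	// Pretend this slice was filled on a previous pass through the loop.
	chans := make([]string, 0, 8)
	chans = append(chans, "ch1", "ch2", "ch3")

	// Rebuild in place: length drops to zero but the capacity (and backing
	// array) is kept, so the appends below do not allocate.
	chans = chans[:0]
	for _, c := range []string{"ch4", "ch5"} {
		chans = append(chans, c)
	}
	fmt.Println(chans, cap(chans)) // [ch4 ch5] 8
}
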
16 changes: 8 additions & 8 deletions nsqlookupd/registration_db.go
@@ -63,7 +63,7 @@ func (r *RegistrationDB) AddRegistration(k Registration) {
 	defer r.Unlock()
 	_, ok := r.registrationMap[k]
 	if !ok {
-		r.registrationMap[k] = make(Producers, 0)
+		r.registrationMap[k] = Producers{}
 	}
 }
 
@@ -93,7 +93,7 @@ func (r *RegistrationDB) RemoveProducer(k Registration, id string) (bool, int) {
 		return false, 0
 	}
 	removed := false
-	cleaned := make(Producers, 0)
+	cleaned := Producers{}
 	for _, producer := range producers {
 		if producer.peerInfo.id != id {
 			cleaned = append(cleaned, producer)
@@ -116,7 +116,7 @@ func (r *RegistrationDB) RemoveRegistration(k Registration) {
 func (r *RegistrationDB) FindRegistrations(category string, key string, subkey string) Registrations {
 	r.RLock()
 	defer r.RUnlock()
-	results := make(Registrations, 0)
+	results := Registrations{}
 	for k := range r.registrationMap {
 		if !k.IsMatch(category, key, subkey) {
 			continue
@@ -129,7 +129,7 @@ func (r *RegistrationDB) FindProducers(category string, key string, subkey string) Producers {
 func (r *RegistrationDB) FindProducers(category string, key string, subkey string) Producers {
 	r.RLock()
 	defer r.RUnlock()
-	results := make(Producers, 0)
+	results := Producers{}
 	for k, producers := range r.registrationMap {
 		if !k.IsMatch(category, key, subkey) {
 			continue
@@ -152,7 +152,7 @@ func (r *RegistrationDB) FindProducers(category string, key string, subkey string) Producers {
 func (r *RegistrationDB) LookupRegistrations(id string) Registrations {
 	r.RLock()
 	defer r.RUnlock()
-	results := make(Registrations, 0)
+	results := Registrations{}
 	for k, producers := range r.registrationMap {
 		for _, p := range producers {
 			if p.peerInfo.id == id {
@@ -178,7 +178,7 @@ func (k Registration) IsMatch(category string, key string, subkey string) bool {
 }
 
 func (rr Registrations) Filter(category string, key string, subkey string) Registrations {
-	output := make(Registrations, 0)
+	output := Registrations{}
 	for _, k := range rr {
 		if k.IsMatch(category, key, subkey) {
 			output = append(output, k)
@@ -205,7 +205,7 @@ func (rr Registrations) SubKeys() []string {
 
 func (pp Producers) FilterByActive(inactivityTimeout time.Duration, tombstoneLifetime time.Duration) Producers {
 	now := time.Now()
-	results := make(Producers, 0)
+	results := Producers{}
 	for _, p := range pp {
 		cur := time.Unix(0, atomic.LoadInt64(&p.peerInfo.lastUpdate))
 		if now.Sub(cur) > inactivityTimeout || p.IsTombstoned(tombstoneLifetime) {
@@ -217,7 +217,7 @@ func (pp Producers) FilterByActive(inactivityTimeout time.Duration, tombstoneLifetime time.Duration) Producers {
 }
 
 func (pp Producers) PeerInfo() []*PeerInfo {
-	results := make([]*PeerInfo, 0)
+	results := []*PeerInfo{}
 	for _, p := range pp {
 		results = append(results, p.peerInfo)
 	}
16 changes: 8 additions & 8 deletions util/lookupd/lookupd.go
@@ -33,7 +33,7 @@ func GetVersion(addr string) (*semver.Version, error) {
 // from all the given lookupd
 func GetLookupdTopics(lookupdHTTPAddrs []string) ([]string, error) {
 	success := false
-	allTopics := make([]string, 0)
+	var allTopics []string
 	var lock sync.Mutex
 	var wg sync.WaitGroup
 	for _, addr := range lookupdHTTPAddrs {
@@ -67,7 +67,7 @@ func GetLookupdTopics(lookupdHTTPAddrs []string) ([]string, error) {
 // from all the given lookupd for the given topic
 func GetLookupdTopicChannels(topic string, lookupdHTTPAddrs []string) ([]string, error) {
 	success := false
-	allChannels := make([]string, 0)
+	var allChannels []string
 	var lock sync.Mutex
 	var wg sync.WaitGroup
 	for _, addr := range lookupdHTTPAddrs {
@@ -101,8 +101,8 @@ func GetLookupdTopicChannels(topic string, lookupdHTTPAddrs []string) ([]string, error) {
 // containing metadata for each node connected to given lookupds
 func GetLookupdProducers(lookupdHTTPAddrs []string) ([]*Producer, error) {
 	success := false
-	allProducers := make(map[string]*Producer, 0)
-	output := make([]*Producer, 0)
+	allProducers := make(map[string]*Producer)
+	var output []*Producer
 	maxVersion, _ := semver.Parse("0.0.0")
 	var lock sync.Mutex
 	var wg sync.WaitGroup
@@ -202,7 +202,7 @@ func GetLookupdProducers(lookupdHTTPAddrs []string) ([]*Producer, error) {
 // producers for a given topic by unioning the results returned from the given lookupd
 func GetLookupdTopicProducers(topic string, lookupdHTTPAddrs []string) ([]string, error) {
 	success := false
-	allSources := make([]string, 0)
+	var allSources []string
 	var lock sync.Mutex
 	var wg sync.WaitGroup
 
@@ -243,7 +243,7 @@ func GetLookupdTopicProducers(topic string, lookupdHTTPAddrs []string) ([]string, error) {
 // GetNSQDTopics returns a []string containing all the topics
 // produced by the given nsqd
 func GetNSQDTopics(nsqdHTTPAddrs []string) ([]string, error) {
-	topics := make([]string, 0)
+	var topics []string
 	var lock sync.Mutex
 	var wg sync.WaitGroup
 	success := false
@@ -280,7 +280,7 @@ func GetNSQDTopics(nsqdHTTPAddrs []string) ([]string, error) {
 // GetNSQDTopicProducers returns a []string containing the addresses of all the nsqd
 // that produce the given topic out of the given nsqd
 func GetNSQDTopicProducers(topic string, nsqdHTTPAddrs []string) ([]string, error) {
-	addresses := make([]string, 0)
+	var addresses []string
 	var lock sync.Mutex
 	var wg sync.WaitGroup
 	success := false
@@ -324,7 +324,7 @@ func GetNSQDStats(nsqdHTTPAddrs []string, selectedTopic string) ([]*TopicStats,
 	var lock sync.Mutex
 	var wg sync.WaitGroup
 
-	topicStatsList := make(TopicStatsList, 0)
+	var topicStatsList TopicStatsList
 	channelStatsMap := make(map[string]*ChannelStats)
 
 	success := false
