diff --git a/go.mod b/go.mod
index 1fe37829..47cae138 100644
--- a/go.mod
+++ b/go.mod
@@ -54,6 +54,9 @@ require (
github.com/aws/smithy-go v1.20.2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
+ github.com/couchbase/go-couchbase v0.1.1 // indirect
+ github.com/couchbase/gomemcached v0.3.3 // indirect
+ github.com/couchbase/goutils v0.1.2 // indirect
github.com/danieljoos/wincred v1.2.2 // indirect
github.com/dvsekhvalnov/jose2go v1.6.0 // indirect
github.com/elastic/go-sysinfo v1.11.2 // indirect
@@ -132,3 +135,5 @@ require (
gopkg.in/yaml.v3 v3.0.1 // indirect
howett.net/plist v1.0.1 // indirect
)
+
+require github.com/couchbase/go_n1ql v0.0.0-20220303011133-0ed4bf93e31d
diff --git a/go.sum b/go.sum
index 77baa08f..125169af 100644
--- a/go.sum
+++ b/go.sum
@@ -101,6 +101,14 @@ github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/couchbase/go-couchbase v0.1.1 h1:ClFXELcKj/ojyoTYbsY34QUrrYCBi/1G749sXSCkdhk=
+github.com/couchbase/go-couchbase v0.1.1/go.mod h1:+/bddYDxXsf9qt0xpDUtRR47A2GjaXmGGAqQ/k3GJ8A=
+github.com/couchbase/go_n1ql v0.0.0-20220303011133-0ed4bf93e31d h1:jOxYt3U9z+tj2WDvacvBhXmHXDt+EUR5Hbu56wTw6QY=
+github.com/couchbase/go_n1ql v0.0.0-20220303011133-0ed4bf93e31d/go.mod h1:Rn19fO9CVfhJkqyIED9ixL5Kh5XuH7hXgDTxyfGY7hM=
+github.com/couchbase/gomemcached v0.3.3 h1:D7qqXLO8wNa4pn5oE65lT3pA3IeStn4joT7/JgGXzKc=
+github.com/couchbase/gomemcached v0.3.3/go.mod h1:pISAjweI42vljCumsJIo7CVhqIMIIP9g3Wfhl1JJw68=
+github.com/couchbase/goutils v0.1.2 h1:gWr8B6XNWPIhfalHNog3qQKfGiYyh4K4VhO3P2o9BCs=
+github.com/couchbase/goutils v0.1.2/go.mod h1:h89Ek/tiOxxqjz30nPPlwZdQbdB8BwgnuBxeoUe/ViE=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/danieljoos/wincred v1.2.2 h1:774zMFJrqaeYCK2W57BgAem/MLi6mtSE47MB6BOJ0i0=
github.com/danieljoos/wincred v1.2.2/go.mod h1:w7w4Utbrz8lqeMbDAK0lkNJUv5sAOkFi7nd/ogr0Uh8=
@@ -183,6 +191,7 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/flatbuffers v24.3.25+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/flatbuffers v24.12.23+incompatible h1:ubBKR94NR4pXUCY/MUsRVzd9umNW7ht7EG9hHfS9FX8=
github.com/google/flatbuffers v24.12.23+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -193,6 +202,7 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
@@ -403,6 +413,7 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
@@ -460,6 +471,10 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
+golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
+golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
+golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
+golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -474,6 +489,10 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM=
golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -492,6 +511,10 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
+golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
+golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -505,6 +528,11 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
+golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -527,12 +555,23 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
+golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
+golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
+golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek=
golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -542,6 +581,11 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
@@ -561,6 +605,9 @@ golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY=
golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY=
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/job.go b/job.go
index 64cdab20..c78c2406 100644
--- a/job.go
+++ b/job.go
@@ -31,6 +31,7 @@ import (
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/rds/rdsutils"
+ go_n1ql "github.com/couchbase/go_n1ql" // register the N1QL driver
)
var (
@@ -347,6 +348,37 @@ func (j *Job) updateConnections() {
}
}
+			// N1QL support: n1ql://, or http(s):// on the Couchbase query-service port 8093.
+			if strings.HasPrefix(conn, "n1ql://") || strings.HasPrefix(conn, "http://") || strings.HasPrefix(conn, "https://") {
+				u, err := url.Parse(conn)
+				if err != nil {
+					level.Error(j.log).Log("msg", "Failed to parse N1QL URL", "url", conn, "err", err)
+					continue
+				}
+				if u.Scheme == "n1ql" || u.Port() == "8093" {
+					user := ""
+					password := ""
+					if u.User != nil {
+						user = u.User.Username()
+						password, _ = u.User.Password()
+					}
+					j.conns = append(j.conns, &connection{
+						conn:     nil,
+						url:      conn,
+						driver:   "n1ql",
+						host:     u.Host,
+						database: strings.TrimPrefix(u.Path, "/"),
+						user:     user,
+					})
+					// NOTE: SetQueryParams is driver-global; the last N1QL connection wins.
+					// %q keeps the creds JSON valid if the password contains '"' or '\'.
+					ac := fmt.Sprintf(`[{"user": %q, "pass": %q}]`, "local:"+user, password)
+					go_n1ql.SetQueryParams("creds", ac)
+					go_n1ql.SetQueryParams("timeout", "30s")
+					continue
+				}
+			}
+
u, err := url.Parse(conn)
if err != nil {
level.Error(j.log).Log("msg", "Failed to parse URL", "url", conn, "err", err)
diff --git a/query.go b/query.go
index afa07269..df864290 100644
--- a/query.go
+++ b/query.go
@@ -1,6 +1,7 @@
package main
import (
+ "encoding/json"
"fmt"
"strconv"
"strings"
@@ -194,8 +195,10 @@ func (q *Query) updateMetrics(conn *connection, res map[string]interface{}, iv s
// updateMetrics parses a single row and returns a const metric
func (q *Query) updateMetric(conn *connection, res map[string]interface{}, valueName string, iv string, il string) (prometheus.Metric, error) {
+ //level.Debug(q.log).Log("msg", "Updating metric", "valueName", valueName, "res", res)
var value float64
if i, ok := res[valueName]; ok {
+ //level.Debug(q.log).Log("msg", "Value type", "type", fmt.Sprintf("%T", i), "valueName", valueName)
switch f := i.(type) {
case int:
value = float64(f)
@@ -214,11 +217,36 @@ func (q *Query) updateMetric(conn *connection, res map[string]interface{}, value
case float64:
value = float64(f)
case []uint8:
- val, err := strconv.ParseFloat(string(f), 64)
- if err != nil {
- return nil, fmt.Errorf("column '%s' must be type float, is '%T' (val: %s)", valueName, i, f)
+			if conn.driver == "n1ql" {
+				// N1QL values arrive as raw bytes: either a plain number, or a
+				// JSON object nesting the number under the column name.
+				if val, err := strconv.ParseFloat(string(f), 64); err == nil {
+					value = val
+				} else {
+					var jsonData map[string]interface{}
+					if err := json.Unmarshal(f, &jsonData); err != nil {
+						// Don't silently report 0 for unparseable data.
+						return nil, fmt.Errorf("column '%s' is neither float nor JSON, is '%T' (val: %s)", valueName, i, f)
+					}
+					v, ok := jsonData[valueName]
+					if !ok {
+						return nil, fmt.Errorf("JSON does not contain field '%s'", valueName)
+					}
+					// json.Unmarshal decodes every JSON number as float64.
+					num, ok := v.(float64)
+					if !ok {
+						return nil, fmt.Errorf("JSON field '%s' must be numeric, got %T", valueName, v)
+					}
+					value = num
+				}
+			} else {
+				// Non-N1QL drivers: the bytes must be plain float text.
+				val, err := strconv.ParseFloat(string(f), 64)
+				if err != nil {
+					return nil, fmt.Errorf("column '%s' must be type float or JSON with numeric field, is '%T' (val: %s)", valueName, i, f)
+				}
+				value = val
+			}
- value = val
case string:
val, err := strconv.ParseFloat(f, 64)
if err != nil {
@@ -255,7 +283,18 @@ func (q *Query) updateMetric(conn *connection, res map[string]interface{}, value
case string:
lv = str
case []uint8:
- lv = string(str)
+ if conn.driver == "n1ql" {
+ // For N1QL, treat as JSON containing a single scalar string
+ var jsonStr string
+ //level.Debug(q.log).Log("msg", "Parsing JSON data", "data", string(str), "label", label)
+ if err := json.Unmarshal(str, &jsonStr); err != nil {
+ lv = string(str) // Fallback to raw string if not valid JSON
+ } else {
+ lv = jsonStr
+ }
+ } else {
+ lv = string(str)
+ }
default:
return nil, fmt.Errorf("column '%s' must be type text (string)", label)
}
diff --git a/vendor/github.com/couchbase/go-couchbase/.gitignore b/vendor/github.com/couchbase/go-couchbase/.gitignore
new file mode 100644
index 00000000..eda885ce
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/.gitignore
@@ -0,0 +1,14 @@
+#*
+*.6
+*.a
+*~
+*.swp
+/examples/basic/basic
+/hello/hello
+/populate/populate
+/tools/view2go/view2go
+/tools/loadfile/loadfile
+gotags.files
+TAGS
+6.out
+_*
diff --git a/vendor/github.com/couchbase/go-couchbase/.travis.yml b/vendor/github.com/couchbase/go-couchbase/.travis.yml
new file mode 100644
index 00000000..4ecafb18
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/.travis.yml
@@ -0,0 +1,5 @@
+language: go
+install: go get -v -d ./... && go build -v ./...
+script: go test -v ./...
+
+go: 1.1.1
diff --git a/vendor/github.com/couchbase/go-couchbase/LICENSE b/vendor/github.com/couchbase/go-couchbase/LICENSE
new file mode 100644
index 00000000..0b23ef35
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2013 Couchbase, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/couchbase/go-couchbase/README.markdown b/vendor/github.com/couchbase/go-couchbase/README.markdown
new file mode 100644
index 00000000..bf5fe494
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/README.markdown
@@ -0,0 +1,37 @@
+# A smart client for couchbase in go
+
+This is an *unofficial* version of a Couchbase Golang client. If you are
+looking for the *official* Couchbase Golang client please see
+[CB-go](https://github.com/couchbaselabs/gocb).
+
+This is an evolving package, but does provide a useful interface to a
+[couchbase](http://www.couchbase.com/) server including all of the
+pool/bucket discovery features, compatible key distribution with other
+clients, and vbucket motion awareness so application can continue to
+operate during rebalances.
+
+It also supports view querying with source node randomization so you
+don't bang on all one node to do all the work.
+
+## Install
+
+ go get github.com/couchbase/go-couchbase
+
+## Example
+
+ c, err := couchbase.Connect("http://dev-couchbase.example.com:8091/")
+ if err != nil {
+ log.Fatalf("Error connecting: %v", err)
+ }
+
+ pool, err := c.GetPool("default")
+ if err != nil {
+ log.Fatalf("Error getting pool: %v", err)
+ }
+
+ bucket, err := pool.GetBucket("default")
+ if err != nil {
+ log.Fatalf("Error getting bucket: %v", err)
+ }
+
+ bucket.Set("someKey", 0, []string{"an", "example", "list"})
diff --git a/vendor/github.com/couchbase/go-couchbase/audit.go b/vendor/github.com/couchbase/go-couchbase/audit.go
new file mode 100644
index 00000000..3db7d9f9
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/audit.go
@@ -0,0 +1,32 @@
+package couchbase
+
+import ()
+
+// Sample data:
+// {"disabled":["12333", "22244"],"uid":"132492431","auditdEnabled":true,
+// "disabledUsers":[{"name":"bill","domain":"local"},{"name":"bob","domain":"local"}],
+// "logPath":"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/logs",
+// "rotateInterval":86400,"rotateSize":20971520}
+type AuditSpec struct {
+	Disabled       []uint32    `json:"disabled"`
+	Uid            string      `json:"uid"`
+	AuditdEnabled  bool        `json:"auditdEnabled"`
+	DisabledUsers  []AuditUser `json:"disabledUsers"`
+	LogPath        string      `json:"logPath"`
+	RotateInterval int64       `json:"rotateInterval"`
+	RotateSize     int64       `json:"rotateSize"`
+}
+
+type AuditUser struct {
+ Name string `json:"name"`
+ Domain string `json:"domain"`
+}
+
+func (c *Client) GetAuditSpec() (*AuditSpec, error) {
+ ret := &AuditSpec{}
+ err := c.parseURLResponse("/settings/audit", ret)
+ if err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
diff --git a/vendor/github.com/couchbase/go-couchbase/client.go b/vendor/github.com/couchbase/go-couchbase/client.go
new file mode 100644
index 00000000..dad36613
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/client.go
@@ -0,0 +1,1721 @@
+/*
+Package couchbase provides a smart client for go.
+
+Usage:
+
+ client, err := couchbase.Connect("http://myserver:8091/")
+ handleError(err)
+ pool, err := client.GetPool("default")
+ handleError(err)
+ bucket, err := pool.GetBucket("MyAwesomeBucket")
+ handleError(err)
+ ...
+
+or a shortcut for the bucket directly
+
+ bucket, err := couchbase.GetBucket("http://myserver:8091/", "default", "default")
+
+in any case, you can specify authentication credentials using
+standard URL userinfo syntax:
+
+ b, err := couchbase.GetBucket("http://bucketname:bucketpass@myserver:8091/",
+ "default", "bucket")
+*/
+package couchbase
+
+import (
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+ "unsafe"
+
+ "github.com/couchbase/gomemcached"
+ "github.com/couchbase/gomemcached/client" // package name is 'memcached'
+ "github.com/couchbase/goutils/logging"
+)
+
+// Mutation Token
+type MutationToken struct {
+ VBid uint16 // vbucket id
+ Guard uint64 // vbuuid
+ Value uint64 // sequence number
+}
+
+// Maximum number of times to retry a chunk of a bulk get on error.
+var MaxBulkRetries = 5000
+var backOffDuration time.Duration = 100 * time.Millisecond
+var MaxBackOffRetries = 25 // exponentail backOff result in over 30sec (25*13*0.1s)
+
+// If this is set to a nonzero duration, Do() and ViewCustom() will log a warning if the call
+// takes longer than that.
+var SlowServerCallWarningThreshold time.Duration
+
+func slowLog(startTime time.Time, format string, args ...interface{}) {
+ if elapsed := time.Now().Sub(startTime); elapsed > SlowServerCallWarningThreshold {
+ pc, _, _, _ := runtime.Caller(2)
+ caller := runtime.FuncForPC(pc).Name()
+ logging.Infof("go-couchbase: "+format+" in "+caller+" took "+elapsed.String(), args...)
+ }
+}
+
+// Return true if error is KEY_EEXISTS. Required by cbq-engine
+func IsKeyEExistsError(err error) bool {
+
+ res, ok := err.(*gomemcached.MCResponse)
+ if ok && res.Status == gomemcached.KEY_EEXISTS {
+ return true
+ }
+
+ return false
+}
+
+// Return true if error is KEY_ENOENT. Required by cbq-engine
+func IsKeyNoEntError(err error) bool {
+
+ res, ok := err.(*gomemcached.MCResponse)
+ if ok && res.Status == gomemcached.KEY_ENOENT {
+ return true
+ }
+
+ return false
+}
+
+// Return true if error suggests a bucket refresh is required. Required by cbq-engine
+func IsRefreshRequired(err error) bool {
+
+ res, ok := err.(*gomemcached.MCResponse)
+ if ok && (res.Status == gomemcached.NO_BUCKET || res.Status == gomemcached.NOT_MY_VBUCKET) {
+ return true
+ }
+
+ return false
+}
+
+// Return true if a collection is not known. Required by cbq-engine
+func IsUnknownCollection(err error) bool {
+
+ res, ok := err.(*gomemcached.MCResponse)
+ if ok && (res.Status == gomemcached.UNKNOWN_COLLECTION) {
+ return true
+ }
+
+ return false
+}
+
+// ClientOpCallback is called for each invocation of Do.
+var ClientOpCallback func(opname, k string, start time.Time, err error)
+
+// Do executes a function on a memcached connection to the node owning key "k"
+//
+// Note that this automatically handles transient errors by replaying
+// your function on a "not-my-vbucket" error, so don't assume
+// your command will only be executed only once.
+func (b *Bucket) Do(k string, f func(mc *memcached.Client, vb uint16) error) (err error) {
+ return b.Do2(k, f, true)
+}
+
+func (b *Bucket) Do2(k string, f func(mc *memcached.Client, vb uint16) error, deadline bool) (err error) {
+ var lastError error
+
+ if SlowServerCallWarningThreshold > 0 {
+ defer slowLog(time.Now(), "call to Do(%q)", k)
+ }
+
+ vb := b.VBHash(k)
+ maxTries := len(b.Nodes()) * 2
+ for i := 0; i < maxTries; i++ {
+ conn, pool, err := b.getConnectionToVBucket(vb)
+ if err != nil {
+ if (err == errNoPool || isConnError(err)) && backOff(i, maxTries, backOffDuration, true) {
+ b.Refresh()
+ continue
+ }
+ return err
+ }
+
+ if deadline && DefaultTimeout > 0 {
+ conn.SetDeadline(getDeadline(noDeadline, DefaultTimeout))
+ } else {
+ conn.SetDeadline(noDeadline)
+ }
+ lastError = f(conn, uint16(vb))
+
+ retry := false
+ discard := isOutOfBoundsError(lastError) || IsReadTimeOutError(lastError)
+
+ // MB-30967 / MB-31001 implement back off for transient errors
+ if resp, ok := lastError.(*gomemcached.MCResponse); ok {
+ switch resp.Status {
+ case gomemcached.NOT_MY_VBUCKET:
+ retry = backOff(i, maxTries, backOffDuration, false)
+ if retry {
+ b.Refresh()
+ }
+
+ // MB-28842: in case of NMVB, check if the node is still part of the map
+ // and ditch the connection if it isn't.
+ discard = b.checkVBmap(pool.Node())
+ case gomemcached.NOT_SUPPORTED:
+ discard = true
+ retry = true
+ case gomemcached.ENOMEM:
+ fallthrough
+ case gomemcached.TMPFAIL, gomemcached.EBUSY:
+ retry = backOff(i, maxTries, backOffDuration, true)
+ }
+ } else if lastError != nil && isConnError(lastError) && backOff(i, maxTries, backOffDuration, true) {
+ retry = true
+ }
+
+ if discard {
+ pool.Discard(conn)
+ } else {
+ pool.Return(conn)
+ }
+
+ if !retry {
+ return lastError
+ }
+ }
+
+ if resp, ok := lastError.(*gomemcached.MCResponse); ok {
+ err := gomemcached.StatusNames[resp.Status]
+ if err == "" {
+ err = fmt.Sprintf("KV status %v", resp.Status)
+ }
+ return fmt.Errorf("unable to complete action after %v attempts: %v", maxTries, err)
+ } else {
+ return fmt.Errorf("unable to complete action after %v attempts: %v", maxTries, lastError)
+ }
+}
+
+type GatheredStats struct {
+ Server string
+ Stats map[string]string
+ Err error
+}
+
+func getStatsParallel(sn string, b *Bucket, offset int, which string,
+ ch chan<- GatheredStats) {
+ pool := b.getConnPool(offset)
+ var gatheredStats GatheredStats
+
+ conn, err := pool.Get()
+ defer func() {
+ pool.Return(conn)
+ ch <- gatheredStats
+ }()
+
+ if err != nil {
+ gatheredStats = GatheredStats{Server: sn, Err: err}
+ } else {
+ conn.SetDeadline(getDeadline(time.Time{}, DefaultTimeout))
+ sm, err := conn.StatsMap(which)
+ gatheredStats = GatheredStats{Server: sn, Stats: sm, Err: err}
+ }
+}
+
+func getStatsParallelFunc(fn func(key, val []byte), sn string, b *Bucket, offset int, which string,
+ ch chan<- GatheredStats) {
+ pool := b.getConnPool(offset)
+
+ conn, err := pool.Get()
+
+ if err == nil {
+ conn.SetDeadline(getDeadline(time.Time{}, DefaultTimeout))
+ err = conn.StatsFunc(which, fn)
+ pool.Return(conn)
+ }
+ ch <- GatheredStats{Server: sn, Err: err}
+}
+
+// GetStats gets a set of stats from all servers.
+//
+// Returns a map of server ID -> map of stat key to map value.
+func (b *Bucket) GetStats(which string) map[string]map[string]string {
+ rv := map[string]map[string]string{}
+ for server, gs := range b.GatherStats(which) {
+ if len(gs.Stats) > 0 {
+ rv[server] = gs.Stats
+ }
+ }
+ return rv
+}
+
+// GatherStats returns a map of server ID -> GatheredStats from all servers.
+func (b *Bucket) GatherStats(which string) map[string]GatheredStats {
+ vsm := b.VBServerMap()
+ if vsm.ServerList == nil {
+ return nil
+ }
+
+ // Go grab all the things at once.
+ ch := make(chan GatheredStats, len(vsm.ServerList))
+ for i, sn := range vsm.ServerList {
+ go getStatsParallel(sn, b, i, which, ch)
+ }
+
+ // Gather the results
+ rv := map[string]GatheredStats{}
+ for range vsm.ServerList {
+ gs := <-ch
+ rv[gs.Server] = gs
+ }
+ return rv
+}
+
+// GatherStats returns a map of server ID -> GatheredStats from all servers.
+func (b *Bucket) GatherStatsFunc(which string, fn func(key, val []byte)) map[string]error {
+ var errMap map[string]error
+
+ vsm := b.VBServerMap()
+ if vsm.ServerList == nil {
+ return errMap
+ }
+
+ // Go grab all the things at once.
+ ch := make(chan GatheredStats, len(vsm.ServerList))
+ for i, sn := range vsm.ServerList {
+ go getStatsParallelFunc(fn, sn, b, i, which, ch)
+ }
+
+ // Gather the results
+ for range vsm.ServerList {
+ gs := <-ch
+ if gs.Err != nil {
+ if errMap == nil {
+ errMap = make(map[string]error)
+ errMap[gs.Server] = gs.Err
+ }
+ }
+ }
+ return errMap
+}
+
+type BucketStats int
+
+const (
+ StatCount = BucketStats(iota)
+ StatSize
+)
+
+var bucketStatString = []string{
+ "curr_items",
+ "ep_value_size",
+}
+
+var collectionStatString = []string{
+ "items",
+ "data_size",
+}
+
+// Get selected bucket or collection stats
+func (b *Bucket) GetIntStats(refresh bool, which []BucketStats, context ...*memcached.ClientContext) ([]int64, error) {
+ if refresh {
+ b.Refresh()
+ }
+
+ var vals []int64 = make([]int64, len(which))
+ if len(vals) == 0 {
+ return vals, nil
+ }
+
+ var outErr error
+ if len(context) > 0 {
+
+ collKey := fmt.Sprintf("collections-byid 0x%x", context[0].CollId)
+ errs := b.GatherStatsFunc(collKey, func(key, val []byte) {
+ for i, f := range which {
+ lk := len(key)
+ ls := len(collectionStatString[f])
+ if lk >= ls && string(key[lk-ls:]) == collectionStatString[f] {
+ v, err := strconv.ParseInt(string(val), 10, 64)
+ if err == nil {
+ atomic.AddInt64(&vals[i], v)
+ } else if outErr == nil {
+ outErr = err
+ }
+ }
+ }
+ })
+
+ // have to use a range to access any one element of a map
+ for _, err := range errs {
+ return nil, err
+ }
+ } else {
+ errs := b.GatherStatsFunc("", func(key, val []byte) {
+ for i, f := range which {
+ if string(key) == bucketStatString[f] {
+ v, err := strconv.ParseInt(string(val), 10, 64)
+ if err == nil {
+ atomic.AddInt64(&vals[i], v)
+ } else if outErr == nil {
+ outErr = err
+ }
+ }
+ }
+ })
+
+ // have to use a range to access any one element of a map
+ for _, err := range errs {
+ return nil, err
+ }
+ }
+
+ return vals, outErr
+}
+
// GetCount returns the total item count for the bucket, obtained from
// bucket stats. When refresh is true the cluster map is refreshed first.
// With a ClientContext the count covers only that collection (summed
// across nodes); otherwise the bucket-wide "curr_items" stat is summed.
func (b *Bucket) GetCount(refresh bool, context ...*memcached.ClientContext) (count int64, err error) {
	if refresh {
		b.Refresh()
	}

	var cnt int64
	if len(context) > 0 {
		// Per-collection stats group, keyed by collection id.
		key := fmt.Sprintf("collections-byid 0x%x", context[0].CollId)
		resKey := ""
		for _, gs := range b.GatherStats(key) {
			if len(gs.Stats) > 0 {

				// the key encodes the scope and collection id
				// we don't have the scope id, so we have to find it...
				// NOTE(review): assumes every key in this stats group shares
				// the same "<scope>:<collection>:" prefix; the prefix is taken
				// from the first key seen and reused for every node.
				if resKey == "" {
					for k, _ := range gs.Stats {
						resKey = strings.TrimRightFunc(k, func(r rune) bool {
							return r != ':'
						}) + "items"
						break
					}
				}
				cnt, err = strconv.ParseInt(gs.Stats[resKey], 10, 64)
				if err != nil {
					return 0, err
				}
				count += cnt
			} else if gs.Err != nil {
				return 0, gs.Err
			}
		}
	} else {
		// Sum "curr_items" across every node's stats.
		for _, gs := range b.GatherStats("") {
			if len(gs.Stats) > 0 {
				cnt, err = strconv.ParseInt(gs.Stats["curr_items"], 10, 64)
				if err != nil {
					return 0, err
				}
				count += cnt
			} else if gs.Err != nil {
				return 0, gs.Err
			}
		}
	}

	return count, nil
}
+
// GetSize returns the total document size (in bytes) for the bucket via
// bucket stats; it mirrors GetCount but reads the size stats. With a
// ClientContext it sums the collection's "data_size"; otherwise the
// bucket-wide "ep_value_size" stat is summed across nodes.
func (b *Bucket) GetSize(refresh bool, context ...*memcached.ClientContext) (size int64, err error) {

	if refresh {
		b.Refresh()
	}

	var sz int64
	if len(context) > 0 {
		// Per-collection stats group, keyed by collection id.
		key := fmt.Sprintf("collections-byid 0x%x", context[0].CollId)
		resKey := ""
		for _, gs := range b.GatherStats(key) {
			if len(gs.Stats) > 0 {

				// the key encodes the scope and collection id
				// we don't have the scope id, so we have to find it...
				// NOTE(review): same first-key prefix assumption as GetCount.
				if resKey == "" {
					for k, _ := range gs.Stats {
						resKey = strings.TrimRightFunc(k, func(r rune) bool {
							return r != ':'
						}) + "data_size"
						break
					}
				}
				sz, err = strconv.ParseInt(gs.Stats[resKey], 10, 64)
				if err != nil {
					return 0, err
				}
				size += sz
			} else if gs.Err != nil {
				return 0, gs.Err
			}
		}
	} else {
		// Sum "ep_value_size" across every node's stats.
		for _, gs := range b.GatherStats("") {
			if len(gs.Stats) > 0 {
				sz, err = strconv.ParseInt(gs.Stats["ep_value_size"], 10, 64)
				if err != nil {
					return 0, err
				}
				size += sz
			} else if gs.Err != nil {
				return 0, gs.Err
			}
		}
	}

	return size, nil
}
+
+func isAuthError(err error) bool {
+ estr := err.Error()
+ return strings.Contains(estr, "Auth failure")
+}
+
+func IsReadTimeOutError(err error) bool {
+ if err == nil {
+ return false
+ }
+ estr := err.Error()
+ return strings.Contains(estr, "read tcp") ||
+ strings.Contains(estr, "i/o timeout")
+}
+
+func isTimeoutError(err error) bool {
+ estr := err.Error()
+ return strings.Contains(estr, "i/o timeout") ||
+ strings.Contains(estr, "connection timed out") ||
+ strings.Contains(estr, "no route to host")
+}
+
+// Errors that are not considered fatal for our fetch loop
+func isConnError(err error) bool {
+ if err == io.EOF {
+ return true
+ }
+ estr := err.Error()
+ return strings.Contains(estr, "broken pipe") ||
+ strings.Contains(estr, "connection reset") ||
+ strings.Contains(estr, "connection refused") ||
+ strings.Contains(estr, "connection pool is closed")
+}
+
+func isOutOfBoundsError(err error) bool {
+ return err != nil && strings.Contains(err.Error(), "Out of Bounds error")
+
+}
+
+func isAddrNotAvailable(err error) bool {
+ if err == nil {
+ return false
+ }
+ estr := err.Error()
+ return strings.Contains(estr, "cannot assign requested address")
+}
+
+func getDeadline(reqDeadline time.Time, duration time.Duration) time.Time {
+ if reqDeadline.IsZero() {
+ if duration > 0 {
+ return time.Unix(time.Now().Unix(), 0).Add(duration)
+ } else {
+ return noDeadline
+ }
+ }
+ return reqDeadline
+}
+
// backOff sleeps before a retry and reports whether another attempt is
// allowed. Attempt 0 returns true immediately with no delay; later
// attempts sleep for duration, scaled linearly by the attempt number
// when exponential is set. Once attempt reaches maxAttempts it returns
// false without sleeping.
func backOff(attempt, maxAttempts int, duration time.Duration, exponential bool) bool {
	if attempt >= maxAttempts {
		return false
	}
	if attempt == 0 {
		// 0th attempt: retry immediately.
		return true
	}
	delay := duration
	if exponential {
		delay = time.Duration(attempt) * duration
	}
	time.Sleep(delay)
	return true
}
+
+func (b *Bucket) doBulkGet(vb uint16, keys []string, reqDeadline time.Time,
+ ch chan<- map[string]*gomemcached.MCResponse, ech chan<- error, subPaths []string,
+ eStatus *errorStatus, context ...*memcached.ClientContext) {
+ if SlowServerCallWarningThreshold > 0 {
+ defer slowLog(time.Now(), "call to doBulkGet(%d, %d keys)", vb, len(keys))
+ }
+
+ rv := _STRING_MCRESPONSE_POOL.Get()
+ attempts := 0
+ backOffAttempts := 0
+ done := false
+ bname := b.Name
+ var lastError error
+ for ; attempts < MaxBulkRetries && !done && !eStatus.errStatus; attempts++ {
+
+ if len(b.VBServerMap().VBucketMap) < int(vb) {
+ //fatal
+ err := fmt.Errorf("vbmap smaller than requested for %v", bname)
+ logging.Errorf("go-couchbase: %v vb %d vbmap len %d", err.Error(), vb, len(b.VBServerMap().VBucketMap))
+ ech <- err
+ return
+ }
+
+ masterID := b.VBServerMap().VBucketMap[vb][0]
+
+ if masterID < 0 {
+ // fatal
+ err := fmt.Errorf("No master node available for %v vb %d", bname, vb)
+ logging.Errorf("%v", err.Error())
+ ech <- err
+ return
+ }
+
+ // This stack frame exists to ensure we can clean up
+ // connection at a reasonable time.
+ err := func() error {
+ pool := b.getConnPool(masterID)
+ conn, err := pool.Get()
+ if err != nil {
+ if isAuthError(err) || isTimeoutError(err) {
+ logging.Errorf("Fatal Error %v : %v", bname, err)
+ ech <- err
+ return err
+ } else if isConnError(err) {
+ if !backOff(backOffAttempts, MaxBackOffRetries, backOffDuration, true) {
+ logging.Errorf("Connection Error %v : %v", bname, err)
+ ech <- err
+ return err
+ }
+ b.Refresh()
+ backOffAttempts++
+ } else if err == errNoPool {
+ if !backOff(backOffAttempts, MaxBackOffRetries, backOffDuration, true) {
+ logging.Errorf("Connection Error %v : %v", bname, err)
+ ech <- err
+ return err
+ }
+ err = b.Refresh()
+ if err != nil {
+ ech <- err
+ return err
+ }
+ backOffAttempts++
+
+ // retry, and make no noise
+ return nil
+ } else if isAddrNotAvailable(err) {
+ if !backOff(backOffAttempts, MaxBackOffRetries, backOffDuration, true) {
+ logging.Errorf("Out of ephemeral ports: %v : %v", bname, err)
+ ech <- err
+ return err
+ }
+ b.Refresh()
+ backOffAttempts++
+ }
+ if lastError == nil || err.Error() != lastError.Error() || MaxBulkRetries-1 == attempts {
+ if lastError != nil {
+ logging.Infof("(... attempt: %v) Pool Get returned %v: %v", attempts-1, bname, err)
+ }
+ logging.Infof("(Attempt: %v) Pool Get returned %v: %v", attempts, bname, err)
+ lastError = err
+ }
+ // retry
+ return nil
+ }
+ lastError = nil
+
+ conn.SetDeadline(getDeadline(reqDeadline, DefaultTimeout))
+ err = conn.GetBulk(vb, keys, rv, subPaths, context...)
+
+ discard := false
+ defer func() {
+ if discard {
+ pool.Discard(conn)
+ } else {
+ pool.Return(conn)
+ }
+ }()
+
+ switch err.(type) {
+ case *gomemcached.MCResponse:
+ notSMaxTries := len(b.Nodes()) * 2
+ st := err.(*gomemcached.MCResponse).Status
+ if st == gomemcached.NOT_MY_VBUCKET {
+
+ // increment first, as we want a delay
+ backOffAttempts++
+ backOff(backOffAttempts, MaxBackOffRetries, backOffDuration, false)
+ b.Refresh()
+ discard = b.checkVBmap(pool.Node())
+ return nil // retry
+ } else if st == gomemcached.NOT_SUPPORTED && attempts < notSMaxTries {
+ b.Refresh()
+ discard = b.checkVBmap(pool.Node())
+ return nil // retry
+ } else if st == gomemcached.EBUSY || st == gomemcached.LOCKED {
+ if (attempts % (MaxBulkRetries / 100)) == 0 {
+ logging.Infof("Retrying Memcached error (%v) FOR %v(vbid:%d, keys:%v)",
+ err.Error(), bname, vb, keys)
+ }
+ return nil // retry
+ } else if (st == gomemcached.ENOMEM || st == gomemcached.TMPFAIL) && backOff(backOffAttempts, MaxBackOffRetries, backOffDuration, true) {
+ // MB-30967 / MB-31001 use backoff for TMPFAIL too
+ backOffAttempts++
+ logging.Infof("Retrying Memcached error (%v) FOR %v(vbid:%d, keys:%v)",
+ err.Error(), bname, vb, keys)
+ return nil // retry
+ }
+ ech <- err
+ return err
+ case error:
+ if isOutOfBoundsError(err) {
+ // We got an out of bounds error or a read timeout error; retry the operation
+ discard = true
+ return nil
+ } else if isConnError(err) && backOff(backOffAttempts, MaxBackOffRetries, backOffDuration, true) {
+ backOffAttempts++
+ logging.Errorf("Connection Error: %s. Refreshing bucket %v (vbid:%v,keys:%v)",
+ err.Error(), bname, vb, keys)
+ discard = true
+ b.Refresh()
+ return nil // retry
+ } else if IsReadTimeOutError(err) {
+ discard = true
+ logging.Errorf("Attempt %v: Terminating bulk request on timeout.", attempts)
+ }
+ ech <- err
+ ch <- rv
+ return err
+ }
+
+ done = true
+ return nil
+ }()
+
+ if err != nil {
+ return
+ }
+ }
+
+ if attempts >= MaxBulkRetries {
+ err := fmt.Errorf("bulkget exceeded MaxBulkRetries for %v(vbid:%d,keys:%v)", bname, vb, keys)
+ logging.Errorf("%v", err.Error())
+ ech <- err
+ }
+
+ ch <- rv
+}
+
// errorStatus is shared across one GetBulk group; errStatus flips to
// true once any member fails, letting the rest of the group stop early.
// It is written by errorCollector and read by the queueing/fetch loops.
type errorStatus struct {
	errStatus bool
}

// vbBulkGet carries everything a pool worker needs to run one
// per-vbucket bulk get on behalf of a GetBulk call.
type vbBulkGet struct {
	b           *Bucket
	ch          chan<- map[string]*gomemcached.MCResponse // results; owned/closed by processBulkGet
	ech         chan<- error                              // errors; owned/closed by processBulkGet
	k           uint16                                    // vbucket id
	keys        []string                                  // keys that hash to vbucket k
	reqDeadline time.Time
	wg          *sync.WaitGroup
	subPaths    []string
	groupError  *errorStatus
	context     []*memcached.ClientContext
}

// Number of dispatch channels the shared bulk-get worker pool uses.
const _NUM_CHANNELS = 5

// Workers per channel: roughly half the CPUs, at least one.
var _NUM_CHANNEL_WORKERS = (runtime.NumCPU() + 1) / 2
var DefaultDialTimeout = time.Duration(0)
var DefaultTimeout = time.Duration(0)
var noDeadline = time.Time{}

// Buffer 4k requests per worker
var _VB_BULK_GET_CHANNELS []chan *vbBulkGet
+
// InitBulkGet sets the package-level dial/read timeouts and starts the
// shared pool of bulk-get workers: _NUM_CHANNELS buffered channels with
// _NUM_CHANNEL_WORKERS goroutines each. Call once before using GetBulk.
func InitBulkGet() {

	DefaultDialTimeout = 20 * time.Second
	DefaultTimeout = 120 * time.Second

	memcached.SetDefaultDialTimeout(DefaultDialTimeout)

	_VB_BULK_GET_CHANNELS = make([]chan *vbBulkGet, _NUM_CHANNELS)

	for i := 0; i < _NUM_CHANNELS; i++ {
		// Each channel buffers 16k requests per worker attached to it.
		channel := make(chan *vbBulkGet, 16*1024*_NUM_CHANNEL_WORKERS)
		_VB_BULK_GET_CHANNELS[i] = channel

		for j := 0; j < _NUM_CHANNEL_WORKERS; j++ {
			go vbBulkGetWorker(channel)
		}
	}
}
+
+func vbBulkGetWorker(ch chan *vbBulkGet) {
+ defer func() {
+ // Workers cannot panic and die
+ recover()
+ go vbBulkGetWorker(ch)
+ }()
+
+ for vbg := range ch {
+ vbDoBulkGet(vbg)
+ }
+}
+
// vbDoBulkGet runs one queued per-vbucket bulk get and signals the
// caller's WaitGroup when finished; the recover guard keeps a panicking
// request from killing the worker goroutine that invoked it.
func vbDoBulkGet(vbg *vbBulkGet) {
	defer vbg.wg.Done()
	defer func() {
		// Workers cannot panic and die
		recover()
	}()
	vbg.b.doBulkGet(vbg.k, vbg.keys, vbg.reqDeadline, vbg.ch, vbg.ech, vbg.subPaths, vbg.groupError, vbg.context...)
}
+
// _ERR_CHAN_FULL is reported when every dispatch channel is full and a
// bulk get has to be abandoned rather than queued.
var _ERR_CHAN_FULL = fmt.Errorf("Data request queue full, aborting query.")

// processBulkGet fans a vbucket→keys map out to the shared worker pool
// (one vbBulkGet per vbucket) and waits for all of them to finish. It
// owns ch and ech and guarantees both are closed before returning.
// Queueing stops early once the group has recorded an error.
func (b *Bucket) processBulkGet(kdm map[uint16][]string, reqDeadline time.Time,
	ch chan<- map[string]*gomemcached.MCResponse, ech chan<- error, subPaths []string,
	eStatus *errorStatus, context ...*memcached.ClientContext) {

	defer close(ch)
	defer close(ech)

	wg := &sync.WaitGroup{}

	for k, keys := range kdm {

		// GetBulk() group has error donot Queue items for this group
		if eStatus.errStatus {
			break
		}

		vbg := &vbBulkGet{
			b:           b,
			ch:          ch,
			ech:         ech,
			k:           k,
			keys:        keys,
			reqDeadline: reqDeadline,
			wg:          wg,
			subPaths:    subPaths,
			groupError:  eStatus,
			context:     context,
		}

		wg.Add(1)

		// Random int
		// Right shift to avoid 8-byte alignment, and take low bits
		// (cheap pseudo-random channel selection from the pointer).
		c := (uintptr(unsafe.Pointer(vbg)) >> 4) % _NUM_CHANNELS

		select {
		case _VB_BULK_GET_CHANNELS[c] <- vbg:
			// No-op
		default:
			// Buffer full, abandon the bulk get
			ech <- _ERR_CHAN_FULL
			wg.Add(-1)
		}
	}

	// Wait for my vb bulk gets
	wg.Wait()
}
+
+type multiError []error
+
+func (m multiError) Error() string {
+ if len(m) == 0 {
+ panic("Error of none")
+ }
+
+ return fmt.Sprintf("{%v errors, starting with %v}", len(m), m[0].Error())
+}
+
// Convert a stream of errors from ech into a multiError (or nil) and
// send down eout.
//
// At least one send is guaranteed on eout, but two is possible, so
// buffer the out channel appropriately.
func errorCollector(ech <-chan error, eout chan<- error, eStatus *errorStatus) {
	defer func() { eout <- nil }()
	var errs multiError
	for e := range ech {
		// Key-not-found is benign; any other error marks the whole
		// group as failed so queued work can be skipped.
		if !eStatus.errStatus && !IsKeyNoEntError(e) {
			eStatus.errStatus = true
		}

		errs = append(errs, e)
	}

	if len(errs) > 0 {
		eout <- errs
	}
}

// Fetches multiple keys concurrently, with []byte values
//
// This is a wrapper around GetBulk which converts all values returned
// by GetBulk from raw memcached responses into []byte slices.
// Returns one document for duplicate keys
func (b *Bucket) GetBulkRaw(keys []string, context ...*memcached.ClientContext) (map[string][]byte, error) {

	resp, eout := b.getBulk(keys, noDeadline, nil, context...)

	// Copy just the bodies out before the pooled response map is released.
	rv := make(map[string][]byte, len(keys))
	for k, av := range resp {
		rv[k] = av.Body
	}

	b.ReleaseGetBulkPools(resp)
	return rv, eout

}
+
// GetBulk fetches multiple keys concurrently.
//
// Unlike more convenient GETs, the entire response is returned in the
// map array for each key. Keys that were not found will not be included in
// the map.

func (b *Bucket) GetBulk(keys []string, reqDeadline time.Time, subPaths []string, context ...*memcached.ClientContext) (map[string]*gomemcached.MCResponse, error) {
	return b.getBulk(keys, reqDeadline, subPaths, context...)
}

// ReleaseGetBulkPools returns a GetBulk/getBulk result map to the shared
// pool; callers must not touch rv after this call.
func (b *Bucket) ReleaseGetBulkPools(rv map[string]*gomemcached.MCResponse) {
	_STRING_MCRESPONSE_POOL.Put(rv)
}
+
// getBulk groups keys by vbucket, dispatches per-vbucket fetches to the
// worker pool, merges partial result maps as they arrive, and returns
// the combined map plus the aggregated error (nil, a single error, or a
// multiError). Empty keys are ignored. The returned map comes from a
// pool — release it with ReleaseGetBulkPools.
func (b *Bucket) getBulk(keys []string, reqDeadline time.Time, subPaths []string, context ...*memcached.ClientContext) (map[string]*gomemcached.MCResponse, error) {
	kdm := _VB_STRING_POOL.Get()
	defer _VB_STRING_POOL.Put(kdm)
	for _, k := range keys {
		if k != "" {
			vb := uint16(b.VBHash(k))
			a, ok1 := kdm[vb]
			if !ok1 {
				a = _STRING_POOL.Get()
			}
			kdm[vb] = append(a, k)
		}
	}

	// Buffered for two sends: errorCollector may emit the multiError
	// and always emits a final nil.
	eout := make(chan error, 2)
	groupErrorStatus := &errorStatus{}

	// processBulkGet will own both of these channels and
	// guarantee they're closed before it returns.
	ch := make(chan map[string]*gomemcached.MCResponse)
	ech := make(chan error)

	go errorCollector(ech, eout, groupErrorStatus)
	go b.processBulkGet(kdm, reqDeadline, ch, ech, subPaths, groupErrorStatus, context...)

	var rv map[string]*gomemcached.MCResponse

	// Adopt the first partial map wholesale; fold later ones in and
	// return them to the pool.
	for m := range ch {
		if rv == nil {
			rv = m
			continue
		}

		for k, v := range m {
			rv[k] = v
		}
		_STRING_MCRESPONSE_POOL.Put(m)
	}

	return rv, <-eout
}
+
// WriteOptions is the set of option flags available for the Write
// method. They are ORed together to specify the desired request.
type WriteOptions int

const (
	// Raw specifies that the value is raw []byte or nil; don't
	// JSON-encode it.
	Raw = WriteOptions(1 << iota)
	// AddOnly indicates an item should only be written if it
	// doesn't exist, otherwise ErrKeyExists is returned.
	AddOnly
	// Persist causes the operation to block until the server
	// confirms the item is persisted.
	Persist
	// Indexable causes the operation to block until it's available via the index.
	Indexable
	// Append indicates the given value should be appended to the
	// existing value for the given key.
	Append
)

// optNames maps each WriteOptions flag to its display name, in the
// order WriteOptions.String emits them.
var optNames = []struct {
	opt  WriteOptions
	name string
}{
	{Raw, "raw"},
	{AddOnly, "addonly"}, {Persist, "persist"},
	{Indexable, "indexable"}, {Append, "append"},
}
+
+// String representation of WriteOptions
+func (w WriteOptions) String() string {
+ f := []string{}
+ for _, on := range optNames {
+ if w&on.opt != 0 {
+ f = append(f, on.name)
+ w &= ^on.opt
+ }
+ }
+ if len(f) == 0 || w != 0 {
+ f = append(f, fmt.Sprintf("0x%x", int(w)))
+ }
+ return strings.Join(f, "|")
+}
+
// Error returned from Write with AddOnly flag, when key already exists in the bucket.
var ErrKeyExists = errors.New("key exists")

// General-purpose value setter.
//
// The Set, Add and Delete methods are just wrappers around this. The
// interpretation of `v` depends on whether the `Raw` option is
// given. If it is, v must be a byte array or nil. (A nil value causes
// a delete.) If `Raw` is not given, `v` will be marshaled as JSON
// before being written. It must be JSON-marshalable and it must not
// be nil.
func (b *Bucket) Write(k string, flags, exp int, v interface{},
	opt WriteOptions, context ...*memcached.ClientContext) (err error) {

	// Delegates to WriteWithCAS, discarding the returned CAS value.
	_, err = b.WriteWithCAS(k, flags, exp, v, opt, context...)

	return err
}
+
// WriteWithCAS is Write, additionally returning the CAS of the
// mutation. AddOnly maps to Add (ErrKeyExists when the key is present),
// Append appends, a nil payload deletes, anything else is a plain Set;
// Persist/Indexable then wait for durability.
func (b *Bucket) WriteWithCAS(k string, flags, exp int, v interface{},
	opt WriteOptions, context ...*memcached.ClientContext) (cas uint64, err error) {

	if ClientOpCallback != nil {
		defer func(t time.Time) {
			ClientOpCallback(fmt.Sprintf("Write(%v)", opt), k, t, err)
		}(time.Now())
	}

	// Raw writes take v as []byte verbatim; otherwise JSON-encode it.
	var data []byte
	if opt&Raw == 0 {
		data, err = json.Marshal(v)
		if err != nil {
			return cas, err
		}
	} else if v != nil {
		data = v.([]byte)
	}

	var res *gomemcached.MCResponse
	err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
		if opt&AddOnly != 0 {
			res, err = memcached.UnwrapMemcachedError(
				mc.Add(vb, k, flags, exp, data, context...))
			if err == nil && res.Status != gomemcached.SUCCESS {
				if res.Status == gomemcached.KEY_EEXISTS {
					err = ErrKeyExists
				} else {
					err = res
				}
			}
		} else if opt&Append != 0 {
			res, err = mc.Append(vb, k, data, context...)
		} else if data == nil {
			res, err = mc.Del(vb, k, context...)
		} else {
			res, err = mc.Set(vb, k, flags, exp, data, context...)
		}

		// Only capture the CAS on success; res may be nil on failure.
		if err == nil {
			cas = res.Cas
		}

		return err
	})

	if err == nil && (opt&(Persist|Indexable) != 0) {
		err = b.WaitForPersistence(k, cas, data == nil)
	}

	return cas, err
}
+
+func (b *Bucket) WriteWithMT(k string, flags, exp int, v interface{},
+ opt WriteOptions, context ...*memcached.ClientContext) (mt *MutationToken, err error) {
+
+ if ClientOpCallback != nil {
+ defer func(t time.Time) {
+ ClientOpCallback(fmt.Sprintf("WriteWithMT(%v)", opt), k, t, err)
+ }(time.Now())
+ }
+
+ var data []byte
+ if opt&Raw == 0 {
+ data, err = json.Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ } else if v != nil {
+ data = v.([]byte)
+ }
+
+ var res *gomemcached.MCResponse
+ err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
+ if opt&AddOnly != 0 {
+ res, err = memcached.UnwrapMemcachedError(
+ mc.Add(vb, k, flags, exp, data, context...))
+ if err == nil && res.Status != gomemcached.SUCCESS {
+ if res.Status == gomemcached.KEY_EEXISTS {
+ err = ErrKeyExists
+ } else {
+ err = res
+ }
+ }
+ } else if opt&Append != 0 {
+ res, err = mc.Append(vb, k, data, context...)
+ } else if data == nil {
+ res, err = mc.Del(vb, k, context...)
+ } else {
+ res, err = mc.Set(vb, k, flags, exp, data, context...)
+ }
+
+ if len(res.Extras) >= 16 {
+ vbuuid := uint64(binary.BigEndian.Uint64(res.Extras[0:8]))
+ seqNo := uint64(binary.BigEndian.Uint64(res.Extras[8:16]))
+ mt = &MutationToken{VBid: vb, Guard: vbuuid, Value: seqNo}
+ }
+
+ return err
+ })
+
+ if err == nil && (opt&(Persist|Indexable) != 0) {
+ err = b.WaitForPersistence(k, res.Cas, data == nil)
+ }
+
+ return mt, err
+}
+
// Cas sets a value (JSON-encoded) in this bucket guarded by the given
// CAS, and returns the new CAS value.
func (b *Bucket) Cas(k string, exp int, cas uint64, v interface{}, context ...*memcached.ClientContext) (uint64, error) {
	return b.WriteCas(k, 0, exp, cas, v, 0, context...)
}

// CasRaw is Cas without JSON encoding: v must be []byte (or nil).
func (b *Bucket) CasRaw(k string, exp int, cas uint64, v interface{}, context ...*memcached.ClientContext) (uint64, error) {
	return b.WriteCas(k, 0, exp, cas, v, Raw, context...)
}
+
+func (b *Bucket) WriteCas(k string, flags, exp int, cas uint64, v interface{},
+ opt WriteOptions, context ...*memcached.ClientContext) (newCas uint64, err error) {
+
+ if ClientOpCallback != nil {
+ defer func(t time.Time) {
+ ClientOpCallback(fmt.Sprintf("Write(%v)", opt), k, t, err)
+ }(time.Now())
+ }
+
+ var data []byte
+ if opt&Raw == 0 {
+ data, err = json.Marshal(v)
+ if err != nil {
+ return 0, err
+ }
+ } else if v != nil {
+ data = v.([]byte)
+ }
+
+ var res *gomemcached.MCResponse
+ err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
+ res, err = mc.SetCas(vb, k, flags, exp, cas, data, context...)
+ return err
+ })
+
+ if err == nil && (opt&(Persist|Indexable) != 0) {
+ err = b.WaitForPersistence(k, res.Cas, data == nil)
+ }
+
+ return res.Cas, err
+}
+
// Extended CAS operation. These functions will return the mutation token, i.e vbuuid & guard
func (b *Bucket) CasWithMeta(k string, flags int, exp int, cas uint64, v interface{}, context ...*memcached.ClientContext) (uint64, *MutationToken, error) {
	return b.WriteCasWithMT(k, flags, exp, cas, v, 0, context...)
}

// CasWithMetaRaw is CasWithMeta without JSON-encoding v.
func (b *Bucket) CasWithMetaRaw(k string, flags int, exp int, cas uint64, v interface{}, context ...*memcached.ClientContext) (uint64, *MutationToken, error) {
	return b.WriteCasWithMT(k, flags, exp, cas, v, Raw, context...)
}

// WriteCasWithMT performs a CAS-guarded SetCas and returns the new CAS
// plus the mutation token when the response extras carry one.
func (b *Bucket) WriteCasWithMT(k string, flags, exp int, cas uint64, v interface{},
	opt WriteOptions, context ...*memcached.ClientContext) (newCas uint64, mt *MutationToken, err error) {

	if ClientOpCallback != nil {
		defer func(t time.Time) {
			ClientOpCallback(fmt.Sprintf("Write(%v)", opt), k, t, err)
		}(time.Now())
	}

	var data []byte
	if opt&Raw == 0 {
		data, err = json.Marshal(v)
		if err != nil {
			return 0, nil, err
		}
	} else if v != nil {
		data = v.([]byte)
	}

	var res *gomemcached.MCResponse
	err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
		res, err = mc.SetCas(vb, k, flags, exp, cas, data, context...)
		return err
	})

	if err != nil {
		return 0, nil, err
	}

	// check for extras: [0:8] vbuuid, [8:16] seqno.
	if len(res.Extras) >= 16 {
		vbuuid := uint64(binary.BigEndian.Uint64(res.Extras[0:8]))
		seqNo := uint64(binary.BigEndian.Uint64(res.Extras[8:16]))
		vb := b.VBHash(k)
		mt = &MutationToken{VBid: uint16(vb), Guard: vbuuid, Value: seqNo}
	}

	// err is always nil here (checked above); the guard is redundant
	// but harmless, kept for symmetry with the other writers.
	if err == nil && (opt&(Persist|Indexable) != 0) {
		err = b.WaitForPersistence(k, res.Cas, data == nil)
	}

	return res.Cas, mt, err
}
+
// Set a value in this bucket.
// The value will be serialized into a JSON document.
func (b *Bucket) Set(k string, exp int, v interface{}, context ...*memcached.ClientContext) error {
	return b.Write(k, 0, exp, v, 0, context...)
}

// SetWithCAS sets a JSON-encoded value in this bucket and returns the
// CAS of the mutation.
func (b *Bucket) SetWithCAS(k string, exp int, v interface{}, context ...*memcached.ClientContext) (uint64, error) {
	return b.WriteWithCAS(k, 0, exp, v, 0, context...)
}

// SetWithMeta sets a JSON-encoded value with the given memcached flags
// and returns the mutation token.
func (b *Bucket) SetWithMeta(k string, flags int, exp int, v interface{}, context ...*memcached.ClientContext) (*MutationToken, error) {
	return b.WriteWithMT(k, flags, exp, v, 0, context...)
}

// SetRaw sets a value in this bucket without JSON encoding it.
func (b *Bucket) SetRaw(k string, exp int, v []byte, context ...*memcached.ClientContext) error {
	return b.Write(k, 0, exp, v, Raw, context...)
}
+
// Add adds a value to this bucket; like Set except that nothing
// happens if the key exists. The value will be serialized into a
// JSON document. Returns false (not an error) if the key was present.
func (b *Bucket) Add(k string, exp int, v interface{}, context ...*memcached.ClientContext) (added bool, err error) {
	err = b.Write(k, 0, exp, v, AddOnly, context...)
	if err == ErrKeyExists {
		return false, nil
	}
	return (err == nil), err
}

// AddWithCAS is Add, additionally returning the CAS of the new item
// (0 when the key already existed).
func (b *Bucket) AddWithCAS(k string, exp int, v interface{}, context ...*memcached.ClientContext) (bool, uint64, error) {
	cas, err := b.WriteWithCAS(k, 0, exp, v, AddOnly, context...)
	if err == ErrKeyExists {
		return false, 0, nil
	}
	return (err == nil), cas, err
}

// AddRaw adds a value to this bucket; like SetRaw except that nothing
// happens if the key exists. The value will be stored as raw bytes.
func (b *Bucket) AddRaw(k string, exp int, v []byte, context ...*memcached.ClientContext) (added bool, err error) {
	err = b.Write(k, 0, exp, v, AddOnly|Raw, context...)
	if err == ErrKeyExists {
		return false, nil
	}
	return (err == nil), err
}

// AddWithMT is Add (JSON-encoded value), additionally returning the
// mutation token of the new item.
func (b *Bucket) AddWithMT(k string, exp int, v interface{}, context ...*memcached.ClientContext) (added bool, mt *MutationToken, err error) {
	mt, err = b.WriteWithMT(k, 0, exp, v, AddOnly, context...)
	if err == ErrKeyExists {
		return false, mt, nil
	}
	return (err == nil), mt, err
}

// AddRawWithMT is AddRaw (raw bytes), additionally returning the
// mutation token of the new item.
func (b *Bucket) AddRawWithMT(k string, exp int, v []byte, context ...*memcached.ClientContext) (added bool, mt *MutationToken, err error) {
	mt, err = b.WriteWithMT(k, 0, exp, v, AddOnly|Raw, context...)
	if err == ErrKeyExists {
		return false, mt, nil
	}
	return (err == nil), mt, err
}

// Append appends raw data to an existing item.
func (b *Bucket) Append(k string, data []byte, context ...*memcached.ClientContext) error {
	return b.Write(k, 0, 0, data, Append|Raw, context...)
}
+
// GetCollectionCID resolves a scope/collection name pair to numeric ids
// by asking any server (via a dummy key).
// Returns collectionUid, manifestUid, error.
func (b *Bucket) GetCollectionCID(scope string, collection string, reqDeadline time.Time) (uint32, uint32, error) {
	var err error
	var response *gomemcached.MCResponse

	if ClientOpCallback != nil {
		defer func(t time.Time) { ClientOpCallback("GetCollectionCID", scope+"."+collection, t, err) }(time.Now())
	}

	var key = "DUMMY" // Contact any server.
	var manifestUid uint32
	var collUid uint32
	err = b.Do2(key, func(mc *memcached.Client, vb uint16) error {
		var err1 error

		mc.SetDeadline(getDeadline(reqDeadline, DefaultTimeout))
		_, err1 = mc.SelectBucket(b.Name)
		if err1 != nil {
			return err1
		}

		response, err1 = mc.CollectionsGetCID(scope, collection)
		if err1 != nil {
			return err1
		}

		// Extras layout: [4:8] manifest uid, [8:12] collection uid.
		manifestUid = binary.BigEndian.Uint32(response.Extras[4:8])
		collUid = binary.BigEndian.Uint32(response.Extras[8:12])

		return nil
	}, false)

	return collUid, manifestUid, err
}
+
// GetsMC gets a value straight from memcached, returning the raw
// MCResponse. An empty key yields (nil, nil).
func (b *Bucket) GetsMC(key string, reqDeadline time.Time, context ...*memcached.ClientContext) (*gomemcached.MCResponse, error) {
	var err error
	var response *gomemcached.MCResponse

	if key == "" {
		return nil, nil
	}

	if ClientOpCallback != nil {
		defer func(t time.Time) { ClientOpCallback("GetsMC", key, t, err) }(time.Now())
	}

	err = b.Do2(key, func(mc *memcached.Client, vb uint16) error {
		var err1 error

		mc.SetDeadline(getDeadline(reqDeadline, DefaultTimeout))
		response, err1 = mc.Get(vb, key, context...)
		if err1 != nil {
			return err1
		}
		return nil
	}, false)
	return response, err
}

// GetsSubDoc gets selected paths of a document through the subdoc API,
// returning the raw MCResponse. An empty key yields (nil, nil).
func (b *Bucket) GetsSubDoc(key string, reqDeadline time.Time, subPaths []string, context ...*memcached.ClientContext) (*gomemcached.MCResponse, error) {
	var err error
	var response *gomemcached.MCResponse

	if key == "" {
		return nil, nil
	}

	if ClientOpCallback != nil {
		defer func(t time.Time) { ClientOpCallback("GetsSubDoc", key, t, err) }(time.Now())
	}

	err = b.Do2(key, func(mc *memcached.Client, vb uint16) error {
		var err1 error

		mc.SetDeadline(getDeadline(reqDeadline, DefaultTimeout))
		response, err1 = mc.GetSubdoc(vb, key, subPaths, context...)
		if err1 != nil {
			return err1
		}
		return nil
	}, false)
	return response, err
}
+
// GetsRaw gets a raw value from this bucket including its CAS
// counter and flags.
func (b *Bucket) GetsRaw(k string, context ...*memcached.ClientContext) (data []byte, flags int,
	cas uint64, err error) {

	if ClientOpCallback != nil {
		defer func(t time.Time) { ClientOpCallback("GetsRaw", k, t, err) }(time.Now())
	}

	err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
		res, err := mc.Get(vb, k, context...)
		if err != nil {
			return err
		}
		cas = res.Cas
		// The first four extras bytes, when present, are the item flags.
		if len(res.Extras) >= 4 {
			flags = int(binary.BigEndian.Uint32(res.Extras))
		}
		data = res.Body
		return nil
	})
	return
}
+
// Gets gets a value from this bucket, including its CAS counter. The
// value is expected to be a JSON stream and will be deserialized into
// rv. The CAS is stored through caso when caso is non-nil.
func (b *Bucket) Gets(k string, rv interface{}, caso *uint64, context ...*memcached.ClientContext) error {
	data, _, cas, err := b.GetsRaw(k, context...)
	if err != nil {
		return err
	}
	if caso != nil {
		*caso = cas
	}
	return json.Unmarshal(data, rv)
}

// Get a value from this bucket.
// The value is expected to be a JSON stream and will be deserialized
// into rv.
func (b *Bucket) Get(k string, rv interface{}, context ...*memcached.ClientContext) error {
	return b.Gets(k, rv, nil, context...)
}

// GetRaw gets a raw value from this bucket. No marshaling is performed.
func (b *Bucket) GetRaw(k string, context ...*memcached.ClientContext) ([]byte, error) {
	d, _, _, err := b.GetsRaw(k, context...)
	return d, err
}
+
+// GetAndTouchRaw gets a raw value from this bucket including its CAS
+// counter and flags, and updates the expiry on the doc.
+func (b *Bucket) GetAndTouchRaw(k string, exp int, context ...*memcached.ClientContext) (data []byte,
+ cas uint64, err error) {
+
+ if ClientOpCallback != nil {
+ defer func(t time.Time) { ClientOpCallback("GetsRaw", k, t, err) }(time.Now())
+ }
+
+ err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
+ res, err := mc.GetAndTouch(vb, k, exp, context...)
+ if err != nil {
+ return err
+ }
+ cas = res.Cas
+ data = res.Body
+ return nil
+ })
+ return data, cas, err
+}
+
// GetMeta returns the meta values for a key through the supplied out
// pointers. NOTE(review): cas (and, when extras are long enough, flags,
// expiry and seqNo) are dereferenced unconditionally — callers must
// pass non-nil pointers.
func (b *Bucket) GetMeta(k string, flags *int, expiry *int, cas *uint64, seqNo *uint64, context ...*memcached.ClientContext) (err error) {

	if ClientOpCallback != nil {
		defer func(t time.Time) { ClientOpCallback("GetsMeta", k, t, err) }(time.Now())
	}

	err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
		res, err := mc.GetMeta(vb, k, context...)
		if err != nil {
			return err
		}

		// Extras layout: [4:8] flags, [8:12] expiry, [12:20] seqno.
		*cas = res.Cas
		if len(res.Extras) >= 8 {
			*flags = int(binary.BigEndian.Uint32(res.Extras[4:]))
		}

		if len(res.Extras) >= 12 {
			*expiry = int(binary.BigEndian.Uint32(res.Extras[8:]))
		}

		if len(res.Extras) >= 20 {
			*seqNo = uint64(binary.BigEndian.Uint64(res.Extras[12:]))
		}

		return nil
	})

	return err
}

// Delete a key from this bucket (a raw write of a nil value).
func (b *Bucket) Delete(k string, context ...*memcached.ClientContext) error {
	return b.Write(k, 0, 0, nil, Raw, context...)
}
+
// Incr increments the value at a given key by amt and defaults to def if no value present.
func (b *Bucket) Incr(k string, amt, def uint64, exp int, context ...*memcached.ClientContext) (val uint64, err error) {
	if ClientOpCallback != nil {
		defer func(t time.Time) { ClientOpCallback("Incr", k, t, err) }(time.Now())
	}

	var rv uint64
	err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
		res, err := mc.Incr(vb, k, amt, def, exp, context...)
		if err != nil {
			return err
		}
		rv = res
		return nil
	})
	return rv, err
}

// Decr decrements the value at a given key by amt and defaults to def if no value present
func (b *Bucket) Decr(k string, amt, def uint64, exp int, context ...*memcached.ClientContext) (val uint64, err error) {
	if ClientOpCallback != nil {
		defer func(t time.Time) { ClientOpCallback("Decr", k, t, err) }(time.Now())
	}

	var rv uint64
	err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
		res, err := mc.Decr(vb, k, amt, def, exp, context...)
		if err != nil {
			return err
		}
		rv = res
		return nil
	})
	return rv, err
}
+
// Wrapper around memcached.CASNext(). Advances one step of a CAS
// retry loop, recording any failure in state.Err; returns true while
// the caller should keep iterating.
func (b *Bucket) casNext(k string, exp int, state *memcached.CASState) bool {
	if ClientOpCallback != nil {
		defer func(t time.Time) {
			ClientOpCallback("casNext", k, t, state.Err)
		}(time.Now())
	}

	keepGoing := false
	state.Err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
		keepGoing = mc.CASNext(vb, k, exp, state)
		return state.Err
	})
	return keepGoing && state.Err == nil
}
+
// An UpdateFunc is a callback function to update a document
type UpdateFunc func(current []byte) (updated []byte, err error)

// Return this as the error from an UpdateFunc to cancel the Update
// operation.
const UpdateCancel = memcached.CASQuit

// Update performs a Safe update of a document, avoiding conflicts by
// using CAS.
//
// The callback function will be invoked with the current raw document
// contents (or nil if the document doesn't exist); it should return
// the updated raw contents (or nil to delete.) If it decides not to
// change anything it can return UpdateCancel as the error.
//
// If another writer modifies the document between the get and the
// set, the callback will be invoked again with the newer value.
func (b *Bucket) Update(k string, exp int, callback UpdateFunc) error {
	_, err := b.update(k, exp, callback)
	return err
}

// internal version of Update that returns a CAS value
func (b *Bucket) update(k string, exp int, callback UpdateFunc) (newCas uint64, err error) {
	var state memcached.CASState
	// casNext loops until the CAS write sticks (or fails); the callback
	// transforms state.Value in place on each round.
	for b.casNext(k, exp, &state) {
		var err error
		if state.Value, err = callback(state.Value); err != nil {
			return 0, err
		}
	}
	return state.Cas, state.Err
}
+
+// A WriteUpdateFunc is a callback function to update a document
+type WriteUpdateFunc func(current []byte) (updated []byte, opt WriteOptions, err error)
+
+// WriteUpdate performs a Safe update of a document, avoiding
+// conflicts by using CAS. WriteUpdate is like Update, except that
+// the callback can return a set of WriteOptions, of which Persist and
+// Indexable are recognized: these cause the call to wait until the
+// document update has been persisted to disk and/or become available
+// to index.
+func (b *Bucket) WriteUpdate(k string, exp int, callback WriteUpdateFunc) error {
+ var writeOpts WriteOptions
+ var deletion bool
+ // Wrap the callback in an UpdateFunc we can pass to Update:
+ updateCallback := func(current []byte) (updated []byte, err error) {
+ update, opt, err := callback(current)
+ writeOpts = opt
+ deletion = (update == nil)
+ return update, err
+ }
+ cas, err := b.update(k, exp, updateCallback)
+ if err != nil {
+ return err
+ }
+ // If callback asked, wait for persistence or indexability:
+ if writeOpts&(Persist|Indexable) != 0 {
+ err = b.WaitForPersistence(k, cas, deletion)
+ }
+ return err
+}
+
+// Observe observes the current state of a document.
+func (b *Bucket) Observe(k string) (result memcached.ObserveResult, err error) {
+ if ClientOpCallback != nil {
+ defer func(t time.Time) { ClientOpCallback("Observe", k, t, err) }(time.Now())
+ }
+
+ err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
+ result, err = mc.Observe(vb, k)
+ return err
+ })
+ return
+}
+
+// Returned from WaitForPersistence (or Write, if the Persistent or Indexable flag is used)
+// if the value has been overwritten by another before being persisted.
+var ErrOverwritten = errors.New("overwritten")
+
+// Returned from WaitForPersistence (or Write, if the Persistent or Indexable flag is used)
+// if the value hasn't been persisted by the timeout interval
+var ErrTimeout = errors.New("timeout")
+
+// WaitForPersistence waits for an item to be considered durable.
+//
+// Besides transport errors, ErrOverwritten may be returned if the
+// item is overwritten before it reaches durability. ErrTimeout may
+// occur if the item isn't found durable in a reasonable amount of
+// time.
+func (b *Bucket) WaitForPersistence(k string, cas uint64, deletion bool) error {
+ timeout := 10 * time.Second
+ sleepDelay := 5 * time.Millisecond
+ start := time.Now()
+ for {
+ time.Sleep(sleepDelay)
+ sleepDelay += sleepDelay / 2 // multiply delay by 1.5 every time
+
+ result, err := b.Observe(k)
+ if err != nil {
+ return err
+ }
+ if persisted, overwritten := result.CheckPersistence(cas, deletion); overwritten {
+ return ErrOverwritten
+ } else if persisted {
+ return nil
+ }
+
+ if result.PersistenceTime > 0 {
+ timeout = 2 * result.PersistenceTime
+ }
+ if time.Since(start) >= timeout-sleepDelay {
+ return ErrTimeout
+ }
+ }
+}
+
+var _STRING_MCRESPONSE_POOL = gomemcached.NewStringMCResponsePool(16)
+
+type stringPool struct {
+ pool *sync.Pool
+ size int
+}
+
+func newStringPool(size int) *stringPool {
+ rv := &stringPool{
+ pool: &sync.Pool{
+ New: func() interface{} {
+ return make([]string, 0, size)
+ },
+ },
+ size: size,
+ }
+
+ return rv
+}
+
+func (this *stringPool) Get() []string {
+ return this.pool.Get().([]string)
+}
+
+func (this *stringPool) Put(s []string) {
+ if s == nil || cap(s) < this.size || cap(s) > 2*this.size {
+ return
+ }
+
+ this.pool.Put(s[0:0])
+}
+
+var _STRING_POOL = newStringPool(16)
+
+type vbStringPool struct {
+ pool *sync.Pool
+ strPool *stringPool
+}
+
+func newVBStringPool(size int, sp *stringPool) *vbStringPool {
+ rv := &vbStringPool{
+ pool: &sync.Pool{
+ New: func() interface{} {
+ return make(map[uint16][]string, size)
+ },
+ },
+ strPool: sp,
+ }
+
+ return rv
+}
+
+func (this *vbStringPool) Get() map[uint16][]string {
+ return this.pool.Get().(map[uint16][]string)
+}
+
+func (this *vbStringPool) Put(s map[uint16][]string) {
+ if s == nil {
+ return
+ }
+
+ for k, v := range s {
+ delete(s, k)
+ this.strPool.Put(v)
+ }
+
+ this.pool.Put(s)
+}
+
+var _VB_STRING_POOL = newVBStringPool(16, _STRING_POOL)
diff --git a/vendor/github.com/couchbase/go-couchbase/conn_pool.go b/vendor/github.com/couchbase/go-couchbase/conn_pool.go
new file mode 100644
index 00000000..47854c09
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/conn_pool.go
@@ -0,0 +1,421 @@
+package couchbase
+
+import (
+ "crypto/tls"
+ "errors"
+ "sync/atomic"
+ "time"
+
+ "github.com/couchbase/gomemcached"
+ "github.com/couchbase/gomemcached/client"
+ "github.com/couchbase/goutils/logging"
+)
+
+// GenericMcdAuthHandler is a kind of AuthHandler that performs
+// special auth exchange (like non-standard auth, possibly followed by
+// select-bucket).
+type GenericMcdAuthHandler interface {
+ AuthHandler
+ AuthenticateMemcachedConn(host string, conn *memcached.Client) error
+}
+
+// Error raised when a connection can't be retrieved from a pool.
+var TimeoutError = errors.New("timeout waiting to build connection")
+var errClosedPool = errors.New("the connection pool is closed")
+var errNoPool = errors.New("no connection pool")
+
+// Default timeout for retrieving a connection from the pool.
+var ConnPoolTimeout = time.Hour * 24 * 30
+
+// overflow connection closer cycle time
+var ConnCloserInterval = time.Second * 30
+
+// ConnPoolAvailWaitTime is the amount of time to wait for an existing
+// connection from the pool before considering the creation of a new
+// one.
+var ConnPoolAvailWaitTime = time.Millisecond
+
+type connectionPool struct {
+ host string
+ mkConn func(host string, ah AuthHandler, tlsConfig *tls.Config, bucketName string) (*memcached.Client, error)
+ auth AuthHandler
+ connections chan *memcached.Client
+ createsem chan bool
+ bailOut chan bool
+ poolSize int
+ connCount uint64
+ inUse bool
+ encrypted bool
+ tlsConfig *tls.Config
+ bucket string
+}
+
+func newConnectionPool(host string, ah AuthHandler, closer bool, poolSize, poolOverflow int, tlsConfig *tls.Config, bucket string, encrypted bool) *connectionPool {
+ connSize := poolSize
+ if closer {
+ connSize += poolOverflow
+ }
+ rv := &connectionPool{
+ host: host,
+ connections: make(chan *memcached.Client, connSize),
+ createsem: make(chan bool, poolSize+poolOverflow),
+ mkConn: defaultMkConn,
+ auth: ah,
+ poolSize: poolSize,
+ bucket: bucket,
+ encrypted: encrypted,
+ }
+
+ if encrypted {
+ rv.tlsConfig = tlsConfig
+ }
+
+ if closer {
+ rv.bailOut = make(chan bool, 1)
+ go rv.connCloser()
+ }
+ return rv
+}
+
+// ConnPoolTimeout is notified whenever connections are acquired from a pool.
+var ConnPoolCallback func(host string, source string, start time.Time, err error)
+
+// Use regular in-the-clear connection if tlsConfig is nil.
+// Use secure connection (TLS) if tlsConfig is set.
+func defaultMkConn(host string, ah AuthHandler, tlsConfig *tls.Config, bucketName string) (*memcached.Client, error) {
+ var features memcached.Features
+
+ var conn *memcached.Client
+ var err error
+ if tlsConfig == nil {
+ conn, err = memcached.Connect("tcp", host)
+ } else {
+ conn, err = memcached.ConnectTLS("tcp", host, tlsConfig)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ if DefaultTimeout > 0 {
+ conn.SetDeadline(getDeadline(noDeadline, DefaultTimeout))
+ }
+
+ if TCPKeepalive == true {
+ conn.SetKeepAliveOptions(time.Duration(TCPKeepaliveInterval) * time.Second)
+ }
+
+ if EnableMutationToken == true {
+ features = append(features, memcached.FeatureMutationToken)
+ }
+ if EnableDataType == true {
+ features = append(features, memcached.FeatureDataType)
+ }
+
+ if EnableXattr == true {
+ features = append(features, memcached.FeatureXattr)
+ }
+
+ if EnableCollections {
+ features = append(features, memcached.FeatureCollections)
+ }
+
+ if len(features) > 0 {
+ res, err := conn.EnableFeatures(features)
+ if err != nil && isTimeoutError(err) {
+ conn.Close()
+ return nil, err
+ }
+
+ if err != nil || res.Status != gomemcached.SUCCESS {
+ logging.Warnf("Unable to enable features %v", err)
+ }
+ }
+
+ if gah, ok := ah.(GenericMcdAuthHandler); ok {
+ err = gah.AuthenticateMemcachedConn(host, conn)
+ if err != nil {
+ conn.Close()
+ return nil, err
+ }
+
+ if DefaultTimeout > 0 {
+ conn.SetDeadline(noDeadline)
+ }
+
+ return conn, nil
+ }
+ name, pass, bucket := ah.GetCredentials()
+ if bucket == "" {
+ // Authenticator does not know specific bucket.
+ bucket = bucketName
+ }
+
+ if name != "default" {
+ _, err = conn.Auth(name, pass)
+ if err != nil {
+ conn.Close()
+ return nil, err
+ }
+ // Select bucket (Required for cb_auth creds)
+ // Required when doing auth with _admin credentials
+ if bucket != "" && bucket != name {
+ _, err = conn.SelectBucket(bucket)
+ if err != nil {
+ conn.Close()
+ return nil, err
+ }
+ }
+ }
+
+ if DefaultTimeout > 0 {
+ conn.SetDeadline(noDeadline)
+ }
+
+ return conn, nil
+}
+
+func (cp *connectionPool) Close() (err error) {
+ defer func() {
+ if recover() != nil {
+ err = errors.New("connectionPool.Close error")
+ }
+ }()
+ if cp.bailOut != nil {
+
+ // defensively, we won't wait if the channel is full
+ select {
+ case cp.bailOut <- false:
+ default:
+ }
+ }
+ close(cp.connections)
+ for c := range cp.connections {
+ c.Close()
+ }
+ return
+}
+
+func (cp *connectionPool) Node() string {
+ return cp.host
+}
+
+func (cp *connectionPool) GetWithTimeout(d time.Duration) (rv *memcached.Client, err error) {
+ if cp == nil {
+ return nil, errNoPool
+ }
+
+ path := ""
+
+ if ConnPoolCallback != nil {
+ defer func(path *string, start time.Time) {
+ ConnPoolCallback(cp.host, *path, start, err)
+ }(&path, time.Now())
+ }
+
+ path = "short-circuit"
+
+ // short-circuit available connetions.
+ select {
+ case rv, isopen := <-cp.connections:
+ if !isopen {
+ return nil, errClosedPool
+ }
+ atomic.AddUint64(&cp.connCount, 1)
+ return rv, nil
+ default:
+ }
+
+ t := time.NewTimer(ConnPoolAvailWaitTime)
+ defer t.Stop()
+
+ // Try to grab an available connection within 1ms
+ select {
+ case rv, isopen := <-cp.connections:
+ path = "avail1"
+ if !isopen {
+ return nil, errClosedPool
+ }
+ atomic.AddUint64(&cp.connCount, 1)
+ return rv, nil
+ case <-t.C:
+ // No connection came around in time, let's see
+ // whether we can get one or build a new one first.
+ t.Reset(d) // Reuse the timer for the full timeout.
+ select {
+ case rv, isopen := <-cp.connections:
+ path = "avail2"
+ if !isopen {
+ return nil, errClosedPool
+ }
+ atomic.AddUint64(&cp.connCount, 1)
+ return rv, nil
+ case cp.createsem <- true:
+ path = "create"
+ // Build a connection if we can't get a real one.
+ // This can potentially be an overflow connection, or
+ // a pooled connection.
+ rv, err := cp.mkConn(cp.host, cp.auth, cp.tlsConfig, cp.bucket)
+ if err != nil {
+ // On error, release our create hold
+ <-cp.createsem
+ } else {
+ atomic.AddUint64(&cp.connCount, 1)
+ }
+ return rv, err
+ case <-t.C:
+ return nil, ErrTimeout
+ }
+ }
+}
+
+func (cp *connectionPool) Get() (*memcached.Client, error) {
+ return cp.GetWithTimeout(ConnPoolTimeout)
+}
+
+func (cp *connectionPool) Return(c *memcached.Client) {
+ if c == nil {
+ return
+ }
+
+ if cp == nil {
+ c.Close()
+ }
+
+ if c.IsHealthy() {
+ defer func() {
+ if recover() != nil {
+ // This happens when the pool has already been
+ // closed and we're trying to return a
+ // connection to it anyway. Just close the
+ // connection.
+ c.Close()
+ }
+ }()
+
+ select {
+ case cp.connections <- c:
+ default:
+ <-cp.createsem
+ c.Close()
+ }
+ } else {
+ <-cp.createsem
+ c.Close()
+ }
+}
+
+// give the ability to discard a connection from a pool
+// useful for ditching connections to the wrong node after a rebalance
+func (cp *connectionPool) Discard(c *memcached.Client) {
+ <-cp.createsem
+ c.Close()
+}
+
+// asynchronous connection closer
+func (cp *connectionPool) connCloser() {
+ var connCount uint64
+
+ t := time.NewTimer(ConnCloserInterval)
+ defer t.Stop()
+
+ for {
+ connCount = cp.connCount
+
+ // we don't exist anymore! bail out!
+ select {
+ case <-cp.bailOut:
+ return
+ case <-t.C:
+ }
+ t.Reset(ConnCloserInterval)
+
+ // no overflow connections open or sustained requests for connections
+ // nothing to do until the next cycle
+ if len(cp.connections) <= cp.poolSize ||
+ ConnCloserInterval/ConnPoolAvailWaitTime < time.Duration(cp.connCount-connCount) {
+ continue
+ }
+
+ // close overflow connections now that they are not needed
+ for c := range cp.connections {
+ select {
+ case <-cp.bailOut:
+ return
+ default:
+ }
+
+ // bail out if close did not work out
+ if !cp.connCleanup(c) {
+ return
+ }
+ if len(cp.connections) <= cp.poolSize {
+ break
+ }
+ }
+ }
+}
+
+// close connection with recovery on error
+func (cp *connectionPool) connCleanup(c *memcached.Client) (rv bool) {
+
+ // just in case we are closing a connection after
+ // bailOut has been sent but we haven't yet read it
+ defer func() {
+ if recover() != nil {
+ rv = false
+ }
+ }()
+ rv = true
+
+ c.Close()
+ <-cp.createsem
+ return
+}
+
+func (cp *connectionPool) StartTapFeed(args *memcached.TapArguments) (*memcached.TapFeed, error) {
+ if cp == nil {
+ return nil, errNoPool
+ }
+ mc, err := cp.Get()
+ if err != nil {
+ return nil, err
+ }
+
+ // A connection can't be used after TAP; Dont' count it against the
+ // connection pool capacity
+ <-cp.createsem
+
+ return mc.StartTapFeed(*args)
+}
+
+const DEFAULT_WINDOW_SIZE = 20 * 1024 * 1024 // 20 Mb
+
+func (cp *connectionPool) StartUprFeed(name string, sequence uint32, dcp_buffer_size uint32, data_chan_size int) (*memcached.UprFeed, error) {
+ if cp == nil {
+ return nil, errNoPool
+ }
+ mc, err := cp.Get()
+ if err != nil {
+ return nil, err
+ }
+
+ // A connection can't be used after it has been allocated to UPR;
+ // Dont' count it against the connection pool capacity
+ <-cp.createsem
+
+ uf, err := mc.NewUprFeed()
+ if err != nil {
+ return nil, err
+ }
+
+ if err := uf.UprOpen(name, sequence, dcp_buffer_size); err != nil {
+ return nil, err
+ }
+
+ if err := uf.StartFeedWithConfig(data_chan_size); err != nil {
+ return nil, err
+ }
+
+ return uf, nil
+}
diff --git a/vendor/github.com/couchbase/go-couchbase/ddocs.go b/vendor/github.com/couchbase/go-couchbase/ddocs.go
new file mode 100644
index 00000000..f9cc343a
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/ddocs.go
@@ -0,0 +1,288 @@
+package couchbase
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "github.com/couchbase/goutils/logging"
+ "io/ioutil"
+ "net/http"
+)
+
+// ViewDefinition represents a single view within a design document.
+type ViewDefinition struct {
+ Map string `json:"map"`
+ Reduce string `json:"reduce,omitempty"`
+}
+
+// DDoc is the document body of a design document specifying a view.
+type DDoc struct {
+ Language string `json:"language,omitempty"`
+ Views map[string]ViewDefinition `json:"views"`
+}
+
+// DDocsResult represents the result from listing the design
+// documents.
+type DDocsResult struct {
+ Rows []struct {
+ DDoc struct {
+ Meta map[string]interface{}
+ JSON DDoc
+ } `json:"doc"`
+ } `json:"rows"`
+}
+
+// GetDDocs lists all design documents
+func (b *Bucket) GetDDocs() (DDocsResult, error) {
+ var ddocsResult DDocsResult
+ b.RLock()
+ pool := b.pool
+ uri := b.DDocs.URI
+ b.RUnlock()
+
+ // MB-23555 ephemeral buckets have no ddocs
+ if uri == "" {
+ return DDocsResult{}, nil
+ }
+
+ err := pool.client.parseURLResponse(uri, &ddocsResult)
+ if err != nil {
+ return DDocsResult{}, err
+ }
+ return ddocsResult, nil
+}
+
+func (b *Bucket) GetDDocWithRetry(docname string, into interface{}) error {
+ ddocURI := fmt.Sprintf("/%s/_design/%s", b.GetName(), docname)
+ err := b.parseAPIResponse(ddocURI, &into)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (b *Bucket) GetDDocsWithRetry() (DDocsResult, error) {
+ var ddocsResult DDocsResult
+ b.RLock()
+ uri := b.DDocs.URI
+ b.RUnlock()
+
+ // MB-23555 ephemeral buckets have no ddocs
+ if uri == "" {
+ return DDocsResult{}, nil
+ }
+
+ err := b.parseURLResponse(uri, &ddocsResult)
+ if err != nil {
+ return DDocsResult{}, err
+ }
+ return ddocsResult, nil
+}
+
+func (b *Bucket) ddocURL(docname string) (string, error) {
+ u, err := b.randomBaseURL()
+ if err != nil {
+ return "", err
+ }
+ u.Path = fmt.Sprintf("/%s/_design/%s", b.GetName(), docname)
+ return u.String(), nil
+}
+
+func (b *Bucket) ddocURLNext(nodeId int, docname string) (string, int, error) {
+ u, selected, err := b.randomNextURL(nodeId)
+ if err != nil {
+ return "", -1, err
+ }
+ u.Path = fmt.Sprintf("/%s/_design/%s", b.GetName(), docname)
+ return u.String(), selected, nil
+}
+
+const ABS_MAX_RETRIES = 10
+const ABS_MIN_RETRIES = 3
+
+func (b *Bucket) getMaxRetries() (int, error) {
+
+ maxRetries := len(b.Nodes())
+
+ if maxRetries == 0 {
+ return 0, fmt.Errorf("No available Couch rest URLs")
+ }
+
+ if maxRetries > ABS_MAX_RETRIES {
+ maxRetries = ABS_MAX_RETRIES
+ } else if maxRetries < ABS_MIN_RETRIES {
+ maxRetries = ABS_MIN_RETRIES
+ }
+
+ return maxRetries, nil
+}
+
+// PutDDoc installs a design document.
+func (b *Bucket) PutDDoc(docname string, value interface{}) error {
+
+ var Err error
+
+ maxRetries, err := b.getMaxRetries()
+ if err != nil {
+ return err
+ }
+
+ lastNode := START_NODE_ID
+
+ for retryCount := 0; retryCount < maxRetries; retryCount++ {
+
+ Err = nil
+
+ ddocU, selectedNode, err := b.ddocURLNext(lastNode, docname)
+ if err != nil {
+ return err
+ }
+
+ lastNode = selectedNode
+
+ logging.Infof(" Trying with selected node %d", selectedNode)
+ j, err := json.Marshal(value)
+ if err != nil {
+ return err
+ }
+
+ req, err := http.NewRequest("PUT", ddocU, bytes.NewReader(j))
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Content-Type", "application/json")
+ err = maybeAddAuth(req, b.authHandler(false /* bucket not yet locked */))
+ if err != nil {
+ return err
+ }
+
+ res, err := doHTTPRequest(req)
+ if err != nil {
+ return err
+ }
+
+ if res.StatusCode != 201 {
+ body, _ := ioutil.ReadAll(res.Body)
+ Err = fmt.Errorf("error installing view: %v / %s",
+ res.Status, body)
+ logging.Errorf(" Error in PutDDOC %v. Retrying...", Err)
+ res.Body.Close()
+ b.Refresh()
+ continue
+ }
+
+ res.Body.Close()
+ break
+ }
+
+ return Err
+}
+
+// GetDDoc retrieves a specific a design doc.
+func (b *Bucket) GetDDoc(docname string, into interface{}) error {
+ var Err error
+ var res *http.Response
+
+ maxRetries, err := b.getMaxRetries()
+ if err != nil {
+ return err
+ }
+
+ lastNode := START_NODE_ID
+ for retryCount := 0; retryCount < maxRetries; retryCount++ {
+
+ Err = nil
+ ddocU, selectedNode, err := b.ddocURLNext(lastNode, docname)
+ if err != nil {
+ return err
+ }
+
+ lastNode = selectedNode
+ logging.Infof(" Trying with selected node %d", selectedNode)
+
+ req, err := http.NewRequest("GET", ddocU, nil)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Content-Type", "application/json")
+ err = maybeAddAuth(req, b.authHandler(false /* bucket not yet locked */))
+ if err != nil {
+ return err
+ }
+
+ res, err = doHTTPRequest(req)
+ if err != nil {
+ return err
+ }
+ if res.StatusCode != 200 {
+ body, _ := ioutil.ReadAll(res.Body)
+ Err = fmt.Errorf("error reading view: %v / %s",
+ res.Status, body)
+ logging.Errorf(" Error in GetDDOC %v Retrying...", Err)
+ b.Refresh()
+ res.Body.Close()
+ continue
+ }
+ defer res.Body.Close()
+ break
+ }
+
+ if Err != nil {
+ return Err
+ }
+
+ d := json.NewDecoder(res.Body)
+ return d.Decode(into)
+}
+
+// DeleteDDoc removes a design document.
+func (b *Bucket) DeleteDDoc(docname string) error {
+
+ var Err error
+
+ maxRetries, err := b.getMaxRetries()
+ if err != nil {
+ return err
+ }
+
+ lastNode := START_NODE_ID
+
+ for retryCount := 0; retryCount < maxRetries; retryCount++ {
+
+ Err = nil
+ ddocU, selectedNode, err := b.ddocURLNext(lastNode, docname)
+ if err != nil {
+ return err
+ }
+
+ lastNode = selectedNode
+ logging.Infof(" Trying with selected node %d", selectedNode)
+
+ req, err := http.NewRequest("DELETE", ddocU, nil)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Content-Type", "application/json")
+ err = maybeAddAuth(req, b.authHandler(false /* bucket not already locked */))
+ if err != nil {
+ return err
+ }
+
+ res, err := doHTTPRequest(req)
+ if err != nil {
+ return err
+ }
+ if res.StatusCode != 200 {
+ body, _ := ioutil.ReadAll(res.Body)
+ Err = fmt.Errorf("error deleting view : %v / %s", res.Status, body)
+ logging.Errorf(" Error in DeleteDDOC %v. Retrying ... ", Err)
+ b.Refresh()
+ res.Body.Close()
+ continue
+ }
+
+ res.Body.Close()
+ break
+ }
+ return Err
+}
diff --git a/vendor/github.com/couchbase/go-couchbase/observe.go b/vendor/github.com/couchbase/go-couchbase/observe.go
new file mode 100644
index 00000000..6e746f5a
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/observe.go
@@ -0,0 +1,300 @@
+package couchbase
+
+import (
+ "fmt"
+ "github.com/couchbase/goutils/logging"
+ "sync"
+)
+
+type PersistTo uint8
+
+const (
+ PersistNone = PersistTo(0x00)
+ PersistMaster = PersistTo(0x01)
+ PersistOne = PersistTo(0x02)
+ PersistTwo = PersistTo(0x03)
+ PersistThree = PersistTo(0x04)
+ PersistFour = PersistTo(0x05)
+)
+
+type ObserveTo uint8
+
+const (
+ ObserveNone = ObserveTo(0x00)
+ ObserveReplicateOne = ObserveTo(0x01)
+ ObserveReplicateTwo = ObserveTo(0x02)
+ ObserveReplicateThree = ObserveTo(0x03)
+ ObserveReplicateFour = ObserveTo(0x04)
+)
+
+type JobType uint8
+
+const (
+ OBSERVE = JobType(0x00)
+ PERSIST = JobType(0x01)
+)
+
+type ObservePersistJob struct {
+ vb uint16
+ vbuuid uint64
+ hostname string
+ jobType JobType
+ failover uint8
+ lastPersistedSeqNo uint64
+ currentSeqNo uint64
+ resultChan chan *ObservePersistJob
+ errorChan chan *OPErrResponse
+}
+
+type OPErrResponse struct {
+ vb uint16
+ vbuuid uint64
+ err error
+ job *ObservePersistJob
+}
+
+var ObservePersistPool = NewPool(1024)
+var OPJobChan = make(chan *ObservePersistJob, 1024)
+var OPJobDone = make(chan bool)
+
+var wg sync.WaitGroup
+
+func (b *Bucket) StartOPPollers(maxWorkers int) {
+
+ for i := 0; i < maxWorkers; i++ {
+ go b.OPJobPoll()
+ wg.Add(1)
+ }
+ wg.Wait()
+}
+
+func (b *Bucket) SetObserveAndPersist(nPersist PersistTo, nObserve ObserveTo) (err error) {
+
+ numNodes := len(b.Nodes())
+ if int(nPersist) > numNodes || int(nObserve) > numNodes {
+ return fmt.Errorf("Not enough healthy nodes in the cluster")
+ }
+
+ if int(nPersist) > (b.Replicas+1) || int(nObserve) > b.Replicas {
+ return fmt.Errorf("Not enough replicas in the cluster")
+ }
+
+ if EnableMutationToken == false {
+ return fmt.Errorf("Mutation Tokens not enabled ")
+ }
+
+ b.ds = &DurablitySettings{Persist: PersistTo(nPersist), Observe: ObserveTo(nObserve)}
+ return
+}
+
+func (b *Bucket) ObserveAndPersistPoll(vb uint16, vbuuid uint64, seqNo uint64) (err error, failover bool) {
+ b.RLock()
+ ds := b.ds
+ b.RUnlock()
+
+ if ds == nil {
+ return
+ }
+
+ nj := 0 // total number of jobs
+ resultChan := make(chan *ObservePersistJob, 10)
+ errChan := make(chan *OPErrResponse, 10)
+
+ nodes := b.GetNodeList(vb)
+ if int(ds.Observe) > len(nodes) || int(ds.Persist) > len(nodes) {
+ return fmt.Errorf("Not enough healthy nodes in the cluster"), false
+ }
+
+ logging.Infof("Node list %v", nodes)
+
+ if ds.Observe >= ObserveReplicateOne {
+ // create a job for each host
+ for i := ObserveReplicateOne; i < ds.Observe+1; i++ {
+ opJob := ObservePersistPool.Get()
+ opJob.vb = vb
+ opJob.vbuuid = vbuuid
+ opJob.jobType = OBSERVE
+ opJob.hostname = nodes[i]
+ opJob.resultChan = resultChan
+ opJob.errorChan = errChan
+
+ OPJobChan <- opJob
+ nj++
+
+ }
+ }
+
+ if ds.Persist >= PersistMaster {
+ for i := PersistMaster; i < ds.Persist+1; i++ {
+ opJob := ObservePersistPool.Get()
+ opJob.vb = vb
+ opJob.vbuuid = vbuuid
+ opJob.jobType = PERSIST
+ opJob.hostname = nodes[i]
+ opJob.resultChan = resultChan
+ opJob.errorChan = errChan
+
+ OPJobChan <- opJob
+ nj++
+
+ }
+ }
+
+ ok := true
+ for ok {
+ select {
+ case res := <-resultChan:
+ jobDone := false
+ if res.failover == 0 {
+ // no failover
+ if res.jobType == PERSIST {
+ if res.lastPersistedSeqNo >= seqNo {
+ jobDone = true
+ }
+
+ } else {
+ if res.currentSeqNo >= seqNo {
+ jobDone = true
+ }
+ }
+
+ if jobDone == true {
+ nj--
+ ObservePersistPool.Put(res)
+ } else {
+ // requeue this job
+ OPJobChan <- res
+ }
+
+ } else {
+ // Not currently handling failover scenarios TODO
+ nj--
+ ObservePersistPool.Put(res)
+ failover = true
+ }
+
+ if nj == 0 {
+ // done with all the jobs
+ ok = false
+ close(resultChan)
+ close(errChan)
+ }
+
+ case Err := <-errChan:
+ logging.Errorf("Error in Observe/Persist %v", Err.err)
+ err = fmt.Errorf("Error in Observe/Persist job %v", Err.err)
+ nj--
+ ObservePersistPool.Put(Err.job)
+ if nj == 0 {
+ close(resultChan)
+ close(errChan)
+ ok = false
+ }
+ }
+ }
+
+ return
+}
+
+func (b *Bucket) OPJobPoll() {
+
+ ok := true
+ for ok == true {
+ select {
+ case job := <-OPJobChan:
+ pool := b.getConnPoolByHost(job.hostname, false /* bucket not already locked */)
+ if pool == nil {
+ errRes := &OPErrResponse{vb: job.vb, vbuuid: job.vbuuid}
+ errRes.err = fmt.Errorf("Pool not found for host %v", job.hostname)
+ errRes.job = job
+ job.errorChan <- errRes
+ continue
+ }
+ conn, err := pool.Get()
+ if err != nil {
+ errRes := &OPErrResponse{vb: job.vb, vbuuid: job.vbuuid}
+ errRes.err = fmt.Errorf("Unable to get connection from pool %v", err)
+ errRes.job = job
+ job.errorChan <- errRes
+ continue
+ }
+
+ res, err := conn.ObserveSeq(job.vb, job.vbuuid)
+ if err != nil {
+ errRes := &OPErrResponse{vb: job.vb, vbuuid: job.vbuuid}
+ errRes.err = fmt.Errorf("Command failed %v", err)
+ errRes.job = job
+ job.errorChan <- errRes
+ continue
+
+ }
+ pool.Return(conn)
+ job.lastPersistedSeqNo = res.LastPersistedSeqNo
+ job.currentSeqNo = res.CurrentSeqNo
+ job.failover = res.Failover
+
+ job.resultChan <- job
+ case <-OPJobDone:
+ logging.Infof("Observe Persist Poller exitting")
+ ok = false
+ }
+ }
+ wg.Done()
+}
+
+func (b *Bucket) GetNodeList(vb uint16) []string {
+
+ vbm := b.VBServerMap()
+ if len(vbm.VBucketMap) < int(vb) {
+ logging.Infof("vbmap smaller than vblist")
+ return nil
+ }
+
+ nodes := make([]string, len(vbm.VBucketMap[vb]))
+ for i := 0; i < len(vbm.VBucketMap[vb]); i++ {
+ n := vbm.VBucketMap[vb][i]
+ if n < 0 {
+ continue
+ }
+
+ node := b.getMasterNode(n)
+ if len(node) > 1 {
+ nodes[i] = node
+ }
+ continue
+
+ }
+ return nodes
+}
+
+//pool of ObservePersist Jobs
+type OPpool struct {
+ pool chan *ObservePersistJob
+}
+
+// NewPool creates a new pool of jobs
+func NewPool(max int) *OPpool {
+ return &OPpool{
+ pool: make(chan *ObservePersistJob, max),
+ }
+}
+
+// Borrow a Client from the pool.
+func (p *OPpool) Get() *ObservePersistJob {
+ var o *ObservePersistJob
+ select {
+ case o = <-p.pool:
+ default:
+ o = &ObservePersistJob{}
+ }
+ return o
+}
+
+// Return returns a Client to the pool.
+func (p *OPpool) Put(o *ObservePersistJob) {
+ select {
+ case p.pool <- o:
+ default:
+ // let it go, let it go...
+ }
+}
diff --git a/vendor/github.com/couchbase/go-couchbase/pools.go b/vendor/github.com/couchbase/go-couchbase/pools.go
new file mode 100644
index 00000000..fdf7e000
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/pools.go
@@ -0,0 +1,1822 @@
+package couchbase
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "net/http"
+ "net/url"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+ "unsafe"
+
+ "github.com/couchbase/goutils/logging"
+
+ "github.com/couchbase/gomemcached" // package name is 'gomemcached'
+ "github.com/couchbase/gomemcached/client" // package name is 'memcached'
+)
+
+// HTTPClient to use for REST and view operations.
+var MaxIdleConnsPerHost = 256
+var ClientTimeOut = 10 * time.Second
+var HTTPTransport = &http.Transport{MaxIdleConnsPerHost: MaxIdleConnsPerHost}
+var HTTPClient = &http.Client{Transport: HTTPTransport, Timeout: ClientTimeOut}
+
+// Use this client for reading from streams that should be open for an extended duration.
+var HTTPClientForStreaming = &http.Client{Transport: HTTPTransport, Timeout: 0}
+
+// PoolSize is the size of each connection pool (per host).
+var PoolSize = 64
+
+// PoolOverflow is the number of overflow connections allowed in a
+// pool.
+var PoolOverflow = 16
+
+// AsynchronousCloser turns on asynchronous closing for overflow connections
+var AsynchronousCloser = false
+
+// TCP KeepAlive enabled/disabled
+var TCPKeepalive = false
+
+// Enable MutationToken
+var EnableMutationToken = false
+
+// Enable Data Type response
+var EnableDataType = false
+
+// Enable Xattr
+var EnableXattr = false
+
+// Enable Collections
+var EnableCollections = false
+
+// TCP keepalive interval in seconds. Default 30 minutes
+var TCPKeepaliveInterval = 30 * 60
+
+// Used to decide whether to skip verification of certificates when
+// connecting to an ssl port.
+var skipVerify = true
+var certFile = ""
+var keyFile = ""
+var rootFile = ""
+
+func SetSkipVerify(skip bool) {
+ skipVerify = skip
+}
+
+func SetCertFile(cert string) {
+ certFile = cert
+}
+
+func SetKeyFile(cert string) {
+ keyFile = cert
+}
+
+func SetRootFile(cert string) {
+ rootFile = cert
+}
+
+// Allow applications to specify the PoolSize and PoolOverflow
+func SetConnectionPoolParams(size, overflow int) {
+
+ if size > 0 {
+ PoolSize = size
+ }
+
+ if overflow > 0 {
+ PoolOverflow = overflow
+ }
+}
+
+// Turn off overflow connections
+func DisableOverflowConnections() {
+ PoolOverflow = 0
+}
+
+// Toggle asynchronous overflow closer
+func EnableAsynchronousCloser(closer bool) {
+ AsynchronousCloser = closer
+}
+
+// Allow TCP keepalive parameters to be set by the application
+func SetTcpKeepalive(enabled bool, interval int) {
+
+ TCPKeepalive = enabled
+
+ if interval > 0 {
+ TCPKeepaliveInterval = interval
+ }
+}
+
+// AuthHandler is a callback that gets the auth username and password
+// for the given bucket.
+type AuthHandler interface {
+ GetCredentials() (string, string, string)
+}
+
+// AuthWithSaslHandler is an AuthHandler that additionally supplies
+// SASL credentials for memcached.
+type AuthWithSaslHandler interface {
+ AuthHandler
+ GetSaslCredentials() (string, string)
+}
+
+// MultiBucketAuthHandler is kind of AuthHandler that may perform
+// different auth for different buckets.
+type MultiBucketAuthHandler interface {
+ AuthHandler
+ ForBucket(bucket string) AuthHandler
+}
+
+// HTTPAuthHandler is kind of AuthHandler that performs more general
+// for outgoing http requests than is possible via simple
+// GetCredentials() call (i.e. digest auth or different auth per
+// different destinations).
+type HTTPAuthHandler interface {
+ AuthHandler
+ SetCredsForRequest(req *http.Request) error
+}
+
+// RestPool represents a single pool returned from the pools REST API.
+type RestPool struct {
+ Name string `json:"name"`
+ StreamingURI string `json:"streamingUri"`
+ URI string `json:"uri"`
+}
+
+// Pools represents the collection of pools as returned from the REST API.
+type Pools struct {
+ ComponentsVersion map[string]string `json:"componentsVersion,omitempty"`
+ ImplementationVersion string `json:"implementationVersion"`
+ IsAdmin bool `json:"isAdminCreds"`
+ UUID string `json:"uuid"`
+ Pools []RestPool `json:"pools"`
+}
+
+// A Node is a computer in a cluster running the couchbase software.
+type Node struct {
+ ClusterCompatibility int `json:"clusterCompatibility"`
+ ClusterMembership string `json:"clusterMembership"`
+ CouchAPIBase string `json:"couchApiBase"`
+ Hostname string `json:"hostname"`
+ AlternateNames map[string]NodeAlternateNames `json:"alternateAddresses"`
+ InterestingStats map[string]float64 `json:"interestingStats,omitempty"`
+ MCDMemoryAllocated float64 `json:"mcdMemoryAllocated"`
+ MCDMemoryReserved float64 `json:"mcdMemoryReserved"`
+ MemoryFree float64 `json:"memoryFree"`
+ MemoryTotal float64 `json:"memoryTotal"`
+ OS string `json:"os"`
+ Ports map[string]int `json:"ports"`
+ Services []string `json:"services"`
+ Status string `json:"status"`
+ Uptime int `json:"uptime,string"`
+ Version string `json:"version"`
+ ThisNode bool `json:"thisNode,omitempty"`
+}
+
+// A Pool of nodes and buckets.
+type Pool struct {
+ BucketMap map[string]*Bucket
+ Nodes []Node
+
+ BucketURL map[string]string `json:"buckets"`
+
+ MemoryQuota float64 `json:"memoryQuota"`
+ CbasMemoryQuota float64 `json:"cbasMemoryQuota"`
+ EventingMemoryQuota float64 `json:"eventingMemoryQuota"`
+ FtsMemoryQuota float64 `json:"ftsMemoryQuota"`
+ IndexMemoryQuota float64 `json:"indexMemoryQuota"`
+
+ client *Client
+}
+
+// VBucketServerMap is the mapping of vbuckets to nodes.
+type VBucketServerMap struct {
+ HashAlgorithm string `json:"hashAlgorithm"`
+ NumReplicas int `json:"numReplicas"`
+ ServerList []string `json:"serverList"`
+ VBucketMap [][]int `json:"vBucketMap"`
+}
+
+type DurablitySettings struct {
+ Persist PersistTo
+ Observe ObserveTo
+}
+
+// Bucket is the primary entry point for most data operations.
+// Bucket is a locked data structure. All access to its fields should be done using read or write locking,
+// as appropriate.
+//
+// Some access methods require locking, but rely on the caller to do so. These are appropriate
+// for calls from methods that have already locked the structure. Methods like this
+// take a boolean parameter "bucketLocked".
+type Bucket struct {
+ sync.RWMutex
+ Capabilities []string `json:"bucketCapabilities"`
+ CapabilitiesVersion string `json:"bucketCapabilitiesVer"`
+ CollectionsManifestUid string `json:"collectionsManifestUid"`
+ Type string `json:"bucketType"`
+ Name string `json:"name"`
+ NodeLocator string `json:"nodeLocator"`
+ Quota map[string]float64 `json:"quota,omitempty"`
+ Replicas int `json:"replicaNumber"`
+ URI string `json:"uri"`
+ StreamingURI string `json:"streamingUri"`
+ LocalRandomKeyURI string `json:"localRandomKeyUri,omitempty"`
+ UUID string `json:"uuid"`
+ ConflictResolutionType string `json:"conflictResolutionType,omitempty"`
+ DDocs struct {
+ URI string `json:"uri"`
+ } `json:"ddocs,omitempty"`
+ BasicStats map[string]interface{} `json:"basicStats,omitempty"`
+ Controllers map[string]interface{} `json:"controllers,omitempty"`
+
+ // These are used for JSON IO, but isn't used for processing
+ // since it needs to be swapped out safely.
+ VBSMJson VBucketServerMap `json:"vBucketServerMap"`
+ NodesJSON []Node `json:"nodes"`
+
+ pool *Pool
+ connPools unsafe.Pointer // *[]*connectionPool
+ vBucketServerMap unsafe.Pointer // *VBucketServerMap
+ nodeList unsafe.Pointer // *[]Node
+ commonSufix string
+ ah AuthHandler // auth handler
+ ds *DurablitySettings // Durablity Settings for this bucket
+ closed bool
+
+ dedicatedPool bool // Set if the pool instance above caters to this Bucket alone
+}
+
+// PoolServices is all the bucket-independent services in a pool
+type PoolServices struct {
+ Rev int `json:"rev"`
+ NodesExt []NodeServices `json:"nodesExt"`
+ Capabilities json.RawMessage `json:"clusterCapabilities"`
+}
+
+// NodeServices is all the bucket-independent services running on
+// a node (given by Hostname)
+type NodeServices struct {
+ Services map[string]int `json:"services,omitempty"`
+ Hostname string `json:"hostname"`
+ ThisNode bool `json:"thisNode"`
+ AlternateNames map[string]NodeAlternateNames `json:"alternateAddresses"`
+}
+
+type NodeAlternateNames struct {
+ Hostname string `json:"hostname"`
+ Ports map[string]int `json:"ports"`
+}
+
+type BucketNotFoundError struct {
+ bucket string
+}
+
+func (e *BucketNotFoundError) Error() string {
+ return fmt.Sprint("No bucket named '" + e.bucket + "'")
+}
+
+type BucketAuth struct {
+ name string
+ pwd string
+ bucket string
+}
+
+func newBucketAuth(name string, pass string, bucket string) *BucketAuth {
+ return &BucketAuth{name: name, pwd: pass, bucket: bucket}
+}
+
+func (ba *BucketAuth) GetCredentials() (string, string, string) {
+ return ba.name, ba.pwd, ba.bucket
+}
+
+// VBServerMap returns the current VBucketServerMap.
+func (b *Bucket) VBServerMap() *VBucketServerMap {
+ b.RLock()
+ defer b.RUnlock()
+ ret := (*VBucketServerMap)(b.vBucketServerMap)
+ return ret
+}
+
+func (b *Bucket) GetVBmap(addrs []string) (map[string][]uint16, error) {
+ vbmap := b.VBServerMap()
+ servers := vbmap.ServerList
+ if addrs == nil {
+ addrs = vbmap.ServerList
+ }
+
+ m := make(map[string][]uint16)
+ for _, addr := range addrs {
+ m[addr] = make([]uint16, 0)
+ }
+ for vbno, idxs := range vbmap.VBucketMap {
+ if len(idxs) == 0 {
+ return nil, fmt.Errorf("vbmap: No KV node no for vb %d", vbno)
+ } else if idxs[0] < 0 || idxs[0] >= len(servers) {
+ return nil, fmt.Errorf("vbmap: Invalid KV node no %d for vb %d", idxs[0], vbno)
+ }
+ addr := servers[idxs[0]]
+ if _, ok := m[addr]; ok {
+ m[addr] = append(m[addr], uint16(vbno))
+ }
+ }
+ return m, nil
+}
+
+// true if node is not on the bucket VBmap
+func (b *Bucket) checkVBmap(node string) bool {
+ vbmap := b.VBServerMap()
+ servers := vbmap.ServerList
+
+ for _, idxs := range vbmap.VBucketMap {
+ if len(idxs) == 0 {
+ return true
+ } else if idxs[0] < 0 || idxs[0] >= len(servers) {
+ return true
+ }
+ if servers[idxs[0]] == node {
+ return false
+ }
+ }
+ return true
+}
+
+func (b *Bucket) GetName() string {
+ b.RLock()
+ defer b.RUnlock()
+ ret := b.Name
+ return ret
+}
+
+func (b *Bucket) GetUUID() string {
+ b.RLock()
+ defer b.RUnlock()
+ ret := b.UUID
+ return ret
+}
+
+// Nodes returns the current list of nodes servicing this bucket.
+func (b *Bucket) Nodes() []Node {
+ b.RLock()
+ defer b.RUnlock()
+ ret := *(*[]Node)(b.nodeList)
+ return ret
+}
+
+// return the list of healthy nodes
+func (b *Bucket) HealthyNodes() []Node {
+ nodes := []Node{}
+
+ for _, n := range b.Nodes() {
+ if n.Status == "healthy" && n.CouchAPIBase != "" {
+ nodes = append(nodes, n)
+ }
+ if n.Status != "healthy" { // log non-healthy node
+ logging.Infof("Non-healthy node; node details:")
+ logging.Infof("Hostname=%v, Status=%v, CouchAPIBase=%v, ThisNode=%v", n.Hostname, n.Status, n.CouchAPIBase, n.ThisNode)
+ }
+ }
+
+ return nodes
+}
+
+func (b *Bucket) getConnPools(bucketLocked bool) []*connectionPool {
+ if !bucketLocked {
+ b.RLock()
+ defer b.RUnlock()
+ }
+ if b.connPools != nil {
+ return *(*[]*connectionPool)(b.connPools)
+ } else {
+ return nil
+ }
+}
+
+func (b *Bucket) replaceConnPools(with []*connectionPool) {
+ b.Lock()
+ defer b.Unlock()
+
+ old := b.connPools
+ b.connPools = unsafe.Pointer(&with)
+ if old != nil {
+ for _, pool := range *(*[]*connectionPool)(old) {
+ if pool != nil {
+ pool.Close()
+ }
+ }
+ }
+ return
+}
+
+func (b *Bucket) getConnPool(i int) *connectionPool {
+
+ if i < 0 {
+ return nil
+ }
+
+ p := b.getConnPools(false /* not already locked */)
+ if len(p) > i {
+ return p[i]
+ }
+
+ return nil
+}
+
+func (b *Bucket) getConnPoolByHost(host string, bucketLocked bool) *connectionPool {
+ pools := b.getConnPools(bucketLocked)
+ for _, p := range pools {
+ if p != nil && p.host == host {
+ return p
+ }
+ }
+
+ return nil
+}
+
+// Given a vbucket number, returns a memcached connection to it.
+// The connection must be returned to its pool after use.
+func (b *Bucket) getConnectionToVBucket(vb uint32) (*memcached.Client, *connectionPool, error) {
+ for {
+ vbm := b.VBServerMap()
+ if len(vbm.VBucketMap) < int(vb) {
+ return nil, nil, fmt.Errorf("go-couchbase: vbmap smaller than vbucket list: %v vs. %v",
+ vb, vbm.VBucketMap)
+ }
+ masterId := vbm.VBucketMap[vb][0]
+ if masterId < 0 {
+ return nil, nil, fmt.Errorf("go-couchbase: No master for vbucket %d", vb)
+ }
+ pool := b.getConnPool(masterId)
+ conn, err := pool.Get()
+ if err != errClosedPool {
+ return conn, pool, err
+ }
+ // If conn pool was closed, because another goroutine refreshed the vbucket map, retry...
+ }
+}
+
+// To get random documents, we need to cover all the nodes, so select
+// a connection at random.
+
+func (b *Bucket) getRandomConnection() (*memcached.Client, *connectionPool, error) {
+ for {
+ var currentPool = 0
+ pools := b.getConnPools(false /* not already locked */)
+ if len(pools) == 0 {
+ return nil, nil, fmt.Errorf("No connection pool found")
+ } else if len(pools) > 1 { // choose a random connection
+ currentPool = rand.Intn(len(pools))
+ } // if only one pool, currentPool defaults to 0, i.e., the only pool
+
+ // get the pool
+ pool := pools[currentPool]
+ conn, err := pool.Get()
+ if err != errClosedPool {
+ return conn, pool, err
+ }
+
+ // If conn pool was closed, because another goroutine refreshed the vbucket map, retry...
+ }
+}
+
+//
+// Get a random document from a bucket. Since the bucket may be distributed
+// across nodes, we must first select a random connection, and then use the
+// Client.GetRandomDoc() call to get a random document from that node.
+//
+
+func (b *Bucket) GetRandomDoc(context ...*memcached.ClientContext) (*gomemcached.MCResponse, error) {
+ // get a connection from the pool
+ conn, pool, err := b.getRandomConnection()
+
+ if err != nil {
+ return nil, err
+ }
+ conn.SetDeadline(getDeadline(time.Time{}, DefaultTimeout))
+
+ // We may need to select the bucket before GetRandomDoc()
+ // will work. This is sometimes done at startup (see defaultMkConn())
+ // but not always, depending on the auth type.
+ if conn.LastBucket() != b.Name {
+ _, err = conn.SelectBucket(b.Name)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // get a random document from the connection
+ doc, err := conn.GetRandomDoc(context...)
+ // need to return the connection to the pool
+ pool.Return(conn)
+ return doc, err
+}
+
+// Bucket DDL
+func uriAdj(s string) string {
+ return strings.Replace(s, "%", "%25", -1)
+}
+
+func (b *Bucket) CreateScope(scope string) error {
+ b.RLock()
+ pool := b.pool
+ client := pool.client
+ b.RUnlock()
+ args := map[string]interface{}{"name": scope}
+ return client.parsePostURLResponseTerse("/pools/default/buckets/"+uriAdj(b.Name)+"/scopes", args, nil)
+}
+
+func (b *Bucket) DropScope(scope string) error {
+ b.RLock()
+ pool := b.pool
+ client := pool.client
+ b.RUnlock()
+ return client.parseDeleteURLResponseTerse("/pools/default/buckets/"+uriAdj(b.Name)+"/scopes/"+uriAdj(scope), nil, nil)
+}
+
+func (b *Bucket) CreateCollection(scope string, collection string) error {
+ b.RLock()
+ pool := b.pool
+ client := pool.client
+ b.RUnlock()
+ args := map[string]interface{}{"name": collection}
+ return client.parsePostURLResponseTerse("/pools/default/buckets/"+uriAdj(b.Name)+"/scopes/"+uriAdj(scope)+"/collections", args, nil)
+}
+
+func (b *Bucket) DropCollection(scope string, collection string) error {
+ b.RLock()
+ pool := b.pool
+ client := pool.client
+ b.RUnlock()
+ return client.parseDeleteURLResponseTerse("/pools/default/buckets/"+uriAdj(b.Name)+"/scopes/"+uriAdj(scope)+"/collections/"+uriAdj(collection), nil, nil)
+}
+
+func (b *Bucket) FlushCollection(scope string, collection string) error {
+ b.RLock()
+ pool := b.pool
+ client := pool.client
+ b.RUnlock()
+ args := map[string]interface{}{"name": collection, "scope": scope}
+ return client.parsePostURLResponseTerse("/pools/default/buckets/"+uriAdj(b.Name)+"/collections-flush", args, nil)
+}
+
+func (b *Bucket) getMasterNode(i int) string {
+ p := b.getConnPools(false /* not already locked */)
+ if len(p) > i {
+ return p[i].host
+ }
+ return ""
+}
+
+func (b *Bucket) authHandler(bucketLocked bool) (ah AuthHandler) {
+ if !bucketLocked {
+ b.RLock()
+ defer b.RUnlock()
+ }
+ pool := b.pool
+ name := b.Name
+
+ if pool != nil {
+ ah = pool.client.ah
+ }
+ if mbah, ok := ah.(MultiBucketAuthHandler); ok {
+ return mbah.ForBucket(name)
+ }
+ if ah == nil {
+ ah = &basicAuth{name, ""}
+ }
+ return
+}
+
+// NodeAddresses gets the (sorted) list of memcached node addresses
+// (hostname:port).
+func (b *Bucket) NodeAddresses() []string {
+ vsm := b.VBServerMap()
+ rv := make([]string, len(vsm.ServerList))
+ copy(rv, vsm.ServerList)
+ sort.Strings(rv)
+ return rv
+}
+
+// CommonAddressSuffix finds the longest common suffix of all
+// host:port strings in the node list.
+func (b *Bucket) CommonAddressSuffix() string {
+ input := []string{}
+ for _, n := range b.Nodes() {
+ input = append(input, n.Hostname)
+ }
+ return FindCommonSuffix(input)
+}
+
+// A Client is the starting point for all services across all buckets
+// in a Couchbase cluster.
+type Client struct {
+ BaseURL *url.URL
+ ah AuthHandler
+ Info Pools
+ tlsConfig *tls.Config
+}
+
+func maybeAddAuth(req *http.Request, ah AuthHandler) error {
+ if hah, ok := ah.(HTTPAuthHandler); ok {
+ return hah.SetCredsForRequest(req)
+ }
+ if ah != nil {
+ user, pass, _ := ah.GetCredentials()
+ req.Header.Set("Authorization", "Basic "+
+ base64.StdEncoding.EncodeToString([]byte(user+":"+pass)))
+ }
+ return nil
+}
+
+// arbitrary number, may need to be tuned #FIXME
+const HTTP_MAX_RETRY = 5
+
+// Someday golang network packages will implement standard
+// error codes. Until then #sigh
+func isHttpConnError(err error) bool {
+
+ estr := err.Error()
+ return strings.Contains(estr, "broken pipe") ||
+ strings.Contains(estr, "broken connection") ||
+ strings.Contains(estr, "connection reset")
+}
+
+var client *http.Client
+var clientForStreaming *http.Client
+
+func ClientConfigForX509(certFile, keyFile, rootFile string) (*tls.Config, error) {
+ cfg := &tls.Config{}
+
+ if certFile != "" && keyFile != "" {
+ tlsCert, err := tls.LoadX509KeyPair(certFile, keyFile)
+ if err != nil {
+ return nil, err
+ }
+ cfg.Certificates = []tls.Certificate{tlsCert}
+ } else {
+ // error: must pass both certfile and keyfile
+ return nil, fmt.Errorf("N1QL: Need to pass both certfile and keyfile")
+ }
+
+ var caCert []byte
+ var err1 error
+
+ caCertPool := x509.NewCertPool()
+ if rootFile != "" {
+ // Read that value in
+ caCert, err1 = ioutil.ReadFile(rootFile)
+ if err1 != nil {
+ return nil, fmt.Errorf(" Error in reading cacert file, err: %v", err1)
+ }
+ caCertPool.AppendCertsFromPEM(caCert)
+ }
+
+ cfg.RootCAs = caCertPool
+ return cfg, nil
+}
+
+// This version of doHTTPRequest is for requests where the response connection is held open
+// for an extended duration, since each line of streamed output is new and significant.
+//
+// The ordinary version of this method expects the results to arrive promptly, and
+// therefore use an HTTP client with a timeout. This client is not suitable
+// for streaming use.
+func doHTTPRequestForStreaming(req *http.Request) (*http.Response, error) {
+ var err error
+ var res *http.Response
+
+ // we need a client that ignores certificate errors, since we self-sign
+ // our certs
+ if clientForStreaming == nil && req.URL.Scheme == "https" {
+ var tr *http.Transport
+
+ if skipVerify {
+ tr = &http.Transport{
+ TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+ MaxIdleConnsPerHost: MaxIdleConnsPerHost,
+ }
+ } else {
+ // Handle cases with cert
+
+ cfg, err := ClientConfigForX509(certFile, keyFile, rootFile)
+ if err != nil {
+ return nil, err
+ }
+
+ tr = &http.Transport{
+ TLSClientConfig: cfg,
+ MaxIdleConnsPerHost: MaxIdleConnsPerHost,
+ }
+ }
+
+ clientForStreaming = &http.Client{Transport: tr, Timeout: 0}
+
+ } else if clientForStreaming == nil {
+ clientForStreaming = HTTPClientForStreaming
+ }
+
+ for i := 0; i < HTTP_MAX_RETRY; i++ {
+ res, err = clientForStreaming.Do(req)
+ if err != nil && isHttpConnError(err) {
+ continue
+ }
+ break
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ return res, err
+}
+
+func doHTTPRequest(req *http.Request) (*http.Response, error) {
+
+ var err error
+ var res *http.Response
+
+ // we need a client that ignores certificate errors, since we self-sign
+ // our certs
+ if client == nil && req.URL.Scheme == "https" {
+ var tr *http.Transport
+
+ if skipVerify {
+ tr = &http.Transport{
+ TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+ MaxIdleConnsPerHost: MaxIdleConnsPerHost,
+ }
+ } else {
+ // Handle cases with cert
+
+ cfg, err := ClientConfigForX509(certFile, keyFile, rootFile)
+ if err != nil {
+ return nil, err
+ }
+
+ tr = &http.Transport{
+ TLSClientConfig: cfg,
+ MaxIdleConnsPerHost: MaxIdleConnsPerHost,
+ }
+ }
+
+ client = &http.Client{Transport: tr, Timeout: ClientTimeOut}
+
+ } else if client == nil {
+ client = HTTPClient
+ }
+
+ for i := 0; i < HTTP_MAX_RETRY; i++ {
+ res, err = client.Do(req)
+ if err != nil && isHttpConnError(err) {
+ continue
+ }
+ break
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ return res, err
+}
+
+func doPutAPI(baseURL *url.URL, path string, params map[string]interface{}, authHandler AuthHandler, out interface{}, terse bool) error {
+ return doOutputAPI("PUT", baseURL, path, params, authHandler, out, terse)
+}
+
+func doPostAPI(baseURL *url.URL, path string, params map[string]interface{}, authHandler AuthHandler, out interface{}, terse bool) error {
+ return doOutputAPI("POST", baseURL, path, params, authHandler, out, terse)
+}
+
+func doDeleteAPI(baseURL *url.URL, path string, params map[string]interface{}, authHandler AuthHandler, out interface{}, terse bool) error {
+ return doOutputAPI("DELETE", baseURL, path, params, authHandler, out, terse)
+}
+
+func doOutputAPI(
+ httpVerb string,
+ baseURL *url.URL,
+ path string,
+ params map[string]interface{},
+ authHandler AuthHandler,
+ out interface{},
+ terse bool) error {
+
+ var requestUrl string
+
+ if q := strings.Index(path, "?"); q > 0 {
+ requestUrl = baseURL.Scheme + "://" + baseURL.Host + path[:q] + "?" + path[q+1:]
+ } else {
+ requestUrl = baseURL.Scheme + "://" + baseURL.Host + path
+ }
+
+ postData := url.Values{}
+ for k, v := range params {
+ postData.Set(k, fmt.Sprintf("%v", v))
+ }
+
+ req, err := http.NewRequest(httpVerb, requestUrl, bytes.NewBufferString(postData.Encode()))
+ if err != nil {
+ return err
+ }
+
+ req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
+
+ err = maybeAddAuth(req, authHandler)
+ if err != nil {
+ return err
+ }
+
+ res, err := doHTTPRequest(req)
+ if err != nil {
+ return err
+ }
+
+ defer res.Body.Close()
+ // 200 - ok, 202 - accepted (asynchronously)
+ if res.StatusCode != 200 && res.StatusCode != 202 {
+ bod, _ := ioutil.ReadAll(io.LimitReader(res.Body, 512))
+ if terse {
+ var outBuf interface{}
+
+ err := json.Unmarshal(bod, &outBuf)
+ if err == nil && outBuf != nil {
+ switch errText := outBuf.(type) {
+ case string:
+ return fmt.Errorf("%s", errText)
+ case map[string]interface{}:
+ errField := errText["errors"]
+ if errField != nil {
+
+ // remove annoying 'map' prefix
+ return fmt.Errorf("%s", strings.TrimPrefix(fmt.Sprintf("%v", errField), "map"))
+ }
+ }
+ }
+ return fmt.Errorf("%s", string(bod))
+ }
+ return fmt.Errorf("HTTP error %v getting %q: %s",
+ res.Status, requestUrl, bod)
+ }
+
+ d := json.NewDecoder(res.Body)
+ // PUT/POST/DELETE request may not have a response body
+ if d.More() {
+ if err = d.Decode(&out); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func queryRestAPI(
+ baseURL *url.URL,
+ path string,
+ authHandler AuthHandler,
+ out interface{},
+ terse bool) error {
+
+ var requestUrl string
+
+ if q := strings.Index(path, "?"); q > 0 {
+ requestUrl = baseURL.Scheme + "://" + baseURL.Host + path[:q] + "?" + path[q+1:]
+ } else {
+ requestUrl = baseURL.Scheme + "://" + baseURL.Host + path
+ }
+
+ req, err := http.NewRequest("GET", requestUrl, nil)
+ if err != nil {
+ return err
+ }
+
+ err = maybeAddAuth(req, authHandler)
+ if err != nil {
+ return err
+ }
+
+ res, err := doHTTPRequest(req)
+ if err != nil {
+ return err
+ }
+
+ defer res.Body.Close()
+ if res.StatusCode != 200 {
+ bod, _ := ioutil.ReadAll(io.LimitReader(res.Body, 512))
+ if terse {
+ var outBuf interface{}
+
+ err := json.Unmarshal(bod, &outBuf)
+ if err == nil && outBuf != nil {
+ errText, ok := outBuf.(string)
+ if ok {
+ return fmt.Errorf(errText)
+ }
+ }
+ return fmt.Errorf(string(bod))
+ }
+ return fmt.Errorf("HTTP error %v getting %q: %s",
+ res.Status, requestUrl, bod)
+ }
+
+ d := json.NewDecoder(res.Body)
+ // GET request should have a response body
+ if err = d.Decode(&out); err != nil {
+ return fmt.Errorf("json decode err: %#v, for requestUrl: %s",
+ err, requestUrl)
+ }
+ return nil
+}
+
+func (c *Client) ProcessStream(path string, callb func(interface{}) error, data interface{}) error {
+ return c.processStream(c.BaseURL, path, c.ah, callb, data)
+}
+
+// Based on code in http://src.couchbase.org/source/xref/trunk/goproj/src/github.com/couchbase/indexing/secondary/dcp/pools.go#309
+func (c *Client) processStream(baseURL *url.URL, path string, authHandler AuthHandler, callb func(interface{}) error, data interface{}) error {
+ var requestUrl string
+
+ if q := strings.Index(path, "?"); q > 0 {
+ requestUrl = baseURL.Scheme + "://" + baseURL.Host + path[:q] + "?" + path[q+1:]
+ } else {
+ requestUrl = baseURL.Scheme + "://" + baseURL.Host + path
+ }
+
+ req, err := http.NewRequest("GET", requestUrl, nil)
+ if err != nil {
+ return err
+ }
+
+ err = maybeAddAuth(req, authHandler)
+ if err != nil {
+ return err
+ }
+
+ res, err := doHTTPRequestForStreaming(req)
+ if err != nil {
+ return err
+ }
+
+ defer res.Body.Close()
+ if res.StatusCode != 200 {
+ bod, _ := ioutil.ReadAll(io.LimitReader(res.Body, 512))
+ return fmt.Errorf("HTTP error %v getting %q: %s",
+ res.Status, requestUrl, bod)
+ }
+
+ reader := bufio.NewReader(res.Body)
+ for {
+ bs, err := reader.ReadBytes('\n')
+ if err != nil {
+ return err
+ }
+ if len(bs) == 1 && bs[0] == '\n' {
+ continue
+ }
+
+ err = json.Unmarshal(bs, data)
+ if err != nil {
+ return err
+ }
+ err = callb(data)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+
+}
+
+func (c *Client) parseURLResponse(path string, out interface{}) error {
+ return queryRestAPI(c.BaseURL, path, c.ah, out, false)
+}
+
+func (c *Client) parsePostURLResponse(path string, params map[string]interface{}, out interface{}) error {
+ return doPostAPI(c.BaseURL, path, params, c.ah, out, false)
+}
+
+func (c *Client) parsePostURLResponseTerse(path string, params map[string]interface{}, out interface{}) error {
+ return doPostAPI(c.BaseURL, path, params, c.ah, out, true)
+}
+
+func (c *Client) parseDeleteURLResponse(path string, params map[string]interface{}, out interface{}) error {
+ return doDeleteAPI(c.BaseURL, path, params, c.ah, out, false)
+}
+
+func (c *Client) parseDeleteURLResponseTerse(path string, params map[string]interface{}, out interface{}) error {
+ return doDeleteAPI(c.BaseURL, path, params, c.ah, out, true)
+}
+
+func (c *Client) parsePutURLResponse(path string, params map[string]interface{}, out interface{}) error {
+ return doPutAPI(c.BaseURL, path, params, c.ah, out, false)
+}
+
+func (c *Client) parsePutURLResponseTerse(path string, params map[string]interface{}, out interface{}) error {
+ return doPutAPI(c.BaseURL, path, params, c.ah, out, true)
+}
+
+func (b *Bucket) parseURLResponse(path string, out interface{}) error {
+ nodes := b.Nodes()
+ if len(nodes) == 0 {
+ return errors.New("no couch rest URLs")
+ }
+
+ // Pick a random node to start querying.
+ startNode := rand.Intn(len(nodes))
+ maxRetries := len(nodes)
+ for i := 0; i < maxRetries; i++ {
+ node := nodes[(startNode+i)%len(nodes)] // Wrap around the nodes list.
+ // Skip non-healthy nodes.
+ if node.Status != "healthy" || node.CouchAPIBase == "" {
+ continue
+ }
+ url := &url.URL{
+ Host: node.Hostname,
+ Scheme: "http",
+ }
+
+ // Lock here to avoid having pool closed under us.
+ b.RLock()
+ err := queryRestAPI(url, path, b.pool.client.ah, out, false)
+ b.RUnlock()
+ if err == nil {
+ return err
+ }
+ }
+ return errors.New("All nodes failed to respond or no healthy nodes for bucket found")
+}
+
+func (b *Bucket) parseAPIResponse(path string, out interface{}) error {
+ nodes := b.Nodes()
+ if len(nodes) == 0 {
+ return errors.New("no couch rest URLs")
+ }
+
+ var err error
+ var u *url.URL
+
+ // Pick a random node to start querying.
+ startNode := rand.Intn(len(nodes))
+ maxRetries := len(nodes)
+ for i := 0; i < maxRetries; i++ {
+ node := nodes[(startNode+i)%len(nodes)] // Wrap around the nodes list.
+ // Skip non-healthy nodes.
+ if node.Status != "healthy" || node.CouchAPIBase == "" {
+ continue
+ }
+
+ u, err = ParseURL(node.CouchAPIBase)
+ // Lock here so pool does not get closed under us.
+ b.RLock()
+ if err != nil {
+ b.RUnlock()
+ return fmt.Errorf("config error: Bucket %q node #%d CouchAPIBase=%q: %v",
+ b.Name, i, node.CouchAPIBase, err)
+ } else if b.pool != nil {
+ u.User = b.pool.client.BaseURL.User
+ }
+ u.Path = path
+
+ // generate the path so that the strings are properly escaped
+ // MB-13770
+ requestPath := strings.Split(u.String(), u.Host)[1]
+
+ err = queryRestAPI(u, requestPath, b.pool.client.ah, out, false)
+ b.RUnlock()
+ if err == nil {
+ return err
+ }
+ }
+
+ var errStr string
+ if err != nil {
+ errStr = "Error " + err.Error()
+ }
+
+ return errors.New("All nodes failed to respond or returned error or no healthy nodes for bucket found." + errStr)
+}
+
+type basicAuth struct {
+ u, p string
+}
+
+func (b basicAuth) GetCredentials() (string, string, string) {
+ return b.u, b.p, b.u
+}
+
+func basicAuthFromURL(us string) (ah AuthHandler) {
+ u, err := ParseURL(us)
+ if err != nil {
+ return
+ }
+ if user := u.User; user != nil {
+ pw, _ := user.Password()
+ ah = basicAuth{user.Username(), pw}
+ }
+ return
+}
+
+// ConnectWithAuth connects to a couchbase cluster with the given
+// authentication handler.
+func ConnectWithAuth(baseU string, ah AuthHandler) (c Client, err error) {
+ c.BaseURL, err = ParseURL(baseU)
+ if err != nil {
+ return
+ }
+ c.ah = ah
+
+ return c, c.parseURLResponse("/pools", &c.Info)
+}
+
+// Call this method with a TLS certificate file name to make communication
+// with the KV engine encrypted.
+//
+// This method should be called immediately after a Connect*() method.
+func (c *Client) InitTLS(certFile string) error {
+ serverCert, err := ioutil.ReadFile(certFile)
+ if err != nil {
+ return err
+ }
+ CA_Pool := x509.NewCertPool()
+ CA_Pool.AppendCertsFromPEM(serverCert)
+ c.tlsConfig = &tls.Config{RootCAs: CA_Pool}
+ return nil
+}
+
+func (c *Client) ClearTLS() {
+ c.tlsConfig = nil
+}
+
+// ConnectWithAuthCreds connects to a couchbase cluster with the given
+// authorization creds returned by cb_auth
+func ConnectWithAuthCreds(baseU, username, password string) (c Client, err error) {
+ c.BaseURL, err = ParseURL(baseU)
+ if err != nil {
+ return
+ }
+
+ c.ah = newBucketAuth(username, password, "")
+ return c, c.parseURLResponse("/pools", &c.Info)
+}
+
+// Connect to a couchbase cluster. An authentication handler will be
+// created from the userinfo in the URL if provided.
+func Connect(baseU string) (Client, error) {
+ return ConnectWithAuth(baseU, basicAuthFromURL(baseU))
+}
+
+type BucketInfo struct {
+ Name string // name of bucket
+ User string // Username to use for access
+ Password string // SASL password of bucket
+}
+
+// GetBucketList returns the SASL buckets reachable from the given base URL.
+func GetBucketList(baseU string) (bInfo []BucketInfo, err error) {
+
+ c := &Client{}
+ c.BaseURL, err = ParseURL(baseU)
+ if err != nil {
+ return
+ }
+ c.ah = basicAuthFromURL(baseU)
+
+ var buckets []Bucket
+ err = c.parseURLResponse("/pools/default/buckets", &buckets)
+ if err != nil {
+ return
+ }
+ bInfo = make([]BucketInfo, 0)
+ for _, bucket := range buckets {
+ user, pass, _ := c.ah.GetCredentials()
+ bucketInfo := BucketInfo{Name: bucket.Name, User: user, Password: pass}
+ bInfo = append(bInfo, bucketInfo)
+ }
+ return bInfo, err
+}
+
+//Set viewUpdateDaemonOptions
+func SetViewUpdateParams(baseU string, params map[string]interface{}) (viewOpts map[string]interface{}, err error) {
+
+ c := &Client{}
+ c.BaseURL, err = ParseURL(baseU)
+ if err != nil {
+ return
+ }
+ c.ah = basicAuthFromURL(baseU)
+
+ if len(params) < 1 {
+ return nil, fmt.Errorf("No params to set")
+ }
+
+ err = c.parsePostURLResponse("/settings/viewUpdateDaemon", params, &viewOpts)
+ if err != nil {
+ return
+ }
+ return viewOpts, err
+}
+
// NodeListChanged lets the caller know if the list of nodes a bucket is
// connected to has gone through an edit (a rebalance operation)
// since the last update to the bucket, in which case a Refresh is
// advised.
func (b *Bucket) NodeListChanged() bool {
	// Snapshot the fields needed for the REST call under the read lock.
	b.RLock()
	pool := b.pool
	uri := b.URI
	b.RUnlock()

	// Fetch a fresh copy of the bucket definition from the server.
	tmpb := &Bucket{}
	err := pool.client.parseURLResponse(uri, tmpb)
	if err != nil {
		// If the server cannot be asked, err on the side of refreshing.
		return true
	}

	// b.nodeList is an unsafe.Pointer to a []Node published by refresh();
	// dereference the current snapshot.
	bNodes := *(*[]Node)(b.nodeList)
	if len(bNodes) != len(tmpb.NodesJSON) {
		return true
	}

	// Same length: compare membership by hostname, ignoring order.
	bucketHostnames := map[string]bool{}
	for _, node := range bNodes {
		bucketHostnames[node.Hostname] = true
	}

	for _, node := range tmpb.NodesJSON {
		if _, found := bucketHostnames[node.Hostname]; !found {
			return true
		}
	}

	return false
}
+
// Sample data for scopes and collections as returned from the
// /pools/default/$BUCKET_NAME/collections API.
// {"myScope2":{"myCollectionC":{}},"myScope1":{"myCollectionB":{},"myCollectionA":{}},"_default":{"_default":{}}}

// Structures for parsing collections manifest.
// The map key is the name of the scope.
// Example data:
// {"uid":"b","scopes":[
//   {"name":"_default","uid":"0","collections":[
//     {"name":"_default","uid":"0"}]},
//   {"name":"myScope1","uid":"8","collections":[
//     {"name":"myCollectionB","uid":"c"},
//     {"name":"myCollectionA","uid":"b"}]},
//   {"name":"myScope2","uid":"9","collections":[
//     {"name":"myCollectionC","uid":"d"}]}]}

// InputManifest is the wire format of the manifest; uids are
// hexadecimal strings here, converted to integers in Manifest.
type InputManifest struct {
	Uid    string
	Scopes []InputScope
}

// InputScope is the wire format of one scope entry.
type InputScope struct {
	Name        string
	Uid         string
	Collections []InputCollection
}

// InputCollection is the wire format of one collection entry.
type InputCollection struct {
	Name string
	Uid  string
}

// Structures for storing collections information.

// Manifest is the parsed manifest with numeric uids; scopes are keyed
// by name.
type Manifest struct {
	Uid    uint64
	Scopes map[string]*Scope // map by name
}

// Scope holds a scope's collections, keyed by name.
type Scope struct {
	Name        string
	Uid         uint64
	Collections map[string]*Collection // map by name
}

// Collection identifies a single collection within a scope.
type Collection struct {
	Name string
	Uid  uint64
}

// _EMPTY_MANIFEST is the shared manifest returned when collections are
// disabled (see parseCollectionsManifest).
var _EMPTY_MANIFEST *Manifest = &Manifest{Uid: 0, Scopes: map[string]*Scope{}}
+
+func parseCollectionsManifest(res *gomemcached.MCResponse) (*Manifest, error) {
+ if !EnableCollections {
+ return _EMPTY_MANIFEST, nil
+ }
+
+ var im InputManifest
+ err := json.Unmarshal(res.Body, &im)
+ if err != nil {
+ return nil, err
+ }
+
+ uid, err := strconv.ParseUint(im.Uid, 16, 64)
+ if err != nil {
+ return nil, err
+ }
+ mani := &Manifest{Uid: uid, Scopes: make(map[string]*Scope, len(im.Scopes))}
+ for _, iscope := range im.Scopes {
+ scope_uid, err := strconv.ParseUint(iscope.Uid, 16, 64)
+ if err != nil {
+ return nil, err
+ }
+ scope := &Scope{Uid: scope_uid, Name: iscope.Name, Collections: make(map[string]*Collection, len(iscope.Collections))}
+ mani.Scopes[iscope.Name] = scope
+ for _, icoll := range iscope.Collections {
+ coll_uid, err := strconv.ParseUint(icoll.Uid, 16, 64)
+ if err != nil {
+ return nil, err
+ }
+ coll := &Collection{Uid: coll_uid, Name: icoll.Name}
+ scope.Collections[icoll.Name] = coll
+ }
+ }
+
+ return mani, nil
+}
+
+// This function assumes the bucket is locked.
+func (b *Bucket) GetCollectionsManifest() (*Manifest, error) {
+ // Collections not used?
+ if !EnableCollections {
+ return nil, fmt.Errorf("Collections not enabled.")
+ }
+
+ b.RLock()
+ pools := b.getConnPools(true /* already locked */)
+ if len(pools) == 0 {
+ b.RUnlock()
+ return nil, fmt.Errorf("Unable to get connection to retrieve collections manifest: no connection pool. No collections access to bucket %s.", b.Name)
+ }
+ pool := pools[0] // Any pool will do, so use the first one.
+ b.RUnlock()
+ client, err := pool.Get()
+ if err != nil {
+ return nil, fmt.Errorf("Unable to get connection to retrieve collections manifest: %v. No collections access to bucket %s.", err, b.Name)
+ }
+ client.SetDeadline(getDeadline(time.Time{}, DefaultTimeout))
+
+ // We need to select the bucket before GetCollectionsManifest()
+ // will work. This is sometimes done at startup (see defaultMkConn())
+ // but not always, depending on the auth type.
+ // Doing this is safe because we collect the the connections
+ // by bucket, so the bucket being selected will never change.
+ _, err = client.SelectBucket(b.Name)
+ if err != nil {
+ pool.Return(client)
+ return nil, fmt.Errorf("Unable to select bucket %s: %v. No collections access to bucket %s.", err, b.Name, b.Name)
+ }
+
+ res, err := client.GetCollectionsManifest()
+ if err != nil {
+ pool.Return(client)
+ return nil, fmt.Errorf("Unable to retrieve collections manifest: %v. No collections access to bucket %s.", err, b.Name)
+ }
+ mani, err := parseCollectionsManifest(res)
+ if err != nil {
+ pool.Return(client)
+ return nil, fmt.Errorf("Unable to parse collections manifest: %v. No collections access to bucket %s.", err, b.Name)
+ }
+
+ pool.Return(client)
+ return mani, nil
+}
+
// RefreshFully re-reads the bucket definition from the server and
// rebuilds all connection pools from scratch (no pool reuse).
func (b *Bucket) RefreshFully() error {
	return b.refresh(false)
}

// Refresh re-reads the bucket definition from the server, reusing
// existing connection pools for nodes that have not changed.
func (b *Bucket) Refresh() error {
	return b.refresh(true)
}
+
// refresh re-reads the bucket definition from the server and rebuilds
// the vbucket server map, node list and connection pools. When
// preserveConnections is true, pools for unchanged host:port entries
// are carried over instead of being torn down and recreated.
func (b *Bucket) refresh(preserveConnections bool) error {
	// Snapshot what is needed for the REST calls under the read lock.
	b.RLock()
	pool := b.pool
	uri := b.URI
	client := pool.client
	b.RUnlock()

	var poolServices PoolServices
	var err error
	// With TLS configured, the nodeServices map is needed below to
	// translate each KV port into its SSL equivalent.
	if client.tlsConfig != nil {
		poolServices, err = client.GetPoolServices("default")
		if err != nil {
			return err
		}
	}

	// Fetch the current bucket definition into a scratch Bucket.
	tmpb := &Bucket{}
	err = pool.client.parseURLResponse(uri, tmpb)
	if err != nil {
		return err
	}

	pools := b.getConnPools(false /* bucket not already locked */)

	// We need this lock to ensure that bucket refreshes happening because
	// of NMVb errors received during bulkGet do not end up over-writing
	// pool.inUse.
	b.Lock()

	// Mark every existing pool unused; pools that survive are re-flagged
	// inUse below, and the rest are closed by replaceConnPools2.
	for _, pool := range pools {
		if pool != nil {
			pool.inUse = false
		}
	}

	newcps := make([]*connectionPool, len(tmpb.VBSMJson.ServerList))
	for i := range newcps {
		hostport := tmpb.VBSMJson.ServerList[i]
		if preserveConnections {
			pool := b.getConnPoolByHost(hostport, true /* bucket already locked */)
			if pool != nil && pool.inUse == false && (!pool.encrypted || pool.tlsConfig == client.tlsConfig) {
				// if the hostname and index is unchanged then reuse this pool
				newcps[i] = pool
				pool.inUse = true
				continue
			}
		}

		var encrypted bool
		if client.tlsConfig != nil {
			hostport, encrypted, err = MapKVtoSSL(hostport, &poolServices)
			if err != nil {
				b.Unlock()
				return err
			}
		}

		// Prefer the bucket-specific auth handler when one was set.
		if b.ah != nil {
			newcps[i] = newConnectionPool(hostport,
				b.ah, AsynchronousCloser, PoolSize, PoolOverflow, client.tlsConfig, b.Name, encrypted)

		} else {
			newcps[i] = newConnectionPool(hostport,
				b.authHandler(true /* bucket already locked */),
				AsynchronousCloser, PoolSize, PoolOverflow, client.tlsConfig, b.Name, encrypted)
		}
	}
	b.replaceConnPools2(newcps, true /* bucket already locked */)
	tmpb.ah = b.ah
	// Publish the new maps via unsafe.Pointer; readers such as
	// NodeListChanged dereference these without taking the bucket lock.
	b.vBucketServerMap = unsafe.Pointer(&tmpb.VBSMJson)
	b.nodeList = unsafe.Pointer(&tmpb.NodesJSON)

	b.Unlock()
	return nil
}
+
+func (p *Pool) refresh() (err error) {
+ p.BucketMap = make(map[string]*Bucket)
+
+ buckets := []Bucket{}
+ err = p.client.parseURLResponse(p.BucketURL["uri"], &buckets)
+ if err != nil {
+ return err
+ }
+ for i, _ := range buckets {
+ b := new(Bucket)
+ *b = buckets[i]
+ b.pool = p
+ b.nodeList = unsafe.Pointer(&b.NodesJSON)
+
+ // MB-33185 this is merely defensive, just in case
+ // refresh() gets called on a perfectly node pool
+ ob, ok := p.BucketMap[b.Name]
+ if ok && ob.connPools != nil {
+ ob.Close()
+ }
+ b.replaceConnPools(make([]*connectionPool, len(b.VBSMJson.ServerList)))
+ p.BucketMap[b.Name] = b
+ runtime.SetFinalizer(b, bucketFinalizer)
+ }
+ buckets = nil
+ return nil
+}
+
+// GetPool gets a pool from within the couchbase cluster (usually
+// "default").
+func (c *Client) GetPool(name string) (p Pool, err error) {
+ var poolURI string
+
+ for _, p := range c.Info.Pools {
+ if p.Name == name {
+ poolURI = p.URI
+ break
+ }
+ }
+ if poolURI == "" {
+ return p, errors.New("No pool named " + name)
+ }
+
+ err = c.parseURLResponse(poolURI, &p)
+ if err != nil {
+ return p, err
+ }
+
+ p.client = c
+
+ err = p.refresh()
+ return
+}
+
+func (c *Client) setupPoolForBucket(poolname, bucketname string) (p Pool, err error) {
+ var poolURI string
+
+ for _, p := range c.Info.Pools {
+ if p.Name == poolname {
+ poolURI = p.URI
+ break
+ }
+ }
+ if poolURI == "" {
+ return p, errors.New("No pool named " + poolname)
+ }
+
+ err = c.parseURLResponse(poolURI, &p)
+ if err != nil {
+ return p, err
+ }
+
+ p.client = c
+ p.BucketMap = make(map[string]*Bucket)
+
+ buckets := []Bucket{}
+ err = p.client.parseURLResponse(p.BucketURL["uri"], &buckets)
+ if err != nil {
+ return
+ }
+ for i, _ := range buckets {
+ if buckets[i].Name == bucketname {
+ b := new(Bucket)
+ *b = buckets[i]
+ b.pool = &p
+ b.nodeList = unsafe.Pointer(&b.NodesJSON)
+ b.replaceConnPools(make([]*connectionPool, len(b.VBSMJson.ServerList)))
+ p.BucketMap[b.Name] = b
+ runtime.SetFinalizer(b, bucketFinalizer)
+ break
+ }
+ }
+ buckets = nil
+
+ return
+}
+
+// GetPoolServices returns all the bucket-independent services in a pool.
+// (See "Exposing services outside of bucket context" in http://goo.gl/uuXRkV)
+func (c *Client) GetPoolServices(name string) (ps PoolServices, err error) {
+ var poolName string
+ for _, p := range c.Info.Pools {
+ if p.Name == name {
+ poolName = p.Name
+ }
+ }
+ if poolName == "" {
+ return ps, errors.New("No pool named " + name)
+ }
+
+ poolURI := "/pools/" + poolName + "/nodeServices"
+ err = c.parseURLResponse(poolURI, &ps)
+
+ return
+}
+
+func (b *Bucket) GetPoolServices(name string) (*PoolServices, error) {
+ b.RLock()
+ pool := b.pool
+ b.RUnlock()
+
+ ps, err := pool.client.GetPoolServices(name)
+ if err != nil {
+ return nil, err
+ }
+
+ return &ps, nil
+}
+
// Close marks this bucket as no longer needed, closing connections it
// may have open.
func (b *Bucket) Close() {
	b.Lock()
	defer b.Unlock()
	if b.connPools != nil {
		// Close every live connection pool, then drop the slice so a
		// second Close() becomes a no-op.
		for _, c := range b.getConnPools(true /* already locked */) {
			if c != nil {
				c.Close()
			}
		}
		b.connPools = nil

		// Close the associated pool asynchronously which acquires the
		// bucket lock separately.
		if b.dedicatedPool {
			go b.pool.Close()
		}
	}
}
+
// bucketFinalizer is registered via runtime.SetFinalizer on buckets
// created in Pool.refresh()/setupPoolForBucket; it closes any
// connection pools still open when the bucket becomes unreachable.
func bucketFinalizer(b *Bucket) {
	if b.connPools != nil {
		// A bucket that was never explicitly closed still holding
		// connections indicates a leak worth logging.
		if !b.closed {
			logging.Warnf("Finalizing a bucket with active connections.")
		}

		// MB-33185 do not leak connection pools
		b.Close()
	}
}
+
+// GetBucket gets a bucket from within this pool.
+func (p *Pool) GetBucket(name string) (*Bucket, error) {
+ rv, ok := p.BucketMap[name]
+ if !ok {
+ return nil, &BucketNotFoundError{bucket: name}
+ }
+ err := rv.Refresh()
+ if err != nil {
+ return nil, err
+ }
+ return rv, nil
+}
+
// GetBucketWithAuth gets a bucket from within this pool, authenticating
// with the supplied bucket credentials.
func (p *Pool) GetBucketWithAuth(bucket, username, password string) (*Bucket, error) {
	rv, ok := p.BucketMap[bucket]
	if !ok {
		return nil, &BucketNotFoundError{bucket: bucket}
	}

	// Install the bucket-specific auth handler before refreshing, so the
	// new connection pools are built with these credentials.
	rv.ah = newBucketAuth(username, password, bucket)
	err := rv.Refresh()
	if err != nil {
		return nil, err
	}
	return rv, nil
}
+
+// GetPool gets the pool to which this bucket belongs.
+func (b *Bucket) GetPool() *Pool {
+ b.RLock()
+ defer b.RUnlock()
+ ret := b.pool
+ return ret
+}
+
+// GetClient gets the client from which we got this pool.
+func (p *Pool) GetClient() *Client {
+ return p.client
+}
+
+// Release bucket connections when the pool is no longer in use
+func (p *Pool) Close() {
+
+ // MB-36186 make the bucket map inaccessible
+ bucketMap := p.BucketMap
+ p.BucketMap = nil
+
+ // fine to loop through the buckets unlocked
+ // locking happens at the bucket level
+ for b, _ := range bucketMap {
+
+ // MB-36186 make the bucket unreachable and avoid concurrent read/write map panics
+ bucket := bucketMap[b]
+ bucketMap[b] = nil
+
+ bucket.Lock()
+
+ // MB-33208 defer closing connection pools until the bucket is no longer used
+ // MB-36186 if the bucket is unused make it unreachable straight away
+ needClose := bucket.connPools == nil && !bucket.closed
+ if needClose {
+ runtime.SetFinalizer(&bucket, nil)
+ }
+ bucket.closed = true
+ bucket.Unlock()
+ if needClose {
+ bucket.Close()
+ }
+ }
+}
+
+// GetBucket is a convenience function for getting a named bucket from
+// a URL
+func GetBucket(endpoint, poolname, bucketname string) (*Bucket, error) {
+ var err error
+ client, err := Connect(endpoint)
+ if err != nil {
+ return nil, err
+ }
+
+ pool, err := client.setupPoolForBucket(poolname, bucketname)
+ if err != nil {
+ return nil, err
+ }
+
+ bucket, err := pool.GetBucket(bucketname)
+ if err != nil {
+ // close dedicated pool on error
+ pool.Close()
+ return nil, err
+ }
+
+ bucket.dedicatedPool = true
+ return bucket, nil
+}
+
+// ConnectWithAuthAndGetBucket is a convenience function for
+// getting a named bucket from a given URL and an auth callback
+func ConnectWithAuthAndGetBucket(endpoint, poolname, bucketname string,
+ ah AuthHandler) (*Bucket, error) {
+ client, err := ConnectWithAuth(endpoint, ah)
+ if err != nil {
+ return nil, err
+ }
+
+ pool, err := client.setupPoolForBucket(poolname, bucketname)
+ if err != nil {
+ return nil, err
+ }
+
+ bucket, err := pool.GetBucket(bucketname)
+ if err != nil {
+ // close dedicated pool on error
+ pool.Close()
+ return nil, err
+ }
+
+ bucket.dedicatedPool = true
+ return bucket, nil
+}
+
// GetSystemBucket fetches the named bucket from the pool, creating it
// (as a 100MB couchbase bucket) if it does not exist. Because creation
// is asynchronous on the server, the function polls with exponential
// backoff until the bucket becomes usable.
func GetSystemBucket(c *Client, p *Pool, name string) (*Bucket, error) {
	bucket, err := p.GetBucket(name)
	if err != nil {
		// Only a not-found error triggers creation; anything else is fatal.
		if _, ok := err.(*BucketNotFoundError); !ok {
			return nil, err
		}

		// create the bucket if not found
		args := map[string]interface{}{
			"bucketType": "couchbase",
			"name":       name,
			"ramQuotaMB": 100,
		}
		var ret interface{}
		// allow "bucket already exists" error in case duplicate create
		// (e.g. two query nodes starting at same time)
		err = c.parsePostURLResponseTerse("/pools/default/buckets", args, &ret)
		if err != nil && !AlreadyExistsError(err) {
			return nil, err
		}

		// bucket created asynchronously, try to get the bucket
		maxRetry := 8
		interval := 100 * time.Millisecond
		for i := 0; i < maxRetry; i++ {
			time.Sleep(interval)
			interval *= 2
			err = p.refresh()
			if err != nil {
				return nil, err
			}
			bucket, err = p.GetBucket(name)
			if bucket != nil {
				// Usable once it is open and has at least one connection pool.
				bucket.RLock()
				ok := !bucket.closed && len(bucket.getConnPools(true /* already locked */)) > 0
				bucket.RUnlock()
				if ok {
					break
				}
			} else if err != nil {
				// Keep polling only while the bucket is still not found;
				// any other error is final.
				if _, ok := err.(*BucketNotFoundError); !ok {
					break
				}
			}
		}
	}

	return bucket, err
}
+
+func DropSystemBucket(c *Client, name string) error {
+ err := c.parseDeleteURLResponseTerse("/pools/default/buckets/"+name, nil, nil)
+ return err
+}
+
// AlreadyExistsError reports whether err is a server "already exists"
// error for a bucket, scope or collection. The known message forms are:
//
//	Bucket error: Bucket with given name already exists
//	Scope error: Scope with this name already exists
//	Collection error: Collection with this name already exists
func AlreadyExistsError(err error) bool {
	// Guard against nil so callers may test unconditionally; the previous
	// version panicked on err.Error() when err was nil.
	if err == nil {
		return false
	}
	return strings.Contains(err.Error(), " name already exists")
}
diff --git a/vendor/github.com/couchbase/go-couchbase/port_map.go b/vendor/github.com/couchbase/go-couchbase/port_map.go
new file mode 100644
index 00000000..864bd4ae
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/port_map.go
@@ -0,0 +1,106 @@
+package couchbase
+
+/*
+
+The goal here is to map a hostname:port combination to another hostname:port
+combination. The original hostname:port gives the name and regular KV port
+of a couchbase server. We want to determine the corresponding SSL KV port.
+
+To do this, we have a pool services structure, as obtained from
+the /pools/default/nodeServices API.
+
+For a fully configured two-node system, the structure may look like this:
+{"rev":32,"nodesExt":[
+ {"services":{"mgmt":8091,"mgmtSSL":18091,"fts":8094,"ftsSSL":18094,"indexAdmin":9100,"indexScan":9101,"indexHttp":9102,"indexStreamInit":9103,"indexStreamCatchup":9104,"indexStreamMaint":9105,"indexHttps":19102,"capiSSL":18092,"capi":8092,"kvSSL":11207,"projector":9999,"kv":11210,"moxi":11211},"hostname":"172.23.123.101"},
+ {"services":{"mgmt":8091,"mgmtSSL":18091,"indexAdmin":9100,"indexScan":9101,"indexHttp":9102,"indexStreamInit":9103,"indexStreamCatchup":9104,"indexStreamMaint":9105,"indexHttps":19102,"capiSSL":18092,"capi":8092,"kvSSL":11207,"projector":9999,"kv":11210,"moxi":11211,"n1ql":8093,"n1qlSSL":18093},"thisNode":true,"hostname":"172.23.123.102"}]}
+
+In this case, note the "hostname" fields, and the "kv" and "kvSSL" fields.
+
+For a single-node system, perhaps brought up for testing, the structure may look like this:
+{"rev":66,"nodesExt":[
+ {"services":{"mgmt":8091,"mgmtSSL":18091,"indexAdmin":9100,"indexScan":9101,"indexHttp":9102,"indexStreamInit":9103,"indexStreamCatchup":9104,"indexStreamMaint":9105,"indexHttps":19102,"kv":11210,"kvSSL":11207,"capi":8092,"capiSSL":18092,"projector":9999,"n1ql":8093,"n1qlSSL":18093},"thisNode":true}],"clusterCapabilitiesVer":[1,0],"clusterCapabilities":{"n1ql":["enhancedPreparedStatements"]}}
+
Here, note that there is only a single entry in the "nodesExt" array and that it does not have a "hostname" field.
+We will assume that either hostname fields are present, or there is only a single node.
+*/
+
+import (
+ "encoding/json"
+ "fmt"
+ "net"
+ "strconv"
+)
+
+func ParsePoolServices(jsonInput string) (*PoolServices, error) {
+ ps := &PoolServices{}
+ err := json.Unmarshal([]byte(jsonInput), ps)
+ return ps, err
+}
+
// MapKVtoSSL accepts a "host:port" string representing the KV TCP port
// and the pools nodeServices payload and returns a host:port string
// representing the KV TLS port on the same node as the KV TCP port.
// Returns the original host:port in case of local communication
// (services on the same node as source).
func MapKVtoSSL(hostport string, ps *PoolServices) (string, bool, error) {
	return MapKVtoSSLExt(hostport, ps, false)
}
+
+func MapKVtoSSLExt(hostport string, ps *PoolServices, force bool) (string, bool, error) {
+ host, port, err := net.SplitHostPort(hostport)
+ if err != nil {
+ return "", false, fmt.Errorf("Unable to split hostport %s: %v", hostport, err)
+ }
+
+ portInt, err := strconv.Atoi(port)
+ if err != nil {
+ return "", false, fmt.Errorf("Unable to parse host/port combination %s: %v", hostport, err)
+ }
+
+ var ns *NodeServices
+ for i := range ps.NodesExt {
+ hostname := ps.NodesExt[i].Hostname
+ if len(hostname) != 0 && hostname != host {
+ /* If the hostname is the empty string, it means the node (and by extension
+ the cluster) is configured on the loopback. Further, it means that the client
+ should use whatever hostname it used to get the nodeServices information in
+ the first place to access the cluster. Thus, when the hostname is empty in
+ the nodeService entry we can assume that client will use the hostname it used
+ to access the KV TCP endpoint - and thus that it automatically "matches".
+ If hostname is not empty and doesn't match then we move to the next entry.
+ */
+ continue
+ }
+ kvPort, found := ps.NodesExt[i].Services["kv"]
+ if !found {
+ /* not a node with a KV service */
+ continue
+ }
+ if kvPort == portInt {
+ ns = &(ps.NodesExt[i])
+ break
+ }
+ }
+
+ if ns == nil {
+ return "", false, fmt.Errorf("Unable to parse host/port combination %s: no matching node found among %d", hostport, len(ps.NodesExt))
+ }
+ kvSSL, found := ns.Services["kvSSL"]
+ if !found {
+ return "", false, fmt.Errorf("Unable to map host/port combination %s: target host has no kvSSL port listed", hostport)
+ }
+
+ //Don't encrypt for communication between local nodes
+ if !force && (len(ns.Hostname) == 0 || ns.ThisNode) {
+ return hostport, false, nil
+ }
+
+ ip := net.ParseIP(host)
+ if ip != nil && ip.To4() == nil && ip.To16() != nil { // IPv6 and not a FQDN
+ // Prefix and suffix square brackets as SplitHostPort removes them,
+ // see: https://golang.org/pkg/net/#SplitHostPort
+ host = "[" + host + "]"
+ }
+
+ return fmt.Sprintf("%s:%d", host, kvSSL), true, nil
+}
diff --git a/vendor/github.com/couchbase/go-couchbase/streaming.go b/vendor/github.com/couchbase/go-couchbase/streaming.go
new file mode 100644
index 00000000..e66ef250
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/streaming.go
@@ -0,0 +1,224 @@
+package couchbase
+
+import (
+ "encoding/json"
+ "fmt"
+ "github.com/couchbase/goutils/logging"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "time"
+ "unsafe"
+)
+
// Bucket auto-updater gets the latest version of the bucket config from
// the server. If the configuration has changed then updated the local
// bucket information. If the bucket has been deleted then notify anyone
// who is holding a reference to this bucket

// MAX_RETRY_COUNT is the number of consecutive streaming failures
// tolerated before UpdateBucket2 gives up.
const MAX_RETRY_COUNT = 5

// DISCONNECT_PERIOD is a streaming reconnect window; it is not
// referenced in this file's visible code — presumably used elsewhere in
// the package (TODO confirm).
const DISCONNECT_PERIOD = 120 * time.Second

// NotifyFn is invoked with the bucket name and the terminating error
// when a bucket updater exits.
type NotifyFn func(bucket string, err error)

// StreamingFn is invoked with each freshly decoded bucket configuration.
type StreamingFn func(bucket *Bucket)

// Use TCP keepalive to detect half close sockets
var updaterTransport http.RoundTripper = &http.Transport{
	Proxy: http.ProxyFromEnvironment,
	Dial: (&net.Dialer{
		Timeout:   30 * time.Second,
		KeepAlive: 30 * time.Second,
	}).Dial,
}

// updaterHTTPClient is the shared client for all streaming updaters.
var updaterHTTPClient = &http.Client{Transport: updaterTransport}
+
+func doHTTPRequestForUpdate(req *http.Request) (*http.Response, error) {
+
+ var err error
+ var res *http.Response
+
+ for i := 0; i < HTTP_MAX_RETRY; i++ {
+ res, err = updaterHTTPClient.Do(req)
+ if err != nil && isHttpConnError(err) {
+ continue
+ }
+ break
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ return res, err
+}
+
+func (b *Bucket) RunBucketUpdater(notify NotifyFn) {
+ b.RunBucketUpdater2(nil, notify)
+}
+
+func (b *Bucket) RunBucketUpdater2(streamingFn StreamingFn, notify NotifyFn) {
+ go func() {
+ err := b.UpdateBucket2(streamingFn)
+ if err != nil {
+ if notify != nil {
+ notify(b.GetName(), err)
+ }
+ logging.Errorf(" Bucket Updater exited with err %v", err)
+ }
+ }()
+}
+
// replaceConnPools2 swaps the bucket's connection-pool slice for the
// given one and closes every old pool that was not carried over (i.e.
// whose inUse flag is false). Pass bucketLocked=true when the caller
// already holds the bucket write lock.
func (b *Bucket) replaceConnPools2(with []*connectionPool, bucketLocked bool) {
	if !bucketLocked {
		b.Lock()
		defer b.Unlock()
	}
	old := b.connPools
	b.connPools = unsafe.Pointer(&with)
	if old != nil {
		for _, pool := range *(*[]*connectionPool)(old) {
			// Pools flagged inUse were adopted into the new slice; keep them.
			if pool != nil && pool.inUse == false {
				pool.Close()
			}
		}
	}
	return
}
+
// UpdateBucket runs the streaming bucket updater without a streaming
// callback; see UpdateBucket2.
func (b *Bucket) UpdateBucket() error {
	return b.UpdateBucket2(nil)
}
+
// UpdateBucket2 opens the server's bucketsStreaming endpoint and applies
// each configuration it pushes to this bucket (vbucket map, node list,
// connection pools). streamingFn, if non-nil, is invoked with the
// freshly decoded configuration after it has been applied. The call
// blocks until MAX_RETRY_COUNT consecutive failures occur, then returns
// the last error.
func (b *Bucket) UpdateBucket2(streamingFn StreamingFn) error {
	var failures int
	var returnErr error
	var poolServices PoolServices

	for {

		if failures == MAX_RETRY_COUNT {
			logging.Errorf(" Maximum failures reached. Exiting loop...")
			return fmt.Errorf("Max failures reached. Last Error %v", returnErr)
		}

		nodes := b.Nodes()
		if len(nodes) < 1 {
			return fmt.Errorf("No healthy nodes found")
		}

		streamUrl := fmt.Sprintf("%s/pools/default/bucketsStreaming/%s", b.pool.client.BaseURL, uriAdj(b.GetName()))
		logging.Infof(" Trying with %s", streamUrl)
		req, err := http.NewRequest("GET", streamUrl, nil)
		if err != nil {
			return err
		}

		// Lock here to avoid having pool closed under us.
		b.RLock()
		err = maybeAddAuth(req, b.pool.client.ah)
		b.RUnlock()
		if err != nil {
			return err
		}

		res, err := doHTTPRequestForUpdate(req)
		if err != nil {
			return err
		}

		if res.StatusCode != 200 {
			// Read at most 512 bytes of the body for the error message.
			bod, _ := ioutil.ReadAll(io.LimitReader(res.Body, 512))
			logging.Errorf("Failed to connect to host, unexpected status code: %v. Body %s", res.StatusCode, bod)
			res.Body.Close()
			returnErr = fmt.Errorf("Failed to connect to host. Status %v Body %s", res.StatusCode, bod)
			failures++
			continue
		}

		dec := json.NewDecoder(res.Body)

		tmpb := &Bucket{}
		// Inner loop: one iteration per configuration pushed by the server.
		for {

			err := dec.Decode(&tmpb)
			if err != nil {
				returnErr = err
				res.Body.Close()
				break
			}

			// if we got here, reset failure count
			failures = 0

			// With TLS configured, nodeServices is needed below to map
			// KV ports to their SSL equivalents.
			if b.pool.client.tlsConfig != nil {
				poolServices, err = b.pool.client.GetPoolServices("default")
				if err != nil {
					returnErr = err
					res.Body.Close()
					break
				}
			}

			b.Lock()

			// mark all the old connection pools for deletion
			pools := b.getConnPools(true /* already locked */)
			for _, pool := range pools {
				if pool != nil {
					pool.inUse = false
				}
			}

			newcps := make([]*connectionPool, len(tmpb.VBSMJson.ServerList))
			for i := range newcps {
				// get the old connection pool and check if it is still valid
				pool := b.getConnPoolByHost(tmpb.VBSMJson.ServerList[i], true /* bucket already locked */)
				if pool != nil && pool.inUse == false && pool.tlsConfig == b.pool.client.tlsConfig {
					// if the hostname and index is unchanged then reuse this pool
					newcps[i] = pool
					pool.inUse = true
					continue
				}
				// else create a new pool
				var encrypted bool
				hostport := tmpb.VBSMJson.ServerList[i]
				if b.pool.client.tlsConfig != nil {
					hostport, encrypted, err = MapKVtoSSL(hostport, &poolServices)
					if err != nil {
						b.Unlock()
						return err
					}
				}
				// Prefer the bucket-specific auth handler if one was set.
				if b.ah != nil {
					newcps[i] = newConnectionPool(hostport,
						b.ah, false, PoolSize, PoolOverflow, b.pool.client.tlsConfig, b.Name, encrypted)

				} else {
					newcps[i] = newConnectionPool(hostport,
						b.authHandler(true /* bucket already locked */),
						false, PoolSize, PoolOverflow, b.pool.client.tlsConfig, b.Name, encrypted)
				}
			}

			b.replaceConnPools2(newcps, true /* bucket already locked */)

			tmpb.ah = b.ah
			// Publish the new maps via unsafe.Pointer for readers that
			// dereference them without the bucket lock.
			b.vBucketServerMap = unsafe.Pointer(&tmpb.VBSMJson)
			b.nodeList = unsafe.Pointer(&tmpb.NodesJSON)
			b.Unlock()

			if streamingFn != nil {
				streamingFn(tmpb)
			}
			logging.Debugf("Got new configuration for bucket %s", b.GetName())

		}
		// we are here because of an error
		failures++
		continue

	}
	return nil
}
diff --git a/vendor/github.com/couchbase/go-couchbase/tap.go b/vendor/github.com/couchbase/go-couchbase/tap.go
new file mode 100644
index 00000000..86edd305
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/tap.go
@@ -0,0 +1,143 @@
+package couchbase
+
+import (
+ "github.com/couchbase/gomemcached/client"
+ "github.com/couchbase/goutils/logging"
+ "sync"
+ "time"
+)
+
// initialRetryInterval is the backoff after the first TAP connection
// failure; it doubles on each retry up to maximumRetryInterval.
const initialRetryInterval = 1 * time.Second
const maximumRetryInterval = 30 * time.Second

// A TapFeed streams mutation events from a bucket.
//
// Events from the bucket can be read from the channel 'C'. Remember
// to call Close() on it when you're done, unless its channel has
// closed itself already.
type TapFeed struct {
	C <-chan memcached.TapEvent

	bucket    *Bucket
	args      *memcached.TapArguments
	nodeFeeds []*memcached.TapFeed    // The TAP feeds of the individual nodes
	output    chan memcached.TapEvent // Same as C but writeably-typed
	wg        sync.WaitGroup          // tracks the per-node forwarding goroutines
	quit      chan bool               // closed by Close() to stop everything
}
+
+// StartTapFeed creates and starts a new Tap feed
+func (b *Bucket) StartTapFeed(args *memcached.TapArguments) (*TapFeed, error) {
+ if args == nil {
+ defaultArgs := memcached.DefaultTapArguments()
+ args = &defaultArgs
+ }
+
+ feed := &TapFeed{
+ bucket: b,
+ args: args,
+ output: make(chan memcached.TapEvent, 10),
+ quit: make(chan bool),
+ }
+
+ go feed.run()
+
+ feed.C = feed.output
+ return feed, nil
+}
+
+// Goroutine that runs the feed
+func (feed *TapFeed) run() {
+ retryInterval := initialRetryInterval
+ bucketOK := true
+ for {
+ // Connect to the TAP feed of each server node:
+ if bucketOK {
+ killSwitch, err := feed.connectToNodes()
+ if err == nil {
+ // Run until one of the sub-feeds fails:
+ select {
+ case <-killSwitch:
+ case <-feed.quit:
+ return
+ }
+ feed.closeNodeFeeds()
+ retryInterval = initialRetryInterval
+ }
+ }
+
+ // On error, try to refresh the bucket in case the list of nodes changed:
+ logging.Infof("go-couchbase: TAP connection lost; reconnecting to bucket %q in %v",
+ feed.bucket.Name, retryInterval)
+ err := feed.bucket.Refresh()
+ bucketOK = err == nil
+
+ select {
+ case <-time.After(retryInterval):
+ case <-feed.quit:
+ return
+ }
+ if retryInterval *= 2; retryInterval > maximumRetryInterval {
+ retryInterval = maximumRetryInterval
+ }
+ }
+}
+
+func (feed *TapFeed) connectToNodes() (killSwitch chan bool, err error) {
+ killSwitch = make(chan bool)
+ for _, serverConn := range feed.bucket.getConnPools(false /* not already locked */) {
+ var singleFeed *memcached.TapFeed
+ singleFeed, err = serverConn.StartTapFeed(feed.args)
+ if err != nil {
+ logging.Errorf("go-couchbase: Error connecting to tap feed of %s: %v", serverConn.host, err)
+ feed.closeNodeFeeds()
+ return
+ }
+ feed.nodeFeeds = append(feed.nodeFeeds, singleFeed)
+ go feed.forwardTapEvents(singleFeed, killSwitch, serverConn.host)
+ feed.wg.Add(1)
+ }
+ return
+}
+
// Goroutine that forwards Tap events from a single node's feed to the aggregate feed.
func (feed *TapFeed) forwardTapEvents(singleFeed *memcached.TapFeed, killSwitch chan bool, host string) {
	defer feed.wg.Done()
	for {
		select {
		case event, ok := <-singleFeed.C:
			if !ok {
				// The node feed closed: report its error (if any) and
				// signal the killSwitch so run() tears down and reconnects.
				if singleFeed.Error != nil {
					logging.Errorf("go-couchbase: Tap feed from %s failed: %v", host, singleFeed.Error)
				}
				killSwitch <- true
				return
			}
			feed.output <- event
		case <-feed.quit:
			return
		}
	}
}
+
// closeNodeFeeds closes every per-node TAP feed and forgets them.
func (feed *TapFeed) closeNodeFeeds() {
	for _, f := range feed.nodeFeeds {
		f.Close()
	}
	feed.nodeFeeds = nil
}

// Close a Tap feed. Safe to call more than once: a second call returns
// immediately because quit is already closed.
func (feed *TapFeed) Close() error {
	// Non-blocking probe: if quit is already closed we are done.
	select {
	case <-feed.quit:
		return nil
	default:
	}

	feed.closeNodeFeeds()
	close(feed.quit)
	// Wait for every forwarding goroutine to exit before closing the
	// output channel they write to.
	feed.wg.Wait()
	close(feed.output)
	return nil
}
diff --git a/vendor/github.com/couchbase/go-couchbase/upr.go b/vendor/github.com/couchbase/go-couchbase/upr.go
new file mode 100644
index 00000000..844bf915
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/upr.go
@@ -0,0 +1,399 @@
+package couchbase
+
+import (
+ "log"
+ "sync"
+ "time"
+
+ "fmt"
+ "github.com/couchbase/gomemcached"
+ "github.com/couchbase/gomemcached/client"
+ "github.com/couchbase/goutils/logging"
+)
+
+// A UprFeed streams mutation events from a bucket.
+//
+// Events from the bucket can be read from the channel 'C'. Remember
+// to call Close() on it when you're done, unless its channel has
+// closed itself already.
+type UprFeed struct {
+	C <-chan *memcached.UprEvent // read-only view of 'output'
+
+	bucket          *Bucket                  // bucket this feed streams from
+	nodeFeeds       map[string]*FeedInfo     // The UPR feeds of the individual nodes, keyed by host
+	output          chan *memcached.UprEvent // Same as C but writeably-typed
+	outputClosed    bool                     // true once 'output' has been closed
+	quit            chan bool                // closed by Close() to stop the feed
+	name            string                   // name of this UPR feed
+	sequence        uint32                   // sequence number for this feed
+	connected       bool                     // set once the initial node connections succeed
+	killSwitch      chan bool                // signalled by a node forwarder on failure
+	closing         bool                     // set by Close() before tear-down begins
+	wg              sync.WaitGroup           // tracks per-node forwarder goroutines
+	dcp_buffer_size uint32                   // passed through to each node's StartUprFeed
+	data_chan_size  int                      // capacity of 'output'
+}
+
+// FeedInfo is the UprFeed state for a single node connection.
+type FeedInfo struct {
+	uprFeed   *memcached.UprFeed // UPR feed handle
+	host      string             // hostname of the node
+	connected bool               // true while the node connection is considered live
+	quit      chan bool          // closed by closeNodeFeeds to stop this node's forwarder
+}
+
+// FailoverLog maps a vbucket id to that vbucket's failover log.
+type FailoverLog map[uint16]memcached.FailoverLog
+
+// GetFailoverLogs, get the failover logs for a set of vbucket ids
+func (b *Bucket) GetFailoverLogs(vBuckets []uint16) (FailoverLog, error) {
+
+	// map vbids to their corresponding hosts
+	vbHostList := make(map[string][]uint16)
+	vbm := b.VBServerMap()
+	if len(vbm.VBucketMap) < len(vBuckets) {
+		return nil, fmt.Errorf("vbmap smaller than vbucket list: %v vs. %v",
+			vbm.VBucketMap, vBuckets)
+	}
+
+	// Group the requested vbuckets by the host that masters them.
+	for _, vb := range vBuckets {
+		masterID := vbm.VBucketMap[vb][0]
+		master := b.getMasterNode(masterID)
+		if master == "" {
+			return nil, fmt.Errorf("No master found for vb %d", vb)
+		}
+
+		vbList := vbHostList[master]
+		if vbList == nil {
+			vbList = make([]uint16, 0)
+		}
+		vbList = append(vbList, vb)
+		vbHostList[master] = vbList
+	}
+
+	// Query each node that masters at least one requested vbucket.
+	failoverLogMap := make(FailoverLog)
+	for _, serverConn := range b.getConnPools(false /* not already locked */) {
+
+		vbList := vbHostList[serverConn.host]
+		if vbList == nil {
+			continue
+		}
+
+		mc, err := serverConn.Get()
+		if err != nil {
+			logging.Infof("No Free connections for vblist %v", vbList)
+			return nil, fmt.Errorf("No Free connections for host %s",
+				serverConn.host)
+
+		}
+		// close the connection so that it doesn't get reused for upr data
+		// connection
+		// NOTE(review): this defer runs at function return, not per loop
+		// iteration, so every node's connection stays open until all
+		// nodes have been queried.
+		defer mc.Close()
+		mc.SetDeadline(getDeadline(time.Time{}, DefaultTimeout))
+		failoverlogs, err := mc.UprGetFailoverLog(vbList)
+		if err != nil {
+			return nil, fmt.Errorf("Error getting failover log %s host %s",
+				err.Error(), serverConn.host)
+
+		}
+
+		for vb, log := range failoverlogs {
+			failoverLogMap[vb] = *log
+		}
+	}
+
+	return failoverLogMap, nil
+}
+
+// StartUprFeed creates and starts a new Upr feed with default settings:
+// a data channel of capacity 10 and the default DCP window size.
+func (b *Bucket) StartUprFeed(name string, sequence uint32) (*UprFeed, error) {
+	return b.StartUprFeedWithConfig(name, sequence, 10, DEFAULT_WINDOW_SIZE)
+}
+
+// StartUprFeedWithConfig creates and starts a new Upr feed.
+// No data will be sent on the channel unless vbuckets streams are requested.
+// data_chan_size is the capacity of the aggregate output channel;
+// dcp_buffer_size is passed through to each node's UPR connection.
+func (b *Bucket) StartUprFeedWithConfig(name string, sequence uint32, data_chan_size int, dcp_buffer_size uint32) (*UprFeed, error) {
+
+	feed := &UprFeed{
+		bucket:          b,
+		output:          make(chan *memcached.UprEvent, data_chan_size),
+		quit:            make(chan bool),
+		nodeFeeds:       make(map[string]*FeedInfo, 0),
+		name:            name,
+		sequence:        sequence,
+		killSwitch:      make(chan bool),
+		dcp_buffer_size: dcp_buffer_size,
+		data_chan_size:  data_chan_size,
+	}
+
+	err := feed.connectToNodes()
+	if err != nil {
+		return nil, fmt.Errorf("Cannot connect to bucket %s", err.Error())
+	}
+	feed.connected = true
+	// run() handles node failures and reconnection until Close().
+	go feed.run()
+
+	feed.C = feed.output
+	return feed, nil
+}
+
+// UprRequestStream starts a stream for a vb on a feed.
+// The vuuid, sequence, and snapshot arguments are forwarded unchanged
+// to the underlying memcached UPR stream request for the node that
+// masters the vbucket.
+func (feed *UprFeed) UprRequestStream(vb uint16, opaque uint16, flags uint32,
+	vuuid, startSequence, endSequence, snapStart, snapEnd uint64) error {
+
+	defer func() {
+		if r := recover(); r != nil {
+			// Re-panic with feed/bucket context attached.
+			log.Panicf("Panic in UprRequestStream. Feed %v Bucket %v", feed, feed.bucket)
+		}
+	}()
+
+	vbm := feed.bucket.VBServerMap()
+	// NOTE(review): this check (len < vb) is subsumed by the
+	// "vb >= len" check below; it only yields a different message.
+	if len(vbm.VBucketMap) < int(vb) {
+		return fmt.Errorf("vbmap smaller than vbucket list: %v vs. %v",
+			vb, vbm.VBucketMap)
+	}
+
+	if int(vb) >= len(vbm.VBucketMap) {
+		return fmt.Errorf("Invalid vbucket id %d", vb)
+	}
+
+	// Route the request to the node that masters this vbucket.
+	masterID := vbm.VBucketMap[vb][0]
+	master := feed.bucket.getMasterNode(masterID)
+	if master == "" {
+		return fmt.Errorf("Master node not found for vbucket %d", vb)
+	}
+	singleFeed := feed.nodeFeeds[master]
+	if singleFeed == nil {
+		return fmt.Errorf("UprFeed for this host not found")
+	}
+
+	if err := singleFeed.uprFeed.UprRequestStream(vb, opaque, flags,
+		vuuid, startSequence, endSequence, snapStart, snapEnd); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// UprCloseStream ends a vbucket stream.
+// The close request is routed to the node that masters the vbucket.
+func (feed *UprFeed) UprCloseStream(vb, opaqueMSB uint16) error {
+
+	defer func() {
+		if r := recover(); r != nil {
+			// Re-panic with feed/bucket context attached.
+			log.Panicf("Panic in UprCloseStream. Feed %v Bucket %v ", feed, feed.bucket)
+		}
+	}()
+
+	vbm := feed.bucket.VBServerMap()
+	// NOTE(review): this check (len < vb) is subsumed by the
+	// "vb >= len" check below; it only yields a different message.
+	if len(vbm.VBucketMap) < int(vb) {
+		return fmt.Errorf("vbmap smaller than vbucket list: %v vs. %v",
+			vb, vbm.VBucketMap)
+	}
+
+	if int(vb) >= len(vbm.VBucketMap) {
+		return fmt.Errorf("Invalid vbucket id %d", vb)
+	}
+
+	masterID := vbm.VBucketMap[vb][0]
+	master := feed.bucket.getMasterNode(masterID)
+	if master == "" {
+		return fmt.Errorf("Master node not found for vbucket %d", vb)
+	}
+	singleFeed := feed.nodeFeeds[master]
+	if singleFeed == nil {
+		return fmt.Errorf("UprFeed for this host not found")
+	}
+
+	if err := singleFeed.uprFeed.CloseStream(vb, opaqueMSB); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Goroutine that runs the feed: it waits for a sub-feed failure, then
+// refreshes the bucket and reconnects to any missing nodes, backing
+// off exponentially between attempts.
+func (feed *UprFeed) run() {
+	retryInterval := initialRetryInterval
+	bucketOK := true
+	for {
+		// Connect to the UPR feed of each server node:
+		if bucketOK {
+			// Run until one of the sub-feeds fails:
+			select {
+			case <-feed.killSwitch:
+			case <-feed.quit:
+				return
+			}
+			//feed.closeNodeFeeds()
+			retryInterval = initialRetryInterval
+		}
+
+		if feed.closing == true {
+			// we have been asked to shut down
+			return
+		}
+
+		// On error, try to refresh the bucket in case the list of nodes changed:
+		logging.Infof("go-couchbase: UPR connection lost; reconnecting to bucket %q in %v",
+			feed.bucket.Name, retryInterval)
+
+		if err := feed.bucket.Refresh(); err != nil {
+			// if we fail to refresh the bucket, exit the feed
+			// MB-14917
+			logging.Infof("Unable to refresh bucket %s ", err.Error())
+			close(feed.output)
+			feed.outputClosed = true
+			feed.closeNodeFeeds()
+			return
+		}
+
+		// this will only connect to nodes that are not connected or changed
+		// user will have to reconnect the stream
+		err := feed.connectToNodes()
+		if err != nil {
+			logging.Infof("Unable to connect to nodes..exit ")
+			close(feed.output)
+			feed.outputClosed = true
+			feed.closeNodeFeeds()
+			return
+		}
+		// NOTE(review): err is always nil here (the non-nil case
+		// returned above), so bucketOK is always true at this point.
+		bucketOK = err == nil
+
+		// Back off before the next reconnection attempt, doubling the
+		// interval up to maximumRetryInterval.
+		select {
+		case <-time.After(retryInterval):
+		case <-feed.quit:
+			return
+		}
+		if retryInterval *= 2; retryInterval > maximumRetryInterval {
+			retryInterval = maximumRetryInterval
+		}
+	}
+}
+
+// connectToNodes opens a UPR feed to every bucket node that does not
+// already have a live connection, registers each connection under its
+// host name, and starts one forwarder goroutine per new connection.
+// It returns an error when no new node could be connected.
+func (feed *UprFeed) connectToNodes() (err error) {
+	nodeCount := 0
+	for _, serverConn := range feed.bucket.getConnPools(false /* not already locked */) {
+
+		// this maybe a reconnection, so check if the connection to the node
+		// already exists. Connect only if the node is not found in the list
+		// or connected == false
+		nodeFeed := feed.nodeFeeds[serverConn.host]
+
+		if nodeFeed != nil && nodeFeed.connected == true {
+			continue
+		}
+
+		var singleFeed *memcached.UprFeed
+		var name string
+		if feed.name == "" {
+			name = "DefaultUprClient"
+		} else {
+			name = feed.name
+		}
+		singleFeed, err = serverConn.StartUprFeed(name, feed.sequence, feed.dcp_buffer_size, feed.data_chan_size)
+		if err != nil {
+			logging.Errorf("go-couchbase: Error connecting to upr feed of %s: %v", serverConn.host, err)
+			feed.closeNodeFeeds()
+			return
+		}
+		// add the node to the connection map
+		feedInfo := &FeedInfo{
+			uprFeed:   singleFeed,
+			connected: true,
+			host:      serverConn.host,
+			quit:      make(chan bool),
+		}
+		feed.nodeFeeds[serverConn.host] = feedInfo
+		// BUG FIX: Add to the WaitGroup BEFORE starting the goroutine.
+		// Calling Add after "go" races with the forwarder's deferred
+		// wg.Done: the counter can go negative (panic) or Close's
+		// Wait can return before the forwarder is accounted for.
+		feed.wg.Add(1)
+		go feed.forwardUprEvents(feedInfo, feed.killSwitch, serverConn.host)
+		nodeCount++
+	}
+	if nodeCount == 0 {
+		return fmt.Errorf("No connection to bucket")
+	}
+
+	return nil
+}
+
+// Goroutine that forwards Upr events from a single node's feed to the aggregate feed.
+// It exits when its per-node quit channel closes, when the node feed's
+// channel closes (signalling the failure on killSwitch), or when the
+// aggregate output has been closed.
+func (feed *UprFeed) forwardUprEvents(nodeFeed *FeedInfo, killSwitch chan bool, host string) {
+	singleFeed := nodeFeed.uprFeed
+
+	defer func() {
+		feed.wg.Done()
+		if r := recover(); r != nil {
+			//if feed is not closing, re-throw the panic
+			if feed.outputClosed != true && feed.closing != true {
+				panic(r)
+			} else {
+				// Sends on feed.output can panic once the channel is
+				// closed during shutdown; swallow only in that case.
+				logging.Errorf("Panic is recovered. Since feed is closed, exit gracefully")
+
+			}
+		}
+	}()
+
+	for {
+		select {
+		case <-nodeFeed.quit:
+			// closeNodeFeeds closed our quit channel.
+			nodeFeed.connected = false
+			return
+
+		case event, ok := <-singleFeed.C:
+			if !ok {
+				// Node feed channel closed: report the error, if any,
+				// then tell the run loop to tear down and reconnect.
+				if singleFeed.Error != nil {
+					logging.Errorf("go-couchbase: Upr feed from %s failed: %v", host, singleFeed.Error)
+				}
+				killSwitch <- true
+				return
+			}
+			if feed.outputClosed == true {
+				// someone closed the node feed
+				logging.Infof("Node need closed, returning from forwardUprEvent")
+				return
+			}
+			feed.output <- event
+			if event.Status == gomemcached.NOT_MY_VBUCKET {
+				// Topology changed under us: refresh the bucket and
+				// reconnect any missing nodes.
+				logging.Infof(" Got a not my vbucket error !! ")
+				if err := feed.bucket.Refresh(); err != nil {
+					logging.Errorf("Unable to refresh bucket %s ", err.Error())
+					feed.closeNodeFeeds()
+					return
+				}
+				// this will only connect to nodes that are not connected or changed
+				// user will have to reconnect the stream
+				if err := feed.connectToNodes(); err != nil {
+					logging.Errorf("Unable to connect to nodes %s", err.Error())
+					return
+				}
+
+			}
+		}
+	}
+}
+
+// closeNodeFeeds stops every per-node forwarder (by closing its quit
+// channel), closes each node's UPR connection, and clears the map.
+func (feed *UprFeed) closeNodeFeeds() {
+	for _, nodeFeed := range feed.nodeFeeds {
+		logging.Infof(" Sending close to forwardUprEvent ")
+		close(nodeFeed.quit)
+		nodeFeed.uprFeed.Close()
+	}
+	feed.nodeFeeds = nil
+}
+
+// Close a Upr feed.
+// Idempotent: a second call observes the already-closed quit channel
+// and returns immediately.
+func (feed *UprFeed) Close() error {
+	select {
+	case <-feed.quit:
+		// Already closed.
+		return nil
+	default:
+	}
+
+	// Mark closing first so forwarder goroutines treat panics during
+	// tear-down as benign (see forwardUprEvents).
+	feed.closing = true
+	feed.closeNodeFeeds()
+	close(feed.quit)
+
+	// Wait for every forwarder goroutine before closing the output
+	// channel, so none of them sends on a closed channel.
+	feed.wg.Wait()
+	if feed.outputClosed == false {
+		feed.outputClosed = true
+		close(feed.output)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/couchbase/go-couchbase/users.go b/vendor/github.com/couchbase/go-couchbase/users.go
new file mode 100644
index 00000000..4e8f9629
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/users.go
@@ -0,0 +1,121 @@
+package couchbase
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// User describes a cluster RBAC user and the roles granted to it.
+type User struct {
+	Name   string // human-readable display name
+	Id     string // user id, used in REST paths (see PutUserInfo)
+	Domain string // authentication domain: "local" or "external"
+	Roles  []Role
+}
+
+// Role is a single granted role, optionally scoped to a bucket,
+// scope, and collection.
+type Role struct {
+	Role           string
+	BucketName     string `json:"bucket_name"`
+	ScopeName      string `json:"scope_name"`
+	CollectionName string `json:"collection_name"`
+}
+
+// RoleDescription describes one assignable role, as returned by the
+// /settings/rbac/roles endpoint (see GetRolesAll).
+// Sample:
+// {"role":"admin","name":"Admin","desc":"Can manage ALL cluster features including security.","ce":true}
+// {"role":"query_select","bucket_name":"*","name":"Query Select","desc":"Can execute SELECT statement on bucket to retrieve data"}
+type RoleDescription struct {
+	Role       string
+	Name       string
+	Desc       string
+	Ce         bool   // present on Community Edition
+	BucketName string `json:"bucket_name"`
+}
+
+// Return user-role data, as parsed JSON.
+// Sample:
+// [{"id":"ivanivanov","name":"Ivan Ivanov","roles":[{"role":"cluster_admin"},{"bucket_name":"default","role":"bucket_admin"}]},
+// {"id":"petrpetrov","name":"Petr Petrov","roles":[{"role":"replication_admin"}]}]
+func (c *Client) GetUserRoles() ([]interface{}, error) {
+	ret := make([]interface{}, 0, 1)
+	err := c.parseURLResponse("/settings/rbac/users", &ret)
+	if err != nil {
+		return nil, err
+	}
+
+	// Get the configured administrator.
+	// Expected result: {"port":8091,"username":"Administrator"}
+	adminInfo := make(map[string]interface{}, 2)
+	err = c.parseURLResponse("/settings/web", &adminInfo)
+	if err != nil {
+		return nil, err
+	}
+
+	// Create a special entry for the configured administrator, which
+	// the /settings/rbac/users listing does not include.
+	adminResult := map[string]interface{}{
+		"name":   adminInfo["username"],
+		"id":     adminInfo["username"],
+		"domain": "ns_server",
+		"roles": []interface{}{
+			map[string]interface{}{
+				"role": "admin",
+			},
+		},
+	}
+
+	// Add the configured administrator to the list of results.
+	ret = append(ret, adminResult)
+
+	return ret, nil
+}
+
+// GetUserInfoAll fetches every RBAC user known to the cluster via the
+// /settings/rbac/users endpoint.
+func (c *Client) GetUserInfoAll() ([]User, error) {
+	users := make([]User, 0, 16)
+	if err := c.parseURLResponse("/settings/rbac/users", &users); err != nil {
+		return nil, err
+	}
+	return users, nil
+}
+
+// rolesToParamFormat renders roles as the comma-separated REST form
+// "role" or "role[bucket]" expected by the RBAC settings endpoint.
+// NOTE(review): ScopeName/CollectionName are not encoded here —
+// confirm whether the endpoint needs them.
+func rolesToParamFormat(roles []Role) string {
+	var out bytes.Buffer
+	for idx, r := range roles {
+		if idx != 0 {
+			out.WriteByte(',')
+		}
+		if r.BucketName == "" {
+			out.WriteString(r.Role)
+		} else {
+			fmt.Fprintf(&out, "%s[%s]", r.Role, r.BucketName)
+		}
+	}
+	return out.String()
+}
+
+// PutUserInfo creates or updates an RBAC user. The REST path depends
+// on the user's authentication domain ("local" or "external").
+func (c *Client) PutUserInfo(u *User) error {
+	var target string
+	switch u.Domain {
+	case "external":
+		target = "/settings/rbac/users/" + u.Id
+	case "local":
+		target = "/settings/rbac/users/local/" + u.Id
+	default:
+		return fmt.Errorf("Unknown user type: %s", u.Domain)
+	}
+	params := map[string]interface{}{
+		"name":  u.Name,
+		"roles": rolesToParamFormat(u.Roles),
+	}
+	var ret string // PUT returns an empty string. We ignore it.
+	return c.parsePutURLResponse(target, params, &ret)
+}
+
+// GetRolesAll fetches the descriptions of every assignable role from
+// the /settings/rbac/roles endpoint.
+func (c *Client) GetRolesAll() ([]RoleDescription, error) {
+	roles := make([]RoleDescription, 0, 32)
+	if err := c.parseURLResponse("/settings/rbac/roles", &roles); err != nil {
+		return nil, err
+	}
+	return roles, nil
+}
diff --git a/vendor/github.com/couchbase/go-couchbase/util.go b/vendor/github.com/couchbase/go-couchbase/util.go
new file mode 100644
index 00000000..4d286a32
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/util.go
@@ -0,0 +1,49 @@
+package couchbase
+
+import (
+ "fmt"
+ "net/url"
+ "strings"
+)
+
+// CleanupHost returns the hostname with the given suffix removed.
+// If h does not end in commonSuffix, it is returned unchanged.
+// (Idiom: this is exactly strings.TrimSuffix.)
+func CleanupHost(h, commonSuffix string) string {
+	return strings.TrimSuffix(h, commonSuffix)
+}
+
+// FindCommonSuffix returns the longest proper suffix of input[0]
+// shared by all of the given strings, or "" when fewer than two
+// strings are given. (The full first string is never considered a
+// suffix, matching the original behavior.)
+func FindCommonSuffix(input []string) string {
+	if len(input) < 2 {
+		return ""
+	}
+	rv := ""
+	// Try suffixes of input[0] from shortest to longest. Once a
+	// suffix is not shared by every string, no longer suffix can be
+	// either, so we can stop early (the original scanned all lengths).
+	for i := len(input[0]) - 1; i >= 1; i-- {
+		suffix := input[0][i:]
+		common := true
+		for _, s := range input {
+			if !strings.HasSuffix(s, suffix) {
+				common = false
+				break
+			}
+		}
+		if !common {
+			break
+		}
+		rv = suffix
+	}
+	return rv
+}
+
+// ParseURL is a wrapper around url.Parse with some sanity-checking:
+// a URL that parses but carries no scheme is rejected.
+func ParseURL(urlStr string) (result *url.URL, err error) {
+	result, err = url.Parse(urlStr)
+	if result == nil || result.Scheme != "" {
+		return result, err
+	}
+	return nil, fmt.Errorf("invalid URL <%s>", urlStr)
+}
diff --git a/vendor/github.com/couchbase/go-couchbase/vbmap.go b/vendor/github.com/couchbase/go-couchbase/vbmap.go
new file mode 100644
index 00000000..b96a18ed
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/vbmap.go
@@ -0,0 +1,77 @@
+package couchbase
+
+// crc32tab is a 256-entry CRC-32 lookup table used by VBHash.
+// NOTE(review): this appears to be the standard reflected IEEE
+// polynomial table (cf. hash/crc32) — confirm before replacing with
+// the standard library.
+var crc32tab = []uint32{
+	0x00000000, 0x77073096, 0xee0e612c, 0x990951ba,
+	0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
+	0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
+	0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
+	0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
+	0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
+	0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec,
+	0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
+	0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
+	0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
+	0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940,
+	0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
+	0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116,
+	0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
+	0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
+	0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
+	0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a,
+	0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
+	0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818,
+	0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
+	0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
+	0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
+	0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c,
+	0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
+	0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
+	0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
+	0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
+	0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
+	0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086,
+	0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
+	0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4,
+	0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
+	0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
+	0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
+	0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
+	0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
+	0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe,
+	0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
+	0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
+	0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
+	0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252,
+	0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
+	0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60,
+	0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
+	0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
+	0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
+	0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04,
+	0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
+	0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a,
+	0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
+	0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
+	0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
+	0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e,
+	0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
+	0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
+	0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
+	0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
+	0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
+	0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0,
+	0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
+	0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6,
+	0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
+	0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
+	0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d}
+
+// VBHash finds the vbucket for the given key.
+// It computes a CRC-32 of the key bytes and folds it onto the number
+// of vbuckets; the final "& (count-1)" mask assumes the vbucket count
+// is a power of two (the usual Couchbase configuration).
+func (b *Bucket) VBHash(key string) uint32 {
+	crc := uint32(0xffffffff)
+	for x := 0; x < len(key); x++ {
+		crc = (crc >> 8) ^ crc32tab[(uint64(crc)^uint64(key[x]))&0xff]
+	}
+	vbm := b.VBServerMap()
+	// Use bits 16..30 of the inverted CRC, masked to the vbucket count.
+	return ((^crc) >> 16) & 0x7fff & (uint32(len(vbm.VBucketMap)) - 1)
+}
diff --git a/vendor/github.com/couchbase/go-couchbase/views.go b/vendor/github.com/couchbase/go-couchbase/views.go
new file mode 100644
index 00000000..2f68642f
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/views.go
@@ -0,0 +1,231 @@
+package couchbase
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "math/rand"
+ "net/http"
+ "net/url"
+ "time"
+)
+
+// ViewRow represents a single result from a view.
+//
+// Doc is present only if include_docs was set on the request.
+type ViewRow struct {
+	ID    string       // document id
+	Key   interface{}  // emitted key
+	Value interface{}  // emitted value
+	Doc   *interface{} // full document, when include_docs was requested
+}
+
+// A ViewError is a node-specific error indicating a partial failure
+// within a view result.
+type ViewError struct {
+	From   string // node that reported the failure
+	Reason string
+}
+
+// Error implements the error interface for ViewError.
+func (ve ViewError) Error() string {
+	return "Node: " + ve.From + ", reason: " + ve.Reason
+}
+
+// ViewResult holds the entire result set from a view request,
+// including the rows and the errors.
+type ViewResult struct {
+	TotalRows int `json:"total_rows"`
+	Rows      []ViewRow
+	Errors    []ViewError
+}
+
+// randomBaseURL picks a random healthy node and returns its Couch API
+// base URL, carrying over the pool's credentials when available.
+func (b *Bucket) randomBaseURL() (*url.URL, error) {
+	nodes := b.HealthyNodes()
+	if len(nodes) == 0 {
+		return nil, errors.New("no available couch rest URLs")
+	}
+	nodeNo := rand.Intn(len(nodes))
+	node := nodes[nodeNo]
+
+	// Snapshot fields under the read lock.
+	b.RLock()
+	name := b.Name
+	pool := b.pool
+	b.RUnlock()
+
+	u, err := ParseURL(node.CouchAPIBase)
+	if err != nil {
+		return nil, fmt.Errorf("config error: Bucket %q node #%d CouchAPIBase=%q: %v",
+			name, nodeNo, node.CouchAPIBase, err)
+	} else if pool != nil {
+		// Reuse the pool client's credentials for the view request.
+		u.User = pool.client.BaseURL.User
+	}
+	return u, err
+}
+
+// START_NODE_ID asks randomNextURL to pick a random starting node.
+const START_NODE_ID = -1
+
+// randomNextURL returns the Couch API base URL of the node after
+// lastNode (wrapping around), or of a random node when lastNode is
+// START_NODE_ID or out of range. It also returns the chosen node's
+// index so the caller can continue the rotation.
+func (b *Bucket) randomNextURL(lastNode int) (*url.URL, int, error) {
+	nodes := b.HealthyNodes()
+	if len(nodes) == 0 {
+		return nil, -1, errors.New("no available couch rest URLs")
+	}
+
+	var nodeNo int
+	if lastNode == START_NODE_ID || lastNode >= len(nodes) {
+		// randomly select a node if the value of lastNode is invalid
+		nodeNo = rand.Intn(len(nodes))
+	} else {
+		// wrap around the node list
+		nodeNo = (lastNode + 1) % len(nodes)
+	}
+
+	// Snapshot fields under the read lock.
+	b.RLock()
+	name := b.Name
+	pool := b.pool
+	b.RUnlock()
+
+	node := nodes[nodeNo]
+	u, err := ParseURL(node.CouchAPIBase)
+	if err != nil {
+		return nil, -1, fmt.Errorf("config error: Bucket %q node #%d CouchAPIBase=%q: %v",
+			name, nodeNo, node.CouchAPIBase, err)
+	} else if pool != nil {
+		// Reuse the pool client's credentials for the view request.
+		u.User = pool.client.BaseURL.User
+	}
+	return u, nodeNo, err
+}
+
+// DocID is the document ID type for the startkey_docid parameter in
+// views.
+type DocID string
+
+// qParam renders one view query-parameter value. Most values are
+// double-quoted (view parameters are JSON), while document-id and
+// staleness parameters are passed through verbatim.
+func qParam(k, v string) string {
+	switch k {
+	case "startkey_docid", "endkey_docid", "stale":
+		return fmt.Sprintf("%s", v)
+	default:
+		return fmt.Sprintf(`"%s"`, v)
+	}
+}
+
+// ViewURL constructs a URL for a view with the given ddoc, view name,
+// and parameters. A random healthy node is chosen as the base; the
+// special pair (ddoc="", name="_all_docs") targets the bucket's
+// _all_docs endpoint instead of a design document.
+func (b *Bucket) ViewURL(ddoc, name string,
+	params map[string]interface{}) (string, error) {
+	u, err := b.randomBaseURL()
+	if err != nil {
+		return "", err
+	}
+
+	// Encode each parameter according to its Go type; anything not
+	// handled explicitly is JSON-marshaled.
+	values := url.Values{}
+	for k, v := range params {
+		switch t := v.(type) {
+		case DocID:
+			values[k] = []string{string(t)}
+		case string:
+			values[k] = []string{qParam(k, t)}
+		case int:
+			values[k] = []string{fmt.Sprintf(`%d`, t)}
+		case bool:
+			values[k] = []string{fmt.Sprintf(`%v`, t)}
+		default:
+			b, err := json.Marshal(v)
+			if err != nil {
+				return "", fmt.Errorf("unsupported value-type %T in Query, "+
+					"json encoder said %v", t, err)
+			}
+			values[k] = []string{fmt.Sprintf(`%v`, string(b))}
+		}
+	}
+
+	if ddoc == "" && name == "_all_docs" {
+		u.Path = fmt.Sprintf("/%s/_all_docs", b.GetName())
+	} else {
+		u.Path = fmt.Sprintf("/%s/_design/%s/_view/%s", b.GetName(), ddoc, name)
+	}
+	u.RawQuery = values.Encode()
+
+	return u.String(), nil
+}
+
+// ViewCallback is called for each view invocation.
+var ViewCallback func(ddoc, name string, start time.Time, err error)
+
+// ViewCustom performs a view request that can map row values to a
+// custom type: the response body is JSON-decoded into vres, which
+// should be a pointer to a ViewResult-shaped struct.
+//
+// See the source to View for an example usage.
+func (b *Bucket) ViewCustom(ddoc, name string, params map[string]interface{},
+	vres interface{}) (err error) {
+	if SlowServerCallWarningThreshold > 0 {
+		defer slowLog(time.Now(), "call to ViewCustom(%q, %q)", ddoc, name)
+	}
+
+	if ViewCallback != nil {
+		// Capture the start time now; err is the named result, so the
+		// callback sees the final outcome.
+		defer func(t time.Time) { ViewCallback(ddoc, name, t, err) }(time.Now())
+	}
+
+	u, err := b.ViewURL(ddoc, name, params)
+	if err != nil {
+		return err
+	}
+
+	req, err := http.NewRequest("GET", u, nil)
+	if err != nil {
+		return err
+	}
+
+	ah := b.authHandler(false /* bucket not yet locked */)
+	maybeAddAuth(req, ah)
+
+	res, err := doHTTPRequest(req)
+	if err != nil {
+		return fmt.Errorf("error starting view req at %v: %v", u, err)
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode != 200 {
+		// Include up to 512 bytes of the body in the error for context.
+		bod := make([]byte, 512)
+		l, _ := res.Body.Read(bod)
+		return fmt.Errorf("error executing view req at %v: %v - %s",
+			u, res.Status, bod[:l])
+	}
+
+	// BUG FIX: the previous version ignored the ReadAll error and
+	// returned nil when json.Unmarshal failed, silently handing the
+	// caller an empty result. Propagate both errors instead.
+	body, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		return fmt.Errorf("error reading view response at %v: %v", u, err)
+	}
+	if err := json.Unmarshal(body, vres); err != nil {
+		return fmt.Errorf("error decoding view response at %v: %v", u, err)
+	}
+
+	return nil
+}
+
+// View executes a view.
+//
+// The ddoc parameter is just the bare name of your design doc without
+// the "_design/" prefix.
+//
+// Parameters are string keys with values that correspond to couchbase
+// view parameters. Primitive should work fairly naturally (booleans,
+// ints, strings, etc...) and other values will attempt to be JSON
+// marshaled (useful for array indexing on on view keys, for example).
+//
+// Example:
+//
+// res, err := couchbase.View("myddoc", "myview", map[string]interface{}{
+// "group_level": 2,
+// "startkey_docid": []interface{}{"thing"},
+// "endkey_docid": []interface{}{"thing", map[string]string{}},
+// "stale": false,
+// })
+func (b *Bucket) View(ddoc, name string, params map[string]interface{}) (ViewResult, error) {
+ vres := ViewResult{}
+
+ if err := b.ViewCustom(ddoc, name, params, &vres); err != nil {
+ //error in accessing views. Retry once after a bucket refresh
+ b.Refresh()
+ return vres, b.ViewCustom(ddoc, name, params, &vres)
+ } else {
+ return vres, nil
+ }
+}
diff --git a/vendor/github.com/couchbase/go_n1ql/.gitignore b/vendor/github.com/couchbase/go_n1ql/.gitignore
new file mode 100644
index 00000000..daf913b1
--- /dev/null
+++ b/vendor/github.com/couchbase/go_n1ql/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/couchbase/go_n1ql/LICENSE.txt b/vendor/github.com/couchbase/go_n1ql/LICENSE.txt
new file mode 100644
index 00000000..36049f01
--- /dev/null
+++ b/vendor/github.com/couchbase/go_n1ql/LICENSE.txt
@@ -0,0 +1,5 @@
+Source code in this repository is licensed under various licenses. The
+Business Source License 1.1 (BSL) is one such license. Each file indicates in
+a section at the beginning of the file the name of the license that applies to
+it. All licenses used in this repository can be found in the top-level
+licenses directory.
diff --git a/vendor/github.com/couchbase/go_n1ql/README.md b/vendor/github.com/couchbase/go_n1ql/README.md
new file mode 100644
index 00000000..432b6387
--- /dev/null
+++ b/vendor/github.com/couchbase/go_n1ql/README.md
@@ -0,0 +1,191 @@
+# go_n1ql
+
+## N1QL Driver for Go's `database/sql` package
+
+This library implements the standard go database APIs for
+[database/sql](http://golang.org/pkg/database/sql/) and
+[database/sql/driver](http://golang.org/pkg/database/sql/driver/).
+
+## Installation
+
+```bash
+go get github.com/couchbase/go_n1ql
+cd $GOPATH/src/github.com/couchbase/go_n1ql
+go get ...
+```
+
+## Test
+
+The test code assumes an instance of Couchbase is running on the local machine,
+and the *beer-sample* sample bucket is loaded.
+
+From the */go_n1ql* directory, run:
+
+ go test .
+
+## Example Application
+
+See [./example/example.go](https://github.com/couchbase/go_n1ql/blob/master/example/example.go)
+
+Start
+```bash
+cbq_engine ./cbq_engine -datastore=dir:../../test/json
+./example
+```
+
+## Imports
+
+To use the `go_n1ql` driver the following two imports are required
+
+```go
+import (
+ "database/sql"
+ _ "github.com/couchbase/go_n1ql"
+)
+```
+
+## Connecting to N1QL
+
+The `go_n1ql` driver allows you to connect to either a standalone instance of N1QL or
+a couchbase cluster endpoint.
+
+### Connect to a standalone N1QL instance
+
+```go
+n1ql, err := sql.Open("n1ql", "localhost:8093")
+```
+### Connect to a couchbase cluster
+
+```go
+n1ql, err := sql.Open("n1ql", "http://localhost:9000/")
+```
+The driver will discover the N1QL endpoints in the cluster and connect to one of them.
+
+## Query Options
+
+Various Query options can be set by calling SetQueryParams. See example below
+
+```go
+import go_n1ql "github.com/couchbase/go_n1ql"
+
+ac := []byte(`[{"user": "admin:Administrator", "pass": "asdasd"}]`)
+go_n1ql.SetQueryParams("creds", ac)
+go_n1ql.SetQueryParams("timeout", "10s")
+```
+
+## Running Select Queries
+
+### Running queries without positional parameters
+
+```go
+rows, err := n1ql.Query("select * from contacts where contacts.name = \"dave\"")
+if err != nil {
+ log.Fatal(err)
+}
+defer rows.Close()
+for rows.Next() {
+ var contacts string
+ if err := rows.Scan(&contacts); err != nil {
+ log.Fatal(err)
+ }
+ log.Printf("Row returned %s : \n", contacts)
+}
+```
+
+Note that since Couchbase is a document oriented database there are no columns. Each document in the
+database is treated as a row. For queries of the form `SELECT * FROM bucket` the results will be
+returned in a single column. Queries where the result expression is not `*` will return the results in
+multiple columns.
+
+#### Example query returning multiple columns
+
+```go
+rows, err := n1ql.Query("select personal_details, shipped_order_history from users_with_orders where doc_type=\"user_profile\" and personal_details.age = 60")
+
+if err != nil {
+ log.Fatal(err)
+}
+
+defer rows.Close()
+for rows.Next() {
+ var personal, shipped string
+ if err := rows.Scan(&personal, &shipped); err != nil {
+ log.Fatal(err)
+ }
+ log.Printf("Row returned personal_details: %s shipped_order_history %s : \n", personal, shipped)
+}
+```
+
+### Running queries with positional parameters
+
+Positional parameters are supported by the Queryer/Execer interface and by the Statement (prepared statement) interface
+
+#### Example of a Prepared statement with positional parameters
+
+```go
+stmt, err := n1ql.Prepare("select personal_details, shipped_order_history from users_with_orders where doc_type=? and personal_details.age = ?")
+
+rows, err = stmt.Query("user_profile", 60)
+if err != nil {
+ log.Fatal(err)
+}
+
+if err != nil {
+ log.Fatal(err)
+}
+defer rows.Close()
+for rows.Next() {
+ var personal, shipped string
+ if err := rows.Scan(&personal, &shipped); err != nil {
+ log.Fatal(err)
+ }
+ log.Printf("Row returned personal_details: %s shipped_order_history %s : \n", personal, shipped)
+}
+```
+
+## Running DML Queries
+
+DML queries are supported via the Execer and Statement interfaces. These statements are not expected to return
+any rows; instead, the number of rows mutated/modified is returned.
+
+### Example usage of the Execer interface
+
+```go
+result, err := n1ql.Exec("Upsert INTO contacts values (\"irish\",{\"name\":\"irish\", \"type\":\"contact\"})")
+if err != nil {
+ log.Fatal(err)
+}
+
+rowsAffected, err := result.RowsAffected()
+if err != nil {
+ log.Fatal(err)
+}
+log.Printf("Rows affected %d", rowsAffected)
+```
+
+### Example usage of Prepared Statements with Exec
+
+```go
+stmt, err = n1ql.Prepare("Upsert INTO contacts values (?,?)")
+if err != nil {
+ log.Fatal(err)
+}
+
+// Map Values need to be marshaled
+value, _ := json.Marshal(map[string]interface{}{"name": "irish", "type": "contact"})
+result, err = stmt.Exec("irish4", value)
+if err != nil {
+ log.Fatal(err)
+}
+
+rowsAffected, err = result.RowsAffected()
+if err != nil {
+ log.Fatal(err)
+}
+log.Printf("Rows affected %d", rowsAffected)
+```
+
+### Note
+Any positional values that contain either arrays or maps or any combination thereof
+need to be marshalled and passed as type `[]byte`
+
diff --git a/vendor/github.com/couchbase/go_n1ql/n1ql.go b/vendor/github.com/couchbase/go_n1ql/n1ql.go
new file mode 100644
index 00000000..161a814e
--- /dev/null
+++ b/vendor/github.com/couchbase/go_n1ql/n1ql.go
@@ -0,0 +1,777 @@
+// Copyright 2014-Present Couchbase, Inc.
+//
+// Use of this software is governed by the Business Source License included
+// in the file licenses/BSL-Couchbase.txt. As of the Change Date specified
+// in that file, in accordance with the Business Source License, use of this
+// software will be governed by the Apache License, Version 2.0, included in
+// the file licenses/APL2.txt.
+
+package go_n1ql
+
+import (
+ "bytes"
+ "crypto/tls"
+ "database/sql"
+ "database/sql/driver"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/couchbase/go-couchbase"
+)
+
+// Common error codes
+var (
+ ErrNotSupported = fmt.Errorf("N1QL:Not supported")
+ ErrNotImplemented = fmt.Errorf("N1QL: Not implemented")
+ ErrUnknownCommand = fmt.Errorf("N1QL: Unknown Command")
+ ErrInternalError = fmt.Errorf("N1QL: Internal Error")
+)
+
+// defaults
+var (
+ N1QL_SERVICE_ENDPOINT = "/query/service"
+ N1QL_DEFAULT_HOST = "127.0.0.1"
+ N1QL_DEFAULT_PORT = 8093
+ N1QL_POOL_SIZE = 2 ^ 10 // 1 MB
+ N1QL_DEFAULT_STATEMENT = "SELECT 1"
+)
+
+// flags
+
+var (
+ N1QL_PASSTHROUGH_MODE = false
+)
+
+// Rest API query parameters
+var QueryParams map[string]string
+
+// Username and password. Used for querying the cluster endpoint,
+// which may require authorization.
+var username, password string
+
+func SetQueryParams(key string, value string) error {
+
+ if key == "" {
+ return fmt.Errorf("N1QL: Key not specified")
+ }
+
+ QueryParams[key] = value
+ return nil
+}
+
+func UnsetQueryParams(key string) error {
+
+ if key == "" {
+ return fmt.Errorf("N1QL: Key not specified")
+ }
+
+ delete(QueryParams, key)
+ return nil
+}
+
+func SetPassthroughMode(val bool) {
+ N1QL_PASSTHROUGH_MODE = val
+}
+
+func SetUsernamePassword(u, p string) {
+ username = u
+ password = p
+}
+
+// implements Driver interface
+type n1qlDrv struct{}
+
+func init() {
+ sql.Register("n1ql", &n1qlDrv{})
+ QueryParams = make(map[string]string)
+}
+
+func (n *n1qlDrv) Open(name string) (driver.Conn, error) {
+ return OpenN1QLConnection(name)
+}
+
+// implements driver.Conn interface
+type n1qlConn struct {
+ clusterAddr string
+ queryAPIs []string
+ client *http.Client
+ lock sync.RWMutex
+}
+
+// HTTPClient to use for REST and view operations.
+var MaxIdleConnsPerHost = 10
+var HTTPTransport = &http.Transport{MaxIdleConnsPerHost: MaxIdleConnsPerHost}
+var HTTPClient = &http.Client{Transport: HTTPTransport}
+
+func discoverN1QLService(name string, ps couchbase.PoolServices) string {
+
+ for _, ns := range ps.NodesExt {
+ if ns.Services != nil {
+ if port, ok := ns.Services["n1ql"]; ok == true {
+ var hostname string
+ //n1ql service found
+ if ns.Hostname == "" {
+ hostUrl, _ := url.Parse(name)
+ hn := hostUrl.Host
+ hostname = strings.Split(hn, ":")[0]
+ } else {
+ hostname = ns.Hostname
+ }
+
+ return fmt.Sprintf("%s:%d", hostname, port)
+ }
+ }
+ }
+ return ""
+}
+
+func getQueryApi(n1qlEndPoint string) ([]string, error) {
+ queryAdmin := "http://" + n1qlEndPoint + "/admin/clusters/default/nodes"
+ request, _ := http.NewRequest("GET", queryAdmin, nil)
+ request.Header.Add("Content-Type", "application/x-www-form-urlencoded")
+ queryAPIs := make([]string, 0)
+
+ hostname := strings.Split(n1qlEndPoint, ":")[0]
+
+ resp, err := HTTPClient.Do(request)
+ if err != nil {
+ return nil, fmt.Errorf("%v", err)
+ }
+
+ if resp.StatusCode != 200 {
+ bod, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 512))
+ return nil, fmt.Errorf("%s", bod)
+ }
+
+ var nodesInfo []interface{}
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("N1QL: Failed to read response body from server. Error %v", err)
+ }
+
+ if err := json.Unmarshal(body, &nodesInfo); err != nil {
+ return nil, fmt.Errorf("N1QL: Failed to parse response. Error %v", err)
+ }
+
+ for _, queryNode := range nodesInfo {
+ switch queryNode := queryNode.(type) {
+ case map[string]interface{}:
+ queryAPIs = append(queryAPIs, queryNode["queryEndpoint"].(string))
+ }
+ }
+
+ // if the end-points contain 127.0.0.1 then replace them with the actual hostname
+ for i, qa := range queryAPIs {
+ queryAPIs[i] = strings.Replace(qa, "127.0.0.1", hostname, -1)
+ }
+
+ if len(queryAPIs) == 0 {
+ return nil, fmt.Errorf("Query endpoints not found")
+ }
+
+ return queryAPIs, nil
+}
+
+// Adds authorization information to the given url.
+// A url like http://localhost:8091/ is converted to
+// http://user:password@localhost:8091/ .
+func addAuthorization(url string) string {
+ if strings.Contains(url, "@") {
+ // Already contains authorization.
+ return url
+ }
+ if username == "" {
+ // Username/password not set.
+ return url
+ }
+ // Assume the URL is in one of 3 forms:
+ // http://hostname:port...
+ // https://hostname:port...
+ // hostname:port...
+ // Where the ... indicates zero or more trailing characters.
+ userInfo := username + ":" + password + "@"
+ var prefix string
+ if strings.HasPrefix(url, "http://") {
+ prefix = "http://"
+ } else if strings.HasPrefix(url, "https://") {
+ prefix = "https://"
+ } else {
+ prefix = ""
+ }
+ suffix := strings.TrimPrefix(url, prefix)
+ return prefix + userInfo + suffix
+}
+
+func OpenN1QLConnection(name string) (driver.Conn, error) {
+ var queryAPIs []string
+
+ if strings.HasPrefix(name, "https") {
+ HTTPTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
+ }
+
+ name = addAuthorization(name)
+
+ //First check if the input string is a cluster endpoint
+ client, err := couchbase.Connect(name)
+ var perr error = nil
+ if err != nil {
+ perr = fmt.Errorf("N1QL: Unable to connect to cluster endpoint %s. Error %v", name, err)
+ // If not cluster endpoint then check if query endpoint
+ name = strings.TrimSuffix(name, "/")
+ queryAPI := name + N1QL_SERVICE_ENDPOINT
+ queryAPIs = make([]string, 1, 1)
+ queryAPIs[0] = queryAPI
+
+ } else {
+ ps, err := client.GetPoolServices("default")
+ if err != nil {
+ return nil, fmt.Errorf("N1QL: Failed to get NodeServices list. Error %v", err)
+ }
+
+ n1qlEndPoint := discoverN1QLService(name, ps)
+ if n1qlEndPoint == "" {
+ return nil, fmt.Errorf("N1QL: No query service found on this cluster")
+ }
+
+ queryAPIs, err = getQueryApi(n1qlEndPoint)
+ if err != nil {
+ return nil, err
+ }
+
+ }
+
+ conn := &n1qlConn{client: HTTPClient, queryAPIs: queryAPIs}
+
+ request, err := prepareRequest(N1QL_DEFAULT_STATEMENT, queryAPIs[0], nil)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := conn.client.Do(request)
+ if err != nil {
+ var final_error string
+ if perr != nil {
+ final_error = fmt.Errorf("N1QL: Connection failed %v", err).Error() + "\n" + perr.Error()
+ } else {
+ final_error = fmt.Errorf("N1QL: Connection failed %v", err).Error()
+ }
+ return nil, fmt.Errorf("%v", final_error)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != 200 {
+ bod, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 512))
+ return nil, fmt.Errorf("N1QL: Connection failure %s", bod)
+ }
+
+ return conn, nil
+}
+
+// do client request with retry
+func (conn *n1qlConn) doClientRequest(query string, requestValues *url.Values) (*http.Response, error) {
+
+ ok := false
+ for !ok {
+
+ var request *http.Request
+ var err error
+
+ // select query API
+ rand.Seed(time.Now().Unix())
+ numNodes := len(conn.queryAPIs)
+
+ selectedNode := rand.Intn(numNodes)
+ conn.lock.RLock()
+ queryAPI := conn.queryAPIs[selectedNode]
+ conn.lock.RUnlock()
+
+ if query != "" {
+ request, err = prepareRequest(query, queryAPI, nil)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ if requestValues != nil {
+ request, _ = http.NewRequest("POST", queryAPI, bytes.NewBufferString(requestValues.Encode()))
+ } else {
+ request, _ = http.NewRequest("POST", queryAPI, nil)
+ }
+ request.Header.Add("Content-Type", "application/x-www-form-urlencoded")
+ }
+
+ resp, err := conn.client.Do(request)
+ if err != nil {
+ // if this is the last node return with error
+ if numNodes == 1 {
+ break
+ }
+ // remove the node that failed from the list of query nodes
+ conn.lock.Lock()
+ conn.queryAPIs = append(conn.queryAPIs[:selectedNode], conn.queryAPIs[selectedNode+1:]...)
+ conn.lock.Unlock()
+ continue
+ } else {
+ return resp, nil
+ }
+ }
+
+ return nil, fmt.Errorf("N1QL: Query nodes not responding")
+}
+
+func serializeErrors(errors interface{}) string {
+
+ var errString string
+ switch errors := errors.(type) {
+ case []interface{}:
+ for _, e := range errors {
+ switch e := e.(type) {
+ case map[string]interface{}:
+ code, _ := e["code"]
+ msg, _ := e["msg"]
+
+ if code != 0 && msg != "" {
+ if errString != "" {
+ errString = fmt.Sprintf("%v Code : %v Message : %v", errString, code, msg)
+ } else {
+ errString = fmt.Sprintf("Code : %v Message : %v", code, msg)
+ }
+ }
+ }
+ }
+ }
+
+ if errString != "" {
+ return errString
+ }
+ return fmt.Sprintf(" Error %v %T", errors, errors)
+}
+
+func (conn *n1qlConn) Prepare(query string) (driver.Stmt, error) {
+ var argCount int
+
+ query = "PREPARE " + query
+ query, argCount = prepareQuery(query)
+
+ resp, err := conn.doClientRequest(query, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ if resp.StatusCode != 200 {
+ bod, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 512))
+ return nil, fmt.Errorf("%s", bod)
+ }
+
+ var resultMap map[string]*json.RawMessage
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("N1QL: Failed to read response body from server. Error %v", err)
+ }
+
+ if err := json.Unmarshal(body, &resultMap); err != nil {
+ return nil, fmt.Errorf("N1QL: Failed to parse response. Error %v", err)
+ }
+
+ stmt := &n1qlStmt{conn: conn, argCount: argCount}
+
+ errors, ok := resultMap["errors"]
+ if ok && errors != nil {
+ var errs []interface{}
+ _ = json.Unmarshal(*errors, &errs)
+ return nil, fmt.Errorf("N1QL: Error preparing statement %v", serializeErrors(errs))
+ }
+
+ for name, results := range resultMap {
+ switch name {
+ case "results":
+ var preparedResults []interface{}
+ if err := json.Unmarshal(*results, &preparedResults); err != nil {
+ return nil, fmt.Errorf("N1QL: Failed to unmarshal results %v", err)
+ }
+ if len(preparedResults) == 0 {
+ return nil, fmt.Errorf("N1QL: Unknown error, no prepared results returned")
+ }
+ serialized, _ := json.Marshal(preparedResults[0])
+ stmt.name = preparedResults[0].(map[string]interface{})["name"].(string)
+ stmt.prepared = string(serialized)
+ case "signature":
+ stmt.signature = string(*results)
+ }
+ }
+
+ if stmt.prepared == "" {
+ return nil, ErrInternalError
+ }
+
+ return stmt, nil
+}
+
+func (conn *n1qlConn) Begin() (driver.Tx, error) {
+ return nil, ErrNotSupported
+}
+
+func (conn *n1qlConn) Close() error {
+ return nil
+}
+
+func decodeSignature(signature *json.RawMessage) interface{} {
+
+ var sign interface{}
+ var rows map[string]interface{}
+
+ json.Unmarshal(*signature, &sign)
+
+ switch s := sign.(type) {
+ case map[string]interface{}:
+ return s
+ case string:
+ return s
+ default:
+ fmt.Printf(" Cannot decode signature. Type of this signature is %T", s)
+ return map[string]interface{}{"*": "*"}
+ }
+
+ return rows
+}
+
+func (conn *n1qlConn) performQuery(query string, requestValues *url.Values) (driver.Rows, error) {
+
+ resp, err := conn.doClientRequest(query, requestValues)
+ if err != nil {
+ return nil, err
+ }
+
+ if resp.StatusCode != 200 {
+ bod, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 512))
+ return nil, fmt.Errorf("%s", bod)
+ }
+
+ var resultMap map[string]*json.RawMessage
+ decoder := json.NewDecoder(resp.Body)
+
+ err = decoder.Decode(&resultMap)
+ if err != nil {
+ return nil, fmt.Errorf(" N1QL: Failed to decode result %v", err)
+ }
+
+ var signature interface{}
+ var resultRows *json.RawMessage
+ var metrics interface{}
+ var status interface{}
+ var requestId interface{}
+ var errs interface{}
+
+ for name, results := range resultMap {
+ switch name {
+ case "errors":
+ _ = json.Unmarshal(*results, &errs)
+ case "signature":
+ if results != nil {
+ signature = decodeSignature(results)
+ } else if N1QL_PASSTHROUGH_MODE == true {
+ // for certain types of DML queries, the returned signature could be null
+ // however in passthrough mode we always return the metrics, status etc as
+ // rows therefore we need to ensure that there is a default signature.
+ signature = map[string]interface{}{"*": "*"}
+ }
+ case "results":
+ resultRows = results
+ case "metrics":
+ if N1QL_PASSTHROUGH_MODE == true {
+ _ = json.Unmarshal(*results, &metrics)
+ }
+ case "status":
+ if N1QL_PASSTHROUGH_MODE == true {
+ _ = json.Unmarshal(*results, &status)
+ }
+ case "requestID":
+ if N1QL_PASSTHROUGH_MODE == true {
+ _ = json.Unmarshal(*results, &requestId)
+ }
+ }
+ }
+
+ if N1QL_PASSTHROUGH_MODE == true {
+ extraVals := map[string]interface{}{"requestID": requestId,
+ "status": status,
+ "signature": signature,
+ }
+
+ // in passthrough mode the last line will always be an error line
+ errors := map[string]interface{}{"errors": errs}
+ return resultToRows(bytes.NewReader(*resultRows), resp, signature, metrics, errors, extraVals)
+ }
+
+ // we return the errors with the rows because we can have scenarios where there are valid
+ // results returned along with the error and this interface doesn't allow for both to be
+ // returned and hence this workaround.
+ return resultToRows(bytes.NewReader(*resultRows), resp, signature, nil, errs, nil)
+
+}
+
+// Executes a query that returns a set of Rows.
+// Select statements should use this interface
+func (conn *n1qlConn) Query(query string, args []driver.Value) (driver.Rows, error) {
+
+ if len(args) > 0 {
+ var argCount int
+ query, argCount = prepareQuery(query)
+ if argCount != len(args) {
+ return nil, fmt.Errorf("Argument count mismatch %d != %d", argCount, len(args))
+ }
+ query, args = preparePositionalArgs(query, argCount, args)
+ }
+
+ return conn.performQuery(query, nil)
+}
+
+func (conn *n1qlConn) performExec(query string, requestValues *url.Values) (driver.Result, error) {
+
+ resp, err := conn.doClientRequest(query, requestValues)
+ if err != nil {
+ return nil, err
+ }
+
+ if resp.StatusCode != 200 {
+ bod, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 512))
+ return nil, fmt.Errorf("%s", bod)
+ }
+
+ var resultMap map[string]*json.RawMessage
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("N1QL: Failed to read response body from server. Error %v", err)
+ }
+
+ if err := json.Unmarshal(body, &resultMap); err != nil {
+ return nil, fmt.Errorf("N1QL: Failed to parse response. Error %v", err)
+ }
+
+ var execErr error
+ res := &n1qlResult{}
+ for name, results := range resultMap {
+ switch name {
+ case "metrics":
+ var metrics map[string]interface{}
+ err := json.Unmarshal(*results, &metrics)
+ if err != nil {
+ return nil, fmt.Errorf("N1QL: Failed to unmarshal response. Error %v", err)
+ }
+ if mc, ok := metrics["mutationCount"]; ok {
+ res.affectedRows = int64(mc.(float64))
+ }
+ break
+ case "errors":
+ var errs []interface{}
+ _ = json.Unmarshal(*results, &errs)
+ execErr = fmt.Errorf("N1QL: Error executing query %v", serializeErrors(errs))
+ }
+ }
+
+ return res, execErr
+}
+
+// Execer implementation. To be used for queries that do not return any rows
+// such as Create Index, Insert, Upsert, Delete etc
+func (conn *n1qlConn) Exec(query string, args []driver.Value) (driver.Result, error) {
+
+ if len(args) > 0 {
+ var argCount int
+ query, argCount = prepareQuery(query)
+ if argCount != len(args) {
+ return nil, fmt.Errorf("Argument count mismatch %d != %d", argCount, len(args))
+ }
+ query, args = preparePositionalArgs(query, argCount, args)
+ }
+
+ return conn.performExec(query, nil)
+}
+
+func prepareQuery(query string) (string, int) {
+
+ var count int
+ re := regexp.MustCompile("\\?")
+
+ f := func(s string) string {
+ count++
+ return fmt.Sprintf("$%d", count)
+ }
+ return re.ReplaceAllStringFunc(query, f), count
+}
+
+//
+// Replace the positional params in the query and return the list of left-over args
+func preparePositionalArgs(query string, argCount int, args []driver.Value) (string, []driver.Value) {
+ subList := make([]string, 0)
+ newArgs := make([]driver.Value, 0)
+
+ for i, arg := range args {
+ if i < argCount {
+ var a string
+ switch arg := arg.(type) {
+ case string:
+ a = fmt.Sprintf("\"%v\"", arg)
+ case []byte:
+ a = string(arg)
+ default:
+ a = fmt.Sprintf("%v", arg)
+ }
+ sub := []string{fmt.Sprintf("$%d", i+1), a}
+ subList = append(subList, sub...)
+ } else {
+ newArgs = append(newArgs, arg)
+ }
+ }
+ r := strings.NewReplacer(subList...)
+ return r.Replace(query), newArgs
+}
+
+// prepare a http request for the query
+//
+func prepareRequest(query string, queryAPI string, args []driver.Value) (*http.Request, error) {
+
+ postData := url.Values{}
+ postData.Set("statement", query)
+
+ if len(args) > 0 {
+ paStr := buildPositionalArgList(args)
+ if len(paStr) > 0 {
+ postData.Set("args", paStr)
+ }
+ }
+
+ setQueryParams(&postData)
+ request, _ := http.NewRequest("POST", queryAPI, bytes.NewBufferString(postData.Encode()))
+ request.Header.Add("Content-Type", "application/x-www-form-urlencoded")
+
+ return request, nil
+}
+
+//
+// Set query params
+
+func setQueryParams(v *url.Values) {
+
+ for key, value := range QueryParams {
+ v.Set(key, value)
+ }
+}
+
+type n1qlStmt struct {
+ conn *n1qlConn
+ prepared string
+ signature string
+ argCount int
+ name string
+}
+
+func (stmt *n1qlStmt) Close() error {
+ stmt.prepared = ""
+ stmt.signature = ""
+ stmt.argCount = 0
+ stmt = nil
+ return nil
+}
+
+func (stmt *n1qlStmt) NumInput() int {
+ return stmt.argCount
+}
+
+func buildPositionalArgList(args []driver.Value) string {
+ positionalArgs := make([]string, 0)
+ for _, arg := range args {
+ switch arg := arg.(type) {
+ case string:
+ // add double quotes since this is a string
+ positionalArgs = append(positionalArgs, fmt.Sprintf("\"%v\"", arg))
+ case []byte:
+ positionalArgs = append(positionalArgs, string(arg))
+ default:
+ positionalArgs = append(positionalArgs, fmt.Sprintf("%v", arg))
+ }
+ }
+
+ if len(positionalArgs) > 0 {
+ paStr := "["
+ for i, param := range positionalArgs {
+ if i == len(positionalArgs)-1 {
+ paStr = fmt.Sprintf("%s%s]", paStr, param)
+ } else {
+ paStr = fmt.Sprintf("%s%s,", paStr, param)
+ }
+ }
+ return paStr
+ }
+ return ""
+}
+
+// prepare a http request for the query
+//
+func (stmt *n1qlStmt) prepareRequest(args []driver.Value) (*url.Values, error) {
+
+ postData := url.Values{}
+
+ // use name prepared statement if possible
+ if stmt.name != "" {
+ postData.Set("prepared", fmt.Sprintf("\"%s\"", stmt.name))
+ } else {
+ postData.Set("prepared", stmt.prepared)
+ }
+
+ if len(args) < stmt.NumInput() {
+ return nil, fmt.Errorf("N1QL: Insufficient args. Prepared statement contains positional args")
+ }
+
+ if len(args) > 0 {
+ paStr := buildPositionalArgList(args)
+ if len(paStr) > 0 {
+ postData.Set("args", paStr)
+ }
+ }
+
+ setQueryParams(&postData)
+
+ return &postData, nil
+}
+
+func (stmt *n1qlStmt) Query(args []driver.Value) (driver.Rows, error) {
+ if stmt.prepared == "" {
+ return nil, fmt.Errorf("N1QL: Prepared statement not found")
+ }
+
+retry:
+ requestValues, err := stmt.prepareRequest(args)
+ if err != nil {
+ return nil, err
+ }
+
+ rows, err := stmt.conn.performQuery("", requestValues)
+ if err != nil && stmt.name != "" {
+ // retry once if we used a named prepared statement
+ stmt.name = ""
+ goto retry
+ }
+
+ return rows, err
+}
+
+func (stmt *n1qlStmt) Exec(args []driver.Value) (driver.Result, error) {
+ if stmt.prepared == "" {
+ return nil, fmt.Errorf("N1QL: Prepared statement not found")
+ }
+ requestValues, err := stmt.prepareRequest(args)
+ if err != nil {
+ return nil, err
+ }
+
+ return stmt.conn.performExec("", requestValues)
+}
diff --git a/vendor/github.com/couchbase/go_n1ql/result.go b/vendor/github.com/couchbase/go_n1ql/result.go
new file mode 100644
index 00000000..2b53b160
--- /dev/null
+++ b/vendor/github.com/couchbase/go_n1ql/result.go
@@ -0,0 +1,22 @@
+// Copyright 2014-Present Couchbase, Inc.
+//
+// Use of this software is governed by the Business Source License included
+// in the file licenses/BSL-Couchbase.txt. As of the Change Date specified
+// in that file, in accordance with the Business Source License, use of this
+// software will be governed by the Apache License, Version 2.0, included in
+// the file licenses/APL2.txt.
+
+package go_n1ql
+
+type n1qlResult struct {
+ affectedRows int64
+ insertId int64
+}
+
+func (res *n1qlResult) LastInsertId() (int64, error) {
+ return res.insertId, nil
+}
+
+func (res *n1qlResult) RowsAffected() (int64, error) {
+ return res.affectedRows, nil
+}
diff --git a/vendor/github.com/couchbase/go_n1ql/rows.go b/vendor/github.com/couchbase/go_n1ql/rows.go
new file mode 100644
index 00000000..2f5faec0
--- /dev/null
+++ b/vendor/github.com/couchbase/go_n1ql/rows.go
@@ -0,0 +1,171 @@
+// Copyright 2014-Present Couchbase, Inc.
+//
+// Use of this software is governed by the Business Source License included
+// in the file licenses/BSL-Couchbase.txt. As of the Change Date specified
+// in that file, in accordance with the Business Source License, use of this
+// software will be governed by the Apache License, Version 2.0, included in
+// the file licenses/APL2.txt.
+
+package go_n1ql
+
+import (
+ "database/sql/driver"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "sort"
+)
+
+type n1qlRows struct {
+ resp *http.Response
+ results io.Reader
+ resultChan chan interface{}
+ errChan chan error
+ closed bool
+ signature interface{}
+ extras interface{}
+ metrics interface{}
+ errors interface{}
+ passthrough bool
+ columns []string
+ rowsSent int
+}
+
+func resultToRows(results io.Reader, resp *http.Response, signature interface{}, metrics, errors, extraVals interface{}) (*n1qlRows, error) {
+
+ rows := &n1qlRows{results: results,
+ resp: resp,
+ signature: signature,
+ extras: extraVals,
+ metrics: metrics,
+ errors: errors,
+ resultChan: make(chan interface{}, 1),
+ errChan: make(chan error),
+ }
+
+ // detect if we are in passthrough mode
+ if metrics != nil && extraVals != nil {
+ rows.passthrough = true
+ }
+
+ go rows.populateRows()
+
+ return rows, nil
+}
+
+func (rows *n1qlRows) populateRows() {
+ var resultRows []interface{}
+ defer rows.resp.Body.Close()
+
+ resultsDecoder := json.NewDecoder(rows.results)
+ err := resultsDecoder.Decode(&resultRows)
+
+ if err != nil {
+ rows.errChan <- err
+ }
+
+ if rows.extras != nil {
+ rows.resultChan <- rows.extras
+ }
+
+ // second row will be metrics
+ if rows.metrics != nil {
+ rows.resultChan <- rows.metrics
+ }
+
+ for _, row := range resultRows {
+ if rows.closed == true {
+ break
+ }
+ rows.resultChan <- row
+ }
+
+ if rows.errors != nil {
+ rows.resultChan <- rows.errors
+ }
+
+ close(rows.resultChan)
+
+}
+
+func (rows *n1qlRows) Columns() []string {
+
+ var columns = make([]string, 0)
+
+ switch s := rows.signature.(type) {
+ case map[string]interface{}:
+ for key, _ := range s {
+ columns = append(columns, key)
+ }
+ case string:
+ columns = append(columns, s)
+ case nil:
+ columns = append(columns, "null")
+ }
+
+ sort.Strings(columns)
+ rows.columns = columns
+ return columns
+}
+
+func (rows *n1qlRows) Close() error {
+ rows.closed = true
+ return nil
+}
+
+func (rows *n1qlRows) Next(dest []driver.Value) error {
+ select {
+ case r, ok := <-rows.resultChan:
+ if ok {
+ numColumns := len(rows.Columns())
+
+ if numColumns == 1 {
+ bytes, _ := json.Marshal(r)
+ dest[0] = bytes
+ } else if rows.passthrough == true && rows.rowsSent < 2 {
+ // first two rows in passthrough mode are status and metrics
+ // in passthrough mode if the query being executed has multiple projections
+ // then it is highly likely that the number of columns of the metrics/status
+ // will not match the number of columns, therefore the following hack
+ bytes, _ := json.Marshal(r)
+ dest[0] = bytes
+ for i := 1; i < numColumns; i++ {
+ dest[i] = ""
+ }
+ } else {
+ switch resultRow := r.(type) {
+ case map[string]interface{}:
+ if len(resultRow) > numColumns {
+ return fmt.Errorf("N1QL: More Colums than expected %d != %d r %v", len(resultRow), numColumns, r)
+ }
+ i := 0
+ for _, colName := range rows.columns {
+ if value, exists := resultRow[colName]; exists == true {
+ bytes, _ := json.Marshal(value)
+ dest[i] = bytes
+
+ } else {
+ dest[i] = ""
+ }
+ i++
+ }
+ case []interface{}:
+ i := 0
+ for _, value := range resultRow {
+ bytes, _ := json.Marshal(value)
+ dest[i] = bytes
+ i++
+ }
+
+ }
+ }
+ rows.rowsSent++
+ return nil
+ } else {
+ return io.EOF
+ }
+ case e := <-rows.errChan:
+ return e
+ }
+}
diff --git a/vendor/github.com/couchbase/gomemcached/.gitignore b/vendor/github.com/couchbase/gomemcached/.gitignore
new file mode 100644
index 00000000..cd8acba1
--- /dev/null
+++ b/vendor/github.com/couchbase/gomemcached/.gitignore
@@ -0,0 +1,7 @@
+#*
+*.[68]
+*~
+*.swp
+/gocache/gocache
+c.out
+.idea
\ No newline at end of file
diff --git a/vendor/github.com/couchbase/gomemcached/.mockery.yaml b/vendor/github.com/couchbase/gomemcached/.mockery.yaml
new file mode 100644
index 00000000..25f03230
--- /dev/null
+++ b/vendor/github.com/couchbase/gomemcached/.mockery.yaml
@@ -0,0 +1,16 @@
+disable-version-string: True
+packages:
+ github.com/couchbase/gomemcached:
+ config:
+ recursive: True
+ all: True
+ dir: "mocks"
+ outpkg: "mocks"
+ mockname: "{{.InterfaceName}}"
+ github.com/couchbase/gomemcached/client:
+ config:
+ recursive: True
+ all: True
+ dir: "client/mocks"
+ outpkg: "mocks"
+ mockname: "{{.InterfaceName}}"
diff --git a/vendor/github.com/couchbase/gomemcached/LICENSE b/vendor/github.com/couchbase/gomemcached/LICENSE
new file mode 100644
index 00000000..b01ef802
--- /dev/null
+++ b/vendor/github.com/couchbase/gomemcached/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2013 Dustin Sallings
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/couchbase/gomemcached/README.markdown b/vendor/github.com/couchbase/gomemcached/README.markdown
new file mode 100644
index 00000000..5e9b2de5
--- /dev/null
+++ b/vendor/github.com/couchbase/gomemcached/README.markdown
@@ -0,0 +1,32 @@
+# gomemcached
+
+This is a memcached binary protocol toolkit in [go][go].
+
+It provides client and server functionality as well as a little sample
+server showing how I might make a server if I valued purity over
+performance.
+
+## Server Design
+
+
+

+
+
+The basic design can be seen in [gocache]. A [storage
+server][storage] is run as a goroutine that receives a `MCRequest` on
+a channel, and then issues an `MCResponse` to a channel contained
+within the request.
+
+Each connection is a separate goroutine, of course, and is responsible
+for all IO for that connection until the connection drops or the
+`dataServer` decides it's stupid and sends a fatal response back over
+the channel.
+
+There is currently no work at all in making the thing perform (there
+are specific areas I know need work). This is just my attempt to
+learn the language somewhat.
+
+[go]: http://golang.org/
+[gocache]: gomemcached/blob/master/gocache/gocache.go
+[storage]: gomemcached/blob/master/gocache/mc_storage.go
diff --git a/vendor/github.com/couchbase/gomemcached/client/collections_filter.go b/vendor/github.com/couchbase/gomemcached/client/collections_filter.go
new file mode 100644
index 00000000..a34d353f
--- /dev/null
+++ b/vendor/github.com/couchbase/gomemcached/client/collections_filter.go
@@ -0,0 +1,130 @@
+package memcached
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+// Collection based filter
+type CollectionsFilter struct {
+ ManifestUid uint64
+ UseManifestUid bool
+ StreamId uint16
+ UseStreamId bool
+
+ // Use either ScopeId OR CollectionsList, not both
+ CollectionsList []uint32
+ ScopeId uint32
+}
+
+type nonStreamIdNonCollectionsMeta struct {
+ ManifestId string `json:"uid"`
+}
+
+type nonStreamIdNonResumeCollectionsMeta struct {
+ CollectionsList []string `json:"collections"`
+}
+
+type nonStreamIdCollectionsMeta struct {
+ ManifestId string `json:"uid"`
+ CollectionsList []string `json:"collections"`
+}
+
+type streamIdNonResumeCollectionsMeta struct {
+ CollectionsList []string `json:"collections"`
+ StreamId uint16 `json:"sid"`
+}
+
+type streamIdNonResumeScopeMeta struct {
+ ScopeId string `json:"scope"`
+ StreamId uint16 `json:"sid"`
+}
+
+func (c *CollectionsFilter) IsValid() error {
+ if c.UseManifestUid && c.UseStreamId {
+ return fmt.Errorf("Not implemented yet")
+ }
+
+ if len(c.CollectionsList) > 0 && c.ScopeId > 0 {
+ return fmt.Errorf("Collection list is specified but scope ID is also specified")
+ }
+
+ return nil
+}
+
+func (c *CollectionsFilter) outputCollectionsFilterColList() (outputList []string) {
+ for _, collectionUint := range c.CollectionsList {
+ outputList = append(outputList, fmt.Sprintf("%x", collectionUint))
+ }
+ return
+}
+
+func (c *CollectionsFilter) outputScopeId() string {
+ return fmt.Sprintf("%x", c.ScopeId)
+}
+
+func (c *CollectionsFilter) ToStreamReqBody() ([]byte, error) {
+ if err := c.IsValid(); err != nil {
+ return nil, err
+ }
+
+ var output interface{}
+
+ switch c.UseStreamId {
+ case true:
+ switch c.UseManifestUid {
+ case true:
+ // TODO
+ return nil, fmt.Errorf("NotImplemented0")
+ case false:
+ switch len(c.CollectionsList) > 0 {
+ case true:
+ filter := &streamIdNonResumeCollectionsMeta{
+ StreamId: c.StreamId,
+ CollectionsList: c.outputCollectionsFilterColList(),
+ }
+ output = *filter
+ case false:
+ filter := &streamIdNonResumeScopeMeta{
+ StreamId: c.StreamId,
+ ScopeId: c.outputScopeId(),
+ }
+ output = *filter
+ }
+ }
+ case false:
+ switch c.UseManifestUid {
+ case true:
+ switch len(c.CollectionsList) > 0 {
+ case true:
+ filter := &nonStreamIdCollectionsMeta{
+ ManifestId: fmt.Sprintf("%x", c.ManifestUid),
+ CollectionsList: c.outputCollectionsFilterColList(),
+ }
+ output = *filter
+ case false:
+ filter := &nonStreamIdNonCollectionsMeta{
+ ManifestId: fmt.Sprintf("%x", c.ManifestUid),
+ }
+ output = *filter
+ }
+ case false:
+ switch len(c.CollectionsList) > 0 {
+ case true:
+ filter := &nonStreamIdNonResumeCollectionsMeta{
+ CollectionsList: c.outputCollectionsFilterColList(),
+ }
+ output = *filter
+ case false:
+ return nil, fmt.Errorf("Specifying scopeID must require the use of streamId")
+ }
+ }
+ }
+
+ data, err := json.Marshal(output)
+ if err != nil {
+ return nil, err
+ } else {
+ return data, nil
+ }
+}
diff --git a/vendor/github.com/couchbase/gomemcached/client/mc.go b/vendor/github.com/couchbase/gomemcached/client/mc.go
new file mode 100644
index 00000000..9a084aec
--- /dev/null
+++ b/vendor/github.com/couchbase/gomemcached/client/mc.go
@@ -0,0 +1,2055 @@
+// Package memcached provides a memcached binary protocol client.
+package memcached
+
+import (
+ "crypto/tls"
+ "encoding/base64"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "io"
+ "math"
+ "math/rand"
+ "net"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/couchbase/gomemcached"
+ "github.com/couchbase/goutils/logging"
+ "github.com/couchbase/goutils/scramsha"
+ "github.com/google/uuid"
+ "github.com/pkg/errors"
+)
+
+// ClientIface is the full surface of a memcached binary-protocol client;
+// *Client is the concrete implementation in this package.
+type ClientIface interface {
+ Add(vb uint16, key string, flags int, exp int, body []byte, context ...*ClientContext) (*gomemcached.MCResponse, error)
+ Append(vb uint16, key string, data []byte, context ...*ClientContext) (*gomemcached.MCResponse, error)
+ Auth(user, pass string) (*gomemcached.MCResponse, error)
+ AuthList() (*gomemcached.MCResponse, error)
+ AuthPlain(user, pass string) (*gomemcached.MCResponse, error)
+ AuthScramSha(user, pass string) (*gomemcached.MCResponse, error)
+ CASNext(vb uint16, k string, exp int, state *CASState) bool
+ CAS(vb uint16, k string, f CasFunc, initexp int) (*gomemcached.MCResponse, error)
+ CollectionsGetCID(scope string, collection string) (*gomemcached.MCResponse, error)
+ CollectionEnabled() bool
+ Close() error
+ Decr(vb uint16, key string, amt, def uint64, exp int, context ...*ClientContext) (uint64, error)
+ Del(vb uint16, key string, context ...*ClientContext) (*gomemcached.MCResponse, error)
+ EnableMutationToken() (*gomemcached.MCResponse, error)
+ EnableFeatures(features Features) (*gomemcached.MCResponse, error)
+ EnableDataPool(getter func(uint64) ([]byte, error), doneCb func([]byte)) error
+ Get(vb uint16, key string, context ...*ClientContext) (*gomemcached.MCResponse, error)
+ GetAllVbSeqnos(vbSeqnoMap map[uint16]uint64, context ...*ClientContext) (map[uint16]uint64, error)
+ GetAndTouch(vb uint16, key string, exp int, context ...*ClientContext) (*gomemcached.MCResponse, error)
+ GetBulk(vb uint16, keys []string, rv map[string]*gomemcached.MCResponse, subPaths []string, context ...*ClientContext) error
+ GetCollectionsManifest() (*gomemcached.MCResponse, error)
+ GetMeta(vb uint16, key string, context ...*ClientContext) (*gomemcached.MCResponse, error)
+ GetRandomDoc(context ...*ClientContext) (*gomemcached.MCResponse, error)
+ GetSubdoc(vb uint16, key string, subPaths []string, context ...*ClientContext) (*gomemcached.MCResponse, error)
+ SetSubdoc(vb uint16, key string, ops []SubDocOp, addOnly bool, exp int, cas uint64, context ...*ClientContext) (
+ *gomemcached.MCResponse, error)
+ Hijack() MemcachedConnection
+ Incr(vb uint16, key string, amt, def uint64, exp int, context ...*ClientContext) (uint64, error)
+ LastBucket() string
+ Observe(vb uint16, key string) (result ObserveResult, err error)
+ ObserveSeq(vb uint16, vbuuid uint64) (result *ObserveSeqResult, err error)
+ Receive() (*gomemcached.MCResponse, error)
+ ReceiveWithDeadline(deadline time.Time) (*gomemcached.MCResponse, error)
+ Replica() bool
+ Send(req *gomemcached.MCRequest) (rv *gomemcached.MCResponse, err error)
+ Set(vb uint16, key string, flags int, exp int, body []byte, context ...*ClientContext) (*gomemcached.MCResponse, error)
+ SetKeepAliveOptions(interval time.Duration)
+ SetReadDeadline(t time.Time)
+ SetDeadline(t time.Time)
+ SetReplica(r bool)
+ SelectBucket(bucket string) (*gomemcached.MCResponse, error)
+ SetCas(vb uint16, key string, flags int, exp int, cas uint64, body []byte, context ...*ClientContext) (
+ *gomemcached.MCResponse, error)
+ Stats(key string) ([]StatValue, error)
+ StatsFunc(key string, fn func(key, val []byte)) error
+ StatsMap(key string) (map[string]string, error)
+ StatsMapForSpecifiedStats(key string, statsMap map[string]string) error
+ Transmit(req *gomemcached.MCRequest) error
+ TransmitWithDeadline(req *gomemcached.MCRequest, deadline time.Time) error
+ TransmitResponse(res *gomemcached.MCResponse) error
+ UprGetFailoverLog(vb []uint16) (map[uint16]*FailoverLog, error)
+ GetConnName() string
+ SetConnName(name string)
+
+ // UprFeed Related
+ NewUprFeed() (*UprFeed, error)
+ NewUprFeedIface() (UprFeedIface, error)
+ NewUprFeedWithConfig(ackByClient bool) (*UprFeed, error)
+ NewUprFeedWithConfigIface(ackByClient bool) (UprFeedIface, error)
+
+ CreateRangeScan(vb uint16, start []byte, excludeStart bool, end []byte, excludeEnd bool, withDocs bool,
+ context ...*ClientContext) (*gomemcached.MCResponse, error)
+ CreateRandomScan(vb uint16, sampleSize int, withDocs bool, context ...*ClientContext) (*gomemcached.MCResponse, error)
+ ContinueRangeScan(vb uint16, uuid []byte, opaque uint32, items uint32, maxSize uint32, timeout uint32,
+ context ...*ClientContext) error
+ CancelRangeScan(vb uint16, uuid []byte, opaque uint32, context ...*ClientContext) (*gomemcached.MCResponse, error)
+
+ ValidateKey(vb uint16, key string, context ...*ClientContext) (bool, error)
+
+ GetErrorMap(errMapVersion gomemcached.ErrorMapVersion) (map[string]interface{}, error)
+}
+
+// ClientContext carries optional per-request settings (collection, user
+// impersonation, durability, compression, ...) passed variadically to most
+// Client operations; only context[0] is consulted for most fields.
+type ClientContext struct {
+ // Collection-based context
+ CollId uint32
+
+ // Impersonate context
+ User string
+
+ // VB-state related context
+ // nil means not used in this context
+ VbState *VbStateType
+
+ // Preserve Expiry
+ PreserveExpiry bool
+
+ // Durability Level
+ DurabilityLevel gomemcached.DurabilityLvl
+
+ // Durability Timeout
+ DurabilityTimeout time.Duration
+
+ // Data is JSON in snappy compressed format
+ Compressed bool
+
+ // Sub-doc paths are document fields (not XATTRs)
+ DocumentSubDocPaths bool
+
+ // Include XATTRs in random document retrieval
+ IncludeXATTRs bool
+}
+
+// Copy returns a field-by-field copy of the context. VbState is deep-copied;
+// note the copy's VbState is always non-nil (freshly allocated) even when the
+// source's VbState is nil, and DocumentSubDocPaths/IncludeXATTRs are not copied.
+func (this *ClientContext) Copy() *ClientContext {
+ rv := &ClientContext{
+ CollId: this.CollId,
+ User: this.User,
+ PreserveExpiry: this.PreserveExpiry,
+ DurabilityLevel: this.DurabilityLevel,
+ DurabilityTimeout: this.DurabilityTimeout,
+ Compressed: this.Compressed,
+ }
+
+ rv.VbState = new(VbStateType)
+
+ if this.VbState != nil {
+ *rv.VbState = *this.VbState
+ }
+
+ return rv
+}
+
+// VbStateType is the wire encoding of a vbucket state used when filtering
+// GET_ALL_VB_SEQNOS requests.
+type VbStateType uint8
+
+const (
+ VbAlive VbStateType = 0x00
+ VbActive VbStateType = 0x01
+ VbReplica VbStateType = 0x02
+ VbPending VbStateType = 0x03
+ VbDead VbStateType = 0x04
+)
+
+// RandomScanSeed seeds the PRNG used for random range scans.
+const RandomScanSeed = 0x5eedbead
+
+// Sentinel errors returned by feature negotiation and per-request context checks.
+var (
+ ErrUnSuccessfulHello = errors.New("Unsuccessful HELLO exchange")
+ ErrInvalidHello = errors.New("Invalid HELLO response")
+ ErrPreserveExpiryNotSupported = errors.New("PreserveExpiry is not supported")
+ ErrDurabilityNotSupported = errors.New("Durability is not supported")
+)
+
+// InitExtras pre-sizes req.Extras for opcodes whose extras length depends on
+// this context. For GET_ALL_VB_SEQNOS: 4 bytes when a VbState filter is set,
+// plus 4 more (collection ID) — or 8 total — when collections are enabled.
+func (context *ClientContext) InitExtras(req *gomemcached.MCRequest, client *Client) {
+ if req == nil || client == nil {
+ return
+ }
+
+ var bytesToAllocate int
+ switch req.Opcode {
+ case gomemcached.GET_ALL_VB_SEQNOS:
+ if context.VbState != nil {
+ bytesToAllocate += 4
+ }
+ if client.CollectionEnabled() {
+ if context.VbState == nil {
+ bytesToAllocate += 8
+ } else {
+ bytesToAllocate += 4
+ }
+ }
+ }
+ if bytesToAllocate > 0 {
+ req.Extras = make([]byte, bytesToAllocate)
+ }
+}
+
+// SubDocOp describes one mutation in a SUBDOC_MULTI_MUTATION request.
+// A nil Value encodes a delete; Counter selects SUBDOC_COUNTER (see encode).
+type SubDocOp struct {
+ Xattr bool
+ Path string
+ Value []byte
+ Counter bool
+}
+
+// encodedLength returns the wire size of this op: an 8-byte per-op header
+// (opcode, flags, path length, value length) plus the path and value bytes.
+func (this *SubDocOp) encodedLength() int {
+ return 8 + len([]byte(this.Path)) + len(this.Value)
+}
+
+// encode appends this op's wire form to buf and returns the extended slice.
+// Layout: opcode(1) flags(1) pathLen(2,BE) valueLen(4,BE) path value.
+func (this *SubDocOp) encode(buf []byte) []byte {
+ // Opcode selection: counter > delete (nil value) > whole-doc set > dict upsert.
+ if this.Counter {
+ buf = append(buf, byte(gomemcached.SUBDOC_COUNTER))
+ } else if this.Value == nil {
+ buf = append(buf, byte(gomemcached.SUBDOC_DELETE))
+ } else if this.Path == "" && !this.Xattr {
+ buf = append(buf, byte(gomemcached.SET))
+ } else {
+ buf = append(buf, byte(gomemcached.SUBDOC_DICT_UPSERT))
+ }
+ if this.Xattr {
+ buf = append(buf, byte(gomemcached.SUBDOC_FLAG_XATTR))
+ } else {
+ buf = append(buf, byte(0))
+ }
+
+ pathBytes := []byte(this.Path)
+
+ buf = binary.BigEndian.AppendUint16(buf, uint16(len(pathBytes)))
+ buf = binary.BigEndian.AppendUint32(buf, uint32(len(this.Value)))
+
+ buf = append(buf, pathBytes...)
+ if this.Value != nil {
+ buf = append(buf, this.Value...)
+ }
+
+ return buf
+}
+
+const bufsize = 1024
+
+// Health states stored atomically in Client.healthy.
+var UnHealthy uint32 = 0
+var Healthy uint32 = 1
+
+// Feature is a HELLO feature code; Features is the set sent in one HELLO.
+type Features []Feature
+type Feature uint16
+
+const (
+ FeatureTcpNoDelay = Feature(0x03)
+ FeatureMutationToken = Feature(0x04) // XATTR bit in data type field with dcp mutations
+ FeatureXattr = Feature(0x06)
+ FeatureXerror = Feature(0x07)
+ FeatureSnappyCompression = Feature(0x0a)
+ FeatureDataType = Feature(0x0b) // This is named as JSON in kv_engine's feature.h
+ FeatureTracing = Feature(0x0f)
+ FeatureSyncReplication = Feature(0x11)
+ FeatureCollections = Feature(0x12)
+ FeatureSnappyEverywhere = Feature(0x13)
+ FeaturePreserveExpiry = Feature(0x14)
+ FeatureComputeUnits = Feature(0x1a)
+ FeatureHandleThrottle = Feature(0x1b)
+)
+
+// MemcachedConnection is the minimal transport the client needs: a
+// read/write/close stream with deadline support (satisfied by net.Conn).
+type MemcachedConnection interface {
+ io.ReadWriteCloser
+
+ SetReadDeadline(time.Time) error
+ SetDeadline(time.Time) error
+}
+
+// The Client itself. Not safe for concurrent use except where fields are
+// explicitly accessed atomically (healthy, collectionsEnabled, objPoolEnabled).
+type Client struct {
+ conn MemcachedConnection
+ // use uint32 type so that it can be accessed through atomic APIs
+ healthy uint32
+ opaque uint32
+
+ hdrBuf []byte
+
+ collectionsEnabled uint32
+ enabledFeatures map[Feature]bool
+ replica bool
+ deadline time.Time
+ bucket string
+ // If set, this takes precedence over the global variable ConnName
+ connName string
+
+ // Data pool hooks used by Receive when objPoolEnabled reaches datapoolInitDone.
+ objPoolEnabled uint32
+ datapoolGetter func(uint64) ([]byte, error)
+ datapoolDone func([]byte)
+}
+
+var (
+ // ConnName is used if Client.connName is not set
+ ConnName = "GoMemcached"
+ DefaultDialTimeout = time.Duration(0) // No timeout
+
+ DefaultWriteTimeout = time.Duration(0) // No timeout
+
+ // dialFun is the dialer used by Connect; a var to allow test substitution.
+ dialFun = func(prot, dest string) (net.Conn, error) {
+ return net.DialTimeout(prot, dest, DefaultDialTimeout)
+ }
+
+ // Data pool lifecycle states for Client.objPoolEnabled.
+ datapoolDisabled = uint32(0)
+ datapoolInit = uint32(1)
+ datapoolInitDone = uint32(2)
+)
+
+// SetConnectionName overrides the package-wide default connection name
+// advertised in HELLO (see GetConnName).
+func SetConnectionName(name string) {
+ ConnName = name
+}
+
+// Connect to a memcached server. Dials prot/dest with DefaultDialTimeout
+// and wraps the resulting connection in a Client.
+func Connect(prot, dest string) (rv *Client, err error) {
+ conn, err := dialFun(prot, dest)
+ if err != nil {
+ return nil, err
+ }
+ return Wrap(conn)
+}
+
+// ConnectTLS connects to a memcached server using TLS with the given config.
+func ConnectTLS(prot, dest string, config *tls.Config) (rv *Client, err error) {
+ conn, err := tls.Dial(prot, dest, config)
+ if err != nil {
+ return nil, err
+ }
+ return Wrap(conn)
+}
+
+// SetDefaultTimeouts sets the package-wide dial and write timeouts.
+// NOTE(review): the read parameter is accepted but ignored.
+func SetDefaultTimeouts(dial, read, write time.Duration) {
+ DefaultDialTimeout = dial
+ DefaultWriteTimeout = write
+}
+
+// SetDefaultDialTimeout sets only the package-wide dial timeout.
+func SetDefaultDialTimeout(dial time.Duration) {
+ DefaultDialTimeout = dial
+}
+
+// SetKeepAliveOptions enables TCP keep-alive with the given period.
+// Silently a no-op when the underlying connection is not a *net.TCPConn.
+func (c *Client) SetKeepAliveOptions(interval time.Duration) {
+ tcpConn, ok := c.conn.(*net.TCPConn)
+ if ok {
+ tcpConn.SetKeepAlive(true)
+ tcpConn.SetKeepAlivePeriod(interval)
+ }
+}
+
+// SetReadDeadline sets the read deadline on the underlying connection.
+func (c *Client) SetReadDeadline(t time.Time) {
+ c.conn.SetReadDeadline(t)
+}
+
+// SetDeadline sets the read/write deadline on the connection, caching the
+// last value to skip the syscall when the deadline is unchanged.
+func (c *Client) SetDeadline(t time.Time) {
+ if t.Equal(c.deadline) {
+ return
+ }
+ c.conn.SetDeadline(t)
+ c.deadline = t
+}
+
+// getOpaque returns the next opaque value, wrapping back to 1 near MaxInt32.
+// NOTE(review): this returns c.opaque+1 without storing it; callers that need
+// advancing opaques (e.g. GetBulk) manage c.opaque themselves — confirm intent.
+func (c *Client) getOpaque() uint32 {
+ if c.opaque >= math.MaxInt32 {
+ c.opaque = uint32(1)
+ }
+ return c.opaque + 1
+}
+
+// Wrap an existing transport. The returned Client starts healthy with a
+// preallocated header buffer; the error result is always nil today.
+func Wrap(conn MemcachedConnection) (rv *Client, err error) {
+ client := &Client{
+ conn: conn,
+ hdrBuf: make([]byte, gomemcached.HDR_LEN),
+ opaque: uint32(1),
+ enabledFeatures: make(map[Feature]bool),
+ }
+ client.setHealthy(true)
+ return client, nil
+}
+
+// Close the connection when you're done.
+func (c *Client) Close() error {
+ return c.conn.Close()
+}
+
+// IsHealthy returns true unless the client is believed to have
+// difficulty communicating to its server.
+//
+// This is useful for connection pools where we want to
+// non-destructively determine that a connection may be reused.
+func (c Client) IsHealthy() bool {
+ healthyState := atomic.LoadUint32(&c.healthy)
+ return healthyState == Healthy
+}
+
+// Send a custom request and get the response. A response whose opaque does
+// not match the request is treated as a protocol error: the status is forced
+// to EINVAL, the response itself is returned as the error, and the connection
+// is marked unhealthy.
+func (c *Client) Send(req *gomemcached.MCRequest) (rv *gomemcached.MCResponse, err error) {
+ err = c.Transmit(req)
+ if err != nil {
+ return
+ }
+ resp, _, err := getResponse(c.conn, c.hdrBuf)
+ if err == nil && resp.Opaque != req.Opaque {
+ logging.Errorf("Send: got response for opaque %v instead of response for opaque %v. req: %v -> res: %v",
+ resp.Opaque, req.Opaque, req, resp)
+ err = resp
+ resp.Status = gomemcached.EINVAL
+ c.setHealthy(false)
+ } else {
+ c.setHealthy(!gomemcached.IsFatal(err))
+ }
+ return resp, err
+}
+
+// Transmit sends a request, but does not wait for a response.
+// A write failure marks the client unhealthy.
+func (c *Client) Transmit(req *gomemcached.MCRequest) error {
+ if DefaultWriteTimeout > 0 {
+ c.conn.(net.Conn).SetWriteDeadline(time.Now().Add(DefaultWriteTimeout))
+ }
+ _, err := transmitRequest(c.conn, req)
+ // clear write deadline to avoid interference with future write operations
+ if DefaultWriteTimeout > 0 {
+ c.conn.(net.Conn).SetWriteDeadline(time.Time{})
+ }
+ if err != nil {
+ c.setHealthy(false)
+ }
+ return err
+}
+
+// TransmitWithDeadline is Transmit with an explicit per-call write deadline
+// instead of the package-wide DefaultWriteTimeout.
+func (c *Client) TransmitWithDeadline(req *gomemcached.MCRequest, deadline time.Time) error {
+ c.conn.(net.Conn).SetWriteDeadline(deadline)
+
+ _, err := transmitRequest(c.conn, req)
+
+ // clear write deadline to avoid interference with future write operations
+ c.conn.(net.Conn).SetWriteDeadline(time.Time{})
+
+ if err != nil {
+ c.setHealthy(false)
+ }
+ return err
+}
+
+// TransmitResponse sends a response, does not wait.
+func (c *Client) TransmitResponse(res *gomemcached.MCResponse) error {
+ if DefaultWriteTimeout > 0 {
+ c.conn.(net.Conn).SetWriteDeadline(time.Now().Add(DefaultWriteTimeout))
+ }
+ _, err := transmitResponse(c.conn, res)
+ // clear write deadline to avoid interference with future write operations
+ if DefaultWriteTimeout > 0 {
+ c.conn.(net.Conn).SetWriteDeadline(time.Time{})
+ }
+ if err != nil {
+ c.setHealthy(false)
+ }
+ return err
+}
+
+// Receive a response. Uses the pooled-buffer reader once the data pool is
+// fully initialized. Fatal (non-retryable) statuses mark the client unhealthy.
+// NOTE(review): resp.Status is dereferenced on the error path — this assumes
+// getResponse/getResponseWithPool never return a nil resp with an error; verify.
+func (c *Client) Receive() (*gomemcached.MCResponse, error) {
+ var resp *gomemcached.MCResponse
+ var err error
+
+ if atomic.LoadUint32(&c.objPoolEnabled) == datapoolInitDone {
+ resp, _, err = getResponseWithPool(c.conn, c.hdrBuf, c.datapoolGetter, c.datapoolDone)
+ } else {
+ resp, _, err = getResponse(c.conn, c.hdrBuf)
+ }
+ if err != nil && !isNonFatalStatus(resp.Status) {
+ c.setHealthy(false)
+ }
+ return resp, err
+}
+
+// ReceiveWithDeadline is Receive with an explicit per-call read deadline.
+func (c *Client) ReceiveWithDeadline(deadline time.Time) (*gomemcached.MCResponse, error) {
+ c.conn.(net.Conn).SetReadDeadline(deadline)
+
+ resp, _, err := getResponse(c.conn, c.hdrBuf)
+
+ // Clear read deadline to avoid interference with future read operations.
+ c.conn.(net.Conn).SetReadDeadline(time.Time{})
+
+ if err != nil && !isNonFatalStatus(resp.Status) {
+ c.setHealthy(false)
+ }
+ return resp, err
+}
+
+// isNonFatalStatus reports whether a response status indicates a normal,
+// retryable/expected condition that should NOT mark the connection unhealthy.
+func isNonFatalStatus(status gomemcached.Status) bool {
+ return status == gomemcached.KEY_ENOENT ||
+ status == gomemcached.EBUSY ||
+ status == gomemcached.RANGE_SCAN_COMPLETE ||
+ status == gomemcached.RANGE_SCAN_MORE ||
+ status == gomemcached.KEY_EEXISTS ||
+ status == gomemcached.WOULD_THROTTLE ||
+ status == gomemcached.NOT_MY_VBUCKET ||
+ status == gomemcached.SUBDOC_BAD_MULTI
+}
+
+// appendMutationToken appends the big-endian feature code 0x04
+// (mutation tokens) to a HELLO payload.
+func appendMutationToken(bytes []byte) []byte {
+ bytes = append(bytes, 0, 0)
+ binary.BigEndian.PutUint16(bytes[len(bytes)-2:], uint16(0x04))
+ return bytes
+}
+
+// GetConnName returns the per-client connection name if set, otherwise the
+// package-wide ConnName suffixed with a fresh random UUID per call.
+func (c *Client) GetConnName() string {
+ if len(c.connName) > 0 {
+ return c.connName
+ }
+ return ConnName + ":" + uuid.New().String()
+}
+
+// SetConnName sets this client's connection name, overriding the global ConnName.
+func (c *Client) SetConnName(name string) {
+ c.connName = name
+}
+
+// EnableMutationToken sends a HELLO requesting only the mutation-token
+// feature (0x04) and returns the raw response.
+func (c *Client) EnableMutationToken() (*gomemcached.MCResponse, error) {
+ var payload []byte
+ payload = appendMutationToken(payload)
+ connName := c.GetConnName()
+
+ return c.Send(&gomemcached.MCRequest{
+ Opcode: gomemcached.HELLO,
+ Key: []byte(connName),
+ Body: payload,
+ })
+}
+
+// EnableFeatures negotiates the given features via HELLO. On success it
+// records which features the server actually acknowledged (the response body
+// is a sequence of big-endian uint16 feature codes) and atomically flips
+// collectionsEnabled only if the server confirmed FeatureCollections.
+func (c *Client) EnableFeatures(features Features) (*gomemcached.MCResponse, error) {
+ var payload []byte
+ collectionsEnabled := 0
+ connName := c.GetConnName()
+
+ for _, feature := range features {
+ if feature == FeatureCollections {
+ collectionsEnabled = 1
+ }
+ payload = append(payload, 0, 0)
+ binary.BigEndian.PutUint16(payload[len(payload)-2:], uint16(feature))
+ }
+
+ rv, err := c.Send(&gomemcached.MCRequest{
+ Opcode: gomemcached.HELLO,
+ Key: []byte(connName),
+ Body: payload,
+ })
+
+ if err == nil {
+ // Reset: only count features the server echoed back, not what we asked for.
+ collectionsEnabled = 0
+ body := rv.Body
+ if rv.Status != gomemcached.SUCCESS {
+ logging.Errorf("Client.EnableFeatures: Features can't be enabled: HELLO status = %v", rv.Status)
+ return nil, ErrUnSuccessfulHello
+ } else if rv.Opcode != gomemcached.HELLO {
+ logging.Errorf("Client.EnableFeatures: Invalid memcached HELLO response: opcode %v, expecting %v", rv.Opcode, gomemcached.HELLO)
+ return nil, ErrInvalidHello
+ } else {
+ for i := 0; len(body) > i; i += 2 {
+ feature := Feature(binary.BigEndian.Uint16(body[i:]))
+ c.enabledFeatures[feature] = true
+
+ if feature == FeatureCollections {
+ collectionsEnabled = 1
+ }
+ }
+ }
+ atomic.StoreUint32(&c.collectionsEnabled, uint32(collectionsEnabled))
+ }
+ return rv, err
+}
+
+// setContext applies the optional per-request context to req: impersonated
+// user, preserve-expiry and durability framing extras, the compressed-JSON
+// datatype flag, and the LEB128-encoded collection ID. Only context[0] is
+// consulted except for Compressed, which any context element may set.
+// Returns an error if a requested capability was not negotiated via HELLO.
+func (c *Client) setContext(req *gomemcached.MCRequest, context ...*ClientContext) error {
+ req.CollIdLen = 0
+ req.UserLen = 0
+ collectionId := uint32(0)
+ collectionsEnabled := atomic.LoadUint32(&c.collectionsEnabled)
+ if len(context) > 0 {
+ collectionId = context[0].CollId
+ uLen := len(context[0].User)
+
+ // we take collections enabled as an indicator that the node understands impersonation
+ // since we don't have a specific feature for it.
+ if collectionsEnabled > 0 && uLen > 0 && uLen <= gomemcached.MAX_USER_LEN {
+ req.UserLen = uLen
+ copy(req.Username[:uLen], context[0].User)
+ }
+
+ if context[0].PreserveExpiry {
+ if !c.IsFeatureEnabled(FeaturePreserveExpiry) {
+ return ErrPreserveExpiryNotSupported
+ }
+ req.FramingExtras = append(req.FramingExtras,
+ gomemcached.FrameInfo{gomemcached.FramePreserveExpiry, 0, []byte("")})
+ }
+
+ if context[0].DurabilityLevel >= gomemcached.DuraMajority {
+ if !c.IsFeatureEnabled(FeatureSyncReplication) {
+ return ErrDurabilityNotSupported
+ }
+ // Durability frame: 1 byte level, optionally 2 bytes timeout in ms
+ // (clamped to MaxUint16).
+ data := make([]byte, 3)
+ data[0] = byte(context[0].DurabilityLevel)
+ len := 1
+ if context[0].DurabilityTimeout > 0 {
+ durabilityTimeoutMillis := context[0].DurabilityTimeout / time.Millisecond
+ if durabilityTimeoutMillis > math.MaxUint16 {
+ durabilityTimeoutMillis = math.MaxUint16
+ }
+ binary.BigEndian.PutUint16(data[1:3], uint16(durabilityTimeoutMillis))
+ len += 2
+ }
+ req.FramingExtras = append(req.FramingExtras,
+ gomemcached.FrameInfo{gomemcached.FrameDurability, len, data})
+ }
+ }
+ // any context with compressed set
+ for _, c := range context {
+ if c.Compressed {
+ req.DataType = gomemcached.DatatypeFlagJSON | gomemcached.DatatypeFlagCompressed
+ break
+ }
+ }
+
+ // if the optional collection is specified, it must be default for clients that haven't turned on collections
+ if c.collectionsEnabled == 0 {
+ if collectionId != 0 {
+ return fmt.Errorf("Client does not use collections but a collection was specified")
+ }
+ } else {
+ req.CollIdLen = binary.PutUvarint(req.CollId[:], uint64(collectionId))
+ }
+ return nil
+}
+
+// setExtrasContext encodes the collection ID (and optionally an
+// include-XATTRs flag byte) into req.Extras rather than the key prefix —
+// used by opcodes like GET_RANDOM_KEY that carry the collection in extras.
+func (c *Client) setExtrasContext(req *gomemcached.MCRequest, context ...*ClientContext) error {
+ collectionId := uint32(0)
+ xattrs := false
+ req.UserLen = 0
+ if len(context) > 0 {
+ collectionId = context[0].CollId
+ uLen := len(context[0].User)
+ if uLen > 0 && uLen <= gomemcached.MAX_USER_LEN {
+ req.UserLen = uLen
+ copy(req.Username[:], context[0].User)
+ }
+ xattrs = context[0].IncludeXATTRs
+ }
+
+ // if the optional collection is specified, it must be default for clients that haven't turned on collections
+ if atomic.LoadUint32(&c.collectionsEnabled) == 0 {
+ if collectionId != 0 {
+ return fmt.Errorf("Client does not use collections but a collection was specified")
+ }
+ } else {
+ if xattrs {
+ req.Extras = make([]byte, 5)
+ req.Extras[4] = 0x1 // protocol specifies only != 0
+ } else {
+ req.Extras = make([]byte, 4)
+ }
+ binary.BigEndian.PutUint32(req.Extras, collectionId)
+ }
+ return nil
+}
+
+// setVbSeqnoContext fills req.Extras for GET_ALL_VB_SEQNOS: an optional
+// vbucket-state filter (4 bytes) followed by the collection ID when
+// collections are enabled. Extras are sized via InitExtras if not already set.
+// Any other opcode is rejected.
+func (c *Client) setVbSeqnoContext(req *gomemcached.MCRequest, context ...*ClientContext) error {
+ if len(context) == 0 || req == nil {
+ return nil
+ }
+
+ switch req.Opcode {
+ case gomemcached.GET_ALL_VB_SEQNOS:
+ // NOTE(review): redundant re-check — len(context) was already tested above.
+ if len(context) == 0 {
+ return nil
+ }
+
+ if len(req.Extras) == 0 {
+ context[0].InitExtras(req, c)
+ }
+ if context[0].VbState != nil {
+ binary.BigEndian.PutUint32(req.Extras, uint32(*(context[0].VbState)))
+ }
+ if c.CollectionEnabled() {
+ binary.BigEndian.PutUint32(req.Extras[4:8], context[0].CollId)
+ }
+ return nil
+ default:
+ return fmt.Errorf("setVbState Not supported for opcode: %v", req.Opcode.String())
+ }
+}
+
+// Get the value for a key. Uses GET_REPLICA instead of GET when the client
+// is in replica-read mode (see SetReplica).
+func (c *Client) Get(vb uint16, key string, context ...*ClientContext) (*gomemcached.MCResponse, error) {
+ req := &gomemcached.MCRequest{
+ VBucket: vb,
+ Key: []byte(key),
+ Opaque: c.getOpaque(),
+ }
+ if c.replica {
+ req.Opcode = gomemcached.GET_REPLICA
+ } else {
+ req.Opcode = gomemcached.GET
+ }
+ err := c.setContext(req, context...)
+ if err != nil {
+ return nil, err
+ }
+ return c.Send(req)
+}
+
+// GetSubdoc fetches xattrs and/or document paths for key via
+// SUBDOC_MULTI_LOOKUP. Errors whose response status is in the expected set
+// (per IfResStatusError) are suppressed and the response returned as-is.
+func (c *Client) GetSubdoc(vb uint16, key string, subPaths []string, context ...*ClientContext) (*gomemcached.MCResponse, error) {
+ req := &gomemcached.MCRequest{
+ Opcode: gomemcached.SUBDOC_MULTI_LOOKUP,
+ VBucket: vb,
+ Key: []byte(key),
+ Opaque: c.getOpaque(),
+ }
+ err := c.setContext(req, context...)
+ if err != nil {
+ return nil, err
+ }
+ req.Extras, req.Body = GetSubDocVal(subPaths, context)
+
+ res, err := c.Send(req)
+
+ if err != nil && IfResStatusError(res) {
+ return res, err
+ }
+ return res, nil
+}
+
+// SetSubdoc applies a batch of sub-document mutations to key via
+// SUBDOC_MULTI_MUTATION. Extras carry the expiry (4 bytes) plus a doc-flags
+// byte: ADD when addOnly, MKDOC otherwise — omitted entirely if any op is a
+// delete (nil Value), so a delete never creates the document.
+func (c *Client) SetSubdoc(vb uint16, key string, ops []SubDocOp, addOnly bool, exp int, cas uint64, context ...*ClientContext) (
+ *gomemcached.MCResponse, error) {
+
+ if len(ops) == 0 {
+ return nil, fmt.Errorf("Invalid input - no operations")
+ }
+
+ // Pre-size the body to the exact encoded length of all ops.
+ totalBytesLen := 0
+ for i := range ops {
+ totalBytesLen += ops[i].encodedLength()
+ }
+ valueBuf := make([]byte, 0, totalBytesLen)
+ del := false
+ for i := range ops {
+ if ops[i].Value == nil {
+ del = true
+ }
+ valueBuf = ops[i].encode(valueBuf)
+ }
+
+ req := &gomemcached.MCRequest{
+ Opcode: gomemcached.SUBDOC_MULTI_MUTATION,
+ VBucket: vb,
+ Key: []byte(key),
+ Extras: []byte{0, 0, 0, 0, 0},
+ Body: valueBuf,
+ Opaque: c.getOpaque(),
+ Cas: cas,
+ }
+ err := c.setContext(req, context...)
+ if err != nil {
+ return nil, err
+ }
+ binary.BigEndian.PutUint32(req.Extras, uint32(exp))
+ if !del {
+ if addOnly {
+ req.Extras[4] = gomemcached.SUBDOC_FLAG_ADD
+ } else {
+ req.Extras[4] = gomemcached.SUBDOC_FLAG_MKDOC
+ }
+ }
+
+ res, err := c.Send(req)
+ if err != nil {
+ return nil, err
+ } else if !IfResStatusError(res) {
+ return nil, fmt.Errorf("operation failed: %v (len=%v)", res.Status, len(res.Body))
+ }
+ return res, nil
+}
+
+// Retrieve the collections manifest.
+func (c *Client) GetCollectionsManifest() (*gomemcached.MCResponse, error) {
+
+ res, err := c.Send(&gomemcached.MCRequest{
+ Opcode: gomemcached.GET_COLLECTIONS_MANIFEST,
+ Opaque: c.getOpaque(),
+ })
+
+ if err != nil && IfResStatusError(res) {
+ return res, err
+ }
+ return res, nil
+}
+
+// CollectionsGetCID retrieves the collection ID for the given
+// "scope.collection" name via COLLECTIONS_GET_CID.
+func (c *Client) CollectionsGetCID(scope string, collection string) (*gomemcached.MCResponse, error) {
+
+ res, err := c.Send(&gomemcached.MCRequest{
+ Opcode: gomemcached.COLLECTIONS_GET_CID,
+ Body: []byte(scope + "." + collection),
+ Opaque: c.getOpaque(),
+ })
+
+ if err != nil && IfResStatusError(res) {
+ return res, err
+ }
+ return res, nil
+}
+
+// CollectionEnabled reports whether the server acknowledged the collections
+// feature during HELLO negotiation (see EnableFeatures).
+func (c *Client) CollectionEnabled() bool {
+ return atomic.LoadUint32(&c.collectionsEnabled) > 0
+}
+
+// IsFeatureEnabled reports whether the server acknowledged the given feature
+// in the last EnableFeatures exchange.
+func (c *Client) IsFeatureEnabled(feature Feature) bool {
+ enabled, ok := c.enabledFeatures[feature]
+ return ok && enabled
+}
+
+// GetAndTouch gets the value for a key and updates its expiry (GAT);
+// extras carry the new expiration as a big-endian uint32.
+func (c *Client) GetAndTouch(vb uint16, key string, exp int, context ...*ClientContext) (*gomemcached.MCResponse, error) {
+ extraBuf := make([]byte, 4)
+ binary.BigEndian.PutUint32(extraBuf[0:], uint32(exp))
+ req := &gomemcached.MCRequest{
+ Opcode: gomemcached.GAT,
+ VBucket: vb,
+ Key: []byte(key),
+ Extras: extraBuf,
+ Opaque: c.getOpaque(),
+ }
+ err := c.setContext(req, context...)
+ if err != nil {
+ return nil, err
+ }
+ return c.Send(req)
+}
+
+// GetMeta retrieves metadata for a key via GET_META.
+func (c *Client) GetMeta(vb uint16, key string, context ...*ClientContext) (*gomemcached.MCResponse, error) {
+ req := &gomemcached.MCRequest{
+ Opcode: gomemcached.GET_META,
+ VBucket: vb,
+ Key: []byte(key),
+ Opaque: c.getOpaque(),
+ }
+ err := c.setContext(req, context...)
+ if err != nil {
+ return nil, err
+ }
+ return c.Send(req)
+}
+
+// Del deletes a key.
+func (c *Client) Del(vb uint16, key string, context ...*ClientContext) (*gomemcached.MCResponse, error) {
+ req := &gomemcached.MCRequest{
+ Opcode: gomemcached.DELETE,
+ VBucket: vb,
+ Key: []byte(key),
+ Opaque: c.getOpaque(),
+ }
+ err := c.setContext(req, context...)
+ if err != nil {
+ return nil, err
+ }
+ return c.Send(req)
+}
+
+// GetRandomDoc fetches a random document (GET_RANDOM_KEY); the collection
+// and XATTR options travel in extras, hence setExtrasContext.
+func (c *Client) GetRandomDoc(context ...*ClientContext) (*gomemcached.MCResponse, error) {
+ req := &gomemcached.MCRequest{
+ Opcode: gomemcached.GET_RANDOM_KEY,
+ Opaque: c.getOpaque(),
+ }
+ err := c.setExtrasContext(req, context...)
+ if err != nil {
+ return nil, err
+ }
+ return c.Send(req)
+}
+
+// AuthList lists SASL auth mechanisms supported by the server.
+func (c *Client) AuthList() (*gomemcached.MCResponse, error) {
+ return c.Send(&gomemcached.MCRequest{
+ Opcode: gomemcached.SASL_LIST_MECHS})
+}
+
+// Auth performs SASL PLAIN authentication against the server, failing if
+// the server's mechanism list does not advertise PLAIN.
+func (c *Client) Auth(user, pass string) (*gomemcached.MCResponse, error) {
+ res, err := c.AuthList()
+
+ if err != nil {
+ return res, err
+ }
+
+ authMech := string(res.Body)
+ if strings.Index(authMech, "PLAIN") != -1 {
+ return c.AuthPlain(user, pass)
+ }
+ return nil, fmt.Errorf("auth mechanism PLAIN not supported")
+}
+
+// AuthScramSha performs SCRAM-SHA authentication against the server:
+// it picks the best mechanism the server offers, then runs the two-step
+// SASL_AUTH / SASL_STEP challenge-response exchange.
+func (c *Client) AuthScramSha(user, pass string) (*gomemcached.MCResponse, error) {
+ res, err := c.AuthList()
+ if err != nil {
+ return nil, errors.Wrap(err, "Unable to obtain list of methods.")
+ }
+
+ methods := string(res.Body)
+ method, err := scramsha.BestMethod(methods)
+ if err != nil {
+ return nil, errors.Wrap(err,
+ "Unable to select SCRAM-SHA method.")
+ }
+
+ s, err := scramsha.NewScramSha(method)
+ if err != nil {
+ return nil, errors.Wrap(err, "Unable to initialize scramsha.")
+ }
+
+ // UdTag markers wrap user data so log redaction can scrub it.
+ logging.Infof("Using %v authentication for user %v%v%v", method, gomemcached.UdTagBegin, user, gomemcached.UdTagEnd)
+
+ message, err := s.GetStartRequest(user)
+ if err != nil {
+ return nil, errors.Wrapf(err,
+ "Error building start request for user %s.", user)
+ }
+
+ startRequest := &gomemcached.MCRequest{
+ Opcode: gomemcached.SASL_AUTH,
+ Key: []byte(method),
+ Body: []byte(message)}
+
+ startResponse, err := c.Send(startRequest)
+ if err != nil {
+ return nil, errors.Wrap(err, "Error sending start request.")
+ }
+
+ err = s.HandleStartResponse(string(startResponse.Body))
+ if err != nil {
+ return nil, errors.Wrap(err, "Error handling start response.")
+ }
+
+ message = s.GetFinalRequest(pass)
+
+ // send step request
+ finalRequest := &gomemcached.MCRequest{
+ Opcode: gomemcached.SASL_STEP,
+ Key: []byte(method),
+ Body: []byte(message)}
+ finalResponse, err := c.Send(finalRequest)
+ if err != nil {
+ return nil, errors.Wrap(err, "Error sending final request.")
+ }
+
+ err = s.HandleFinalResponse(string(finalResponse.Body))
+ if err != nil {
+ return nil, errors.Wrap(err, "Error handling final response.")
+ }
+
+ return finalResponse, nil
+}
+
+// AuthPlain sends SASL PLAIN credentials ("\x00user\x00pass"). Internal
+// users (prefixed '@') are not logged.
+func (c *Client) AuthPlain(user, pass string) (*gomemcached.MCResponse, error) {
+ if len(user) > 0 && user[0] != '@' {
+ logging.Infof("Using plain authentication for user %v%v%v", gomemcached.UdTagBegin, user, gomemcached.UdTagEnd)
+ }
+ return c.Send(&gomemcached.MCRequest{
+ Opcode: gomemcached.SASL_AUTH,
+ Key: []byte("PLAIN"),
+ Body: []byte(fmt.Sprintf("\x00%s\x00%s", user, pass))})
+}
+
+// SelectBucket switches the connection to the given bucket and remembers it
+// (see LastBucket). Note the bucket is recorded whenever a response arrives,
+// even if that response indicates failure.
+func (c *Client) SelectBucket(bucket string) (*gomemcached.MCResponse, error) {
+ res, err := c.Send(&gomemcached.MCRequest{
+ Opcode: gomemcached.SELECT_BUCKET,
+ Key: []byte(bucket)})
+ if res != nil {
+ c.bucket = bucket
+ }
+ return res, err
+}
+
+// LastBucket returns the bucket name recorded by the last SelectBucket call.
+func (c *Client) LastBucket() string {
+ return c.bucket
+}
+
+// SetReplica toggles replica-read mode: when true, Get issues GET_REPLICA.
+func (c *Client) SetReplica(r bool) {
+ c.replica = r
+}
+
+// Replica reports whether the client is in replica-read mode.
+func (c *Client) Replica() bool {
+ return c.replica
+}
+
+// store is the shared implementation of Add/Set: extras pack flags (high 32
+// bits) and expiry (low 32 bits) as one big-endian uint64, CAS is zero.
+func (c *Client) store(opcode gomemcached.CommandCode, vb uint16,
+ key string, flags int, exp int, body []byte, context ...*ClientContext) (*gomemcached.MCResponse, error) {
+ req := &gomemcached.MCRequest{
+ Opcode: opcode,
+ VBucket: vb,
+ Key: []byte(key),
+ Cas: 0,
+ Opaque: c.getOpaque(),
+ Extras: []byte{0, 0, 0, 0, 0, 0, 0, 0},
+ Body: body}
+
+ err := c.setContext(req, context...)
+ if err != nil {
+ return nil, err
+ }
+ binary.BigEndian.PutUint64(req.Extras, uint64(flags)<<32|uint64(exp))
+ return c.Send(req)
+}
+
+// storeCas is store with an explicit CAS value, used by SetCas.
+func (c *Client) storeCas(opcode gomemcached.CommandCode, vb uint16,
+ key string, flags int, exp int, cas uint64, body []byte, context ...*ClientContext) (*gomemcached.MCResponse, error) {
+ req := &gomemcached.MCRequest{
+ Opcode: opcode,
+ VBucket: vb,
+ Key: []byte(key),
+ Cas: cas,
+ Opaque: c.getOpaque(),
+ Extras: []byte{0, 0, 0, 0, 0, 0, 0, 0},
+ Body: body}
+
+ err := c.setContext(req, context...)
+ if err != nil {
+ return nil, err
+ }
+
+ binary.BigEndian.PutUint64(req.Extras, uint64(flags)<<32|uint64(exp))
+ return c.Send(req)
+}
+
+// Incr increments the value at the given key by amt, initializing it to def
+// (with expiry exp) if absent, and returns the new value. Extras layout:
+// delta(8) initial(8) expiry(4), all big-endian.
+func (c *Client) Incr(vb uint16, key string,
+ amt, def uint64, exp int, context ...*ClientContext) (uint64, error) {
+ req := &gomemcached.MCRequest{
+ Opcode: gomemcached.INCREMENT,
+ VBucket: vb,
+ Key: []byte(key),
+ Extras: make([]byte, 8+8+4),
+ }
+ err := c.setContext(req, context...)
+ if err != nil {
+ return 0, err
+ }
+
+ binary.BigEndian.PutUint64(req.Extras[:8], amt)
+ binary.BigEndian.PutUint64(req.Extras[8:16], def)
+ binary.BigEndian.PutUint32(req.Extras[16:20], uint32(exp))
+
+ resp, err := c.Send(req)
+ if err != nil {
+ return 0, err
+ }
+
+ return binary.BigEndian.Uint64(resp.Body), nil
+}
+
+// Decr decrements the value at the given key by amt, initializing it to def
+// (with expiry exp) if absent, and returns the new value. Same extras layout
+// as Incr.
+func (c *Client) Decr(vb uint16, key string,
+ amt, def uint64, exp int, context ...*ClientContext) (uint64, error) {
+ req := &gomemcached.MCRequest{
+ Opcode: gomemcached.DECREMENT,
+ VBucket: vb,
+ Key: []byte(key),
+ Extras: make([]byte, 8+8+4),
+ }
+ err := c.setContext(req, context...)
+ if err != nil {
+ return 0, err
+ }
+
+ binary.BigEndian.PutUint64(req.Extras[:8], amt)
+ binary.BigEndian.PutUint64(req.Extras[8:16], def)
+ binary.BigEndian.PutUint32(req.Extras[16:20], uint32(exp))
+
+ resp, err := c.Send(req)
+ if err != nil {
+ return 0, err
+ }
+
+ return binary.BigEndian.Uint64(resp.Body), nil
+}
+
+// Add a value for a key (store if not exists).
+func (c *Client) Add(vb uint16, key string, flags int, exp int,
+ body []byte, context ...*ClientContext) (*gomemcached.MCResponse, error) {
+ return c.store(gomemcached.ADD, vb, key, flags, exp, body, context...)
+}
+
+// Set the value for a key, unconditionally.
+func (c *Client) Set(vb uint16, key string, flags int, exp int,
+ body []byte, context ...*ClientContext) (*gomemcached.MCResponse, error) {
+ return c.store(gomemcached.SET, vb, key, flags, exp, body, context...)
+}
+
+// SetCas sets the value for a key, guarded by the given CAS value.
+func (c *Client) SetCas(vb uint16, key string, flags int, exp int, cas uint64,
+ body []byte, context ...*ClientContext) (*gomemcached.MCResponse, error) {
+ return c.storeCas(gomemcached.SET, vb, key, flags, exp, cas, body, context...)
+}
+
+// Append data to the value of a key.
+func (c *Client) Append(vb uint16, key string, data []byte, context ...*ClientContext) (*gomemcached.MCResponse, error) {
+ req := &gomemcached.MCRequest{
+ Opcode: gomemcached.APPEND,
+ VBucket: vb,
+ Key: []byte(key),
+ Cas: 0,
+ Opaque: c.getOpaque(),
+ Body: data}
+
+ err := c.setContext(req, context...)
+ if err != nil {
+ return nil, err
+ }
+ return c.Send(req)
+}
+
+// GetBulk gets keys in bulk.
+//
+// Responses are written into rv keyed by document key; keys that do not
+// exist (KEY_ENOENT) are simply omitted. If subPaths is non-empty a subdoc
+// multi-lookup is issued per key instead of a plain GET.
+func (c *Client) GetBulk(vb uint16, keys []string, rv map[string]*gomemcached.MCResponse, subPaths []string, context ...*ClientContext) error {
+	// Fast path: a single key with no subdoc paths is just a Get.
+	if len(keys) == 1 && len(subPaths) == 0 {
+		res, err := c.Get(vb, keys[0], context...)
+		if res != nil {
+			if res.Status == gomemcached.SUCCESS {
+				rv[keys[0]] = res
+			} else if res.Status == gomemcached.KEY_ENOENT {
+
+				// GetBulk never returns a ENOENT
+				err = nil
+			}
+		}
+		return err
+	}
+
+	stopch := make(chan bool)
+	var wg sync.WaitGroup
+
+	// Make sure the receiver goroutine has fully stopped before returning,
+	// so it cannot touch rv or the connection afterwards.
+	defer func() {
+		close(stopch)
+		wg.Wait()
+	}()
+
+	// Reset the opaque counter before it can overflow; each key is sent with
+	// a consecutive opaque so responses can be matched back to keys.
+	if (math.MaxInt32 - c.opaque) < (uint32(len(keys)) + 1) {
+		c.opaque = uint32(1)
+	}
+
+	opStart := c.opaque
+
+	errch := make(chan error, 2)
+
+	wg.Add(1)
+	go func() {
+		defer func() {
+			if r := recover(); r != nil {
+				logging.Infof("Recovered in f %v", r)
+			}
+			// Always send something so the final "<-errch" below never blocks.
+			errch <- nil
+			wg.Done()
+		}()
+
+		ok := true
+		var savedErr error
+		for ok {
+
+			select {
+			case <-stopch:
+				return
+			default:
+				res, err := c.Receive()
+
+				if err != nil && IfResStatusError(res) {
+
+					// continue receiving in case of KEY_ENOENT and WOULD_THROTTLE
+					if res != nil && res.Status == gomemcached.KEY_ENOENT {
+						continue
+					} else if res != nil && res.Status == gomemcached.WOULD_THROTTLE {
+
+						// if we have been throttled, flag that there are keys still to fetch
+						// the last throttle wins
+						savedErr = err
+						continue
+					} else {
+						c.setHealthy(false) // who knows what's left to be received
+						errch <- err
+						return
+					}
+				} else if res.Opcode == gomemcached.GET ||
+					res.Opcode == gomemcached.GET_REPLICA ||
+					res.Opcode == gomemcached.SUBDOC_GET ||
+					res.Opcode == gomemcached.SUBDOC_MULTI_LOOKUP {
+					// NOTE(review): opaque is uint32, so "opaque < 0" below can never
+					// be true; an underflowed subtraction wraps around and is caught
+					// by the ">= len(keys)" bound instead.
+					opaque := res.Opaque - opStart
+					if opaque < 0 || opaque >= uint32(len(keys)) {
+						// Every now and then we seem to be seeing an invalid opaque
+						// value returned from the server. When this happens log the error
+						// and the calling function will retry the bulkGet. MB-15140
+						logging.Errorf(" Invalid opaque Value. Debug info : Res.opaque : %v(%v), Keys %v, Response received %v \n key list %v this key %v", res.Opaque, opaque, len(keys), res, keys, string(res.Body))
+						c.setHealthy(false) // who knows what's left to be received
+						errch <- fmt.Errorf("Out of Bounds error")
+						return
+					}
+
+					rv[keys[opaque]] = res
+				}
+
+				// The trailing NOOP marks the end of the response stream.
+				if res.Opcode == gomemcached.NOOP {
+					ok = false
+
+					// notify of the throttle
+					if savedErr != nil {
+						errch <- savedErr
+					}
+				}
+			}
+		}
+	}()
+
+	memcachedReqPkt := &gomemcached.MCRequest{
+		VBucket: vb,
+	}
+	if c.replica {
+		memcachedReqPkt.Opcode = gomemcached.GET_REPLICA
+	} else {
+		memcachedReqPkt.Opcode = gomemcached.GET
+	}
+	err := c.setContext(memcachedReqPkt, context...)
+	if err != nil {
+		return err
+	}
+
+	if len(subPaths) > 0 {
+		memcachedReqPkt.Extras, memcachedReqPkt.Body = GetSubDocVal(subPaths, context)
+		memcachedReqPkt.Opcode = gomemcached.SUBDOC_MULTI_LOOKUP
+	}
+
+	// Pipeline one request per key, reusing the same packet structure.
+	for _, k := range keys { // Start of Get request
+		memcachedReqPkt.Key = []byte(k)
+		memcachedReqPkt.Opaque = c.opaque
+
+		err := c.Transmit(memcachedReqPkt)
+		if err != nil {
+			logging.Errorf("Transmit failed in GetBulkAll for key '%v': %v", k, err)
+			return err
+		}
+		c.opaque++
+	} // End of Get request
+
+	// finally transmit a NOOP so the receiver knows when the stream is done
+	err = c.Transmit(&gomemcached.MCRequest{
+		Opcode:  gomemcached.NOOP,
+		VBucket: vb,
+		Opaque:  c.opaque,
+	})
+
+	if err != nil {
+		logging.Errorf(" Transmit of NOOP failed %v", err)
+		return err
+	}
+	c.opaque++
+
+	return <-errch
+}
+
+// GetSubDocVal builds the Extras and Body for a SUBDOC_MULTI_LOOKUP request
+// covering every path in subPaths. Paths are treated as XATTR lookups unless
+// any supplied context sets DocumentSubDocPaths.
+func GetSubDocVal(subPaths []string, context []*ClientContext) (extraBuf, valueBuf []byte) {
+
+	// SubdocFlagXattrPath indicates that the path refers to an Xattr rather than the document body.
+	flag := uint8(gomemcached.SUBDOC_FLAG_XATTR)
+	for i := range context {
+		if context[i].DocumentSubDocPaths {
+			flag = 0
+			break
+		}
+	}
+
+	var ops []string
+	totalBytesLen := 0
+	num := 1
+
+	for _, v := range subPaths {
+		totalBytesLen = totalBytesLen + len([]byte(v))
+		ops = append(ops, v)
+		num = num + 1
+	}
+
+	if flag != 0 {
+		// Xattr retrieval - subdoc multi get
+		// Set deleted true only if it is not expiration
+		if len(subPaths) != 1 || subPaths[0] != "$document.exptime" {
+			// 0x04 is the access-deleted flag carried in the request extras.
+			extraBuf = append(extraBuf, uint8(0x04))
+		}
+	}
+
+	// NOTE(review): num is len(subPaths)+1, so valueBuf carries one spare
+	// 4-byte spec-header slot beyond what the loop below fills — presumably
+	// intentional headroom; confirm before changing.
+	valueBuf = make([]byte, num*4+totalBytesLen)
+
+	//opcode for subdoc get
+	op := gomemcached.SUBDOC_GET
+
+	// Calculate path total bytes
+	// There are 2 ops - get xattrs - both input and $document and get whole doc
+	valIter := 0
+
+	// Each lookup spec is: opcode (1 byte), flags (1 byte),
+	// path length (2 bytes big-endian), then the path bytes.
+	for _, v := range ops {
+		pathBytes := []byte(v)
+		valueBuf[valIter+0] = uint8(op)
+		valueBuf[valIter+1] = flag
+
+		// 2 byte key
+		binary.BigEndian.PutUint16(valueBuf[valIter+2:], uint16(len(pathBytes)))
+
+		// Then n bytes path
+		copy(valueBuf[valIter+4:], pathBytes)
+		valIter = valIter + 4 + len(pathBytes)
+	}
+
+	return
+}
+
+// CreateRangeScan starts a KV range scan on vb over the key range
+// [start, end], with either bound optionally exclusive. When withDocs is
+// false only keys are returned. The response body carries the scan UUID
+// used by ContinueRangeScan/CancelRangeScan.
+func (c *Client) CreateRangeScan(vb uint16, start []byte, excludeStart bool, end []byte, excludeEnd bool,
+	withDocs bool, context ...*ClientContext) (*gomemcached.MCResponse, error) {
+
+	req := &gomemcached.MCRequest{
+		Opcode:   gomemcached.CREATE_RANGE_SCAN,
+		VBucket:  vb,
+		DataType: JSONDataType,
+		Opaque:   c.opaque,
+	}
+	err := c.setContext(req, context...)
+	if err != nil {
+		return nil, err
+	}
+
+	collId := uint32(0)
+	xattrs := false
+	if len(context) > 0 {
+		collId = context[0].CollId
+		xattrs = withDocs && context[0].IncludeXATTRs
+	}
+	req.CollIdLen = 0 // has to be 0 else op is rejected
+
+	// Scan parameters travel as a JSON body; key bounds are base64-encoded.
+	r := make(map[string]interface{})
+	if excludeStart {
+		r["excl_start"] = base64.StdEncoding.EncodeToString(start)
+	} else {
+		r["start"] = base64.StdEncoding.EncodeToString(start)
+	}
+	if excludeEnd {
+		r["excl_end"] = base64.StdEncoding.EncodeToString(end)
+	} else {
+		r["end"] = base64.StdEncoding.EncodeToString(end)
+	}
+	m := make(map[string]interface{})
+	// The target collection is named in the JSON body (hex id) rather than
+	// through the usual collection-id key prefix. (A redundant second
+	// "collId == 0 && len(context) > 0" re-assignment was removed: collId is
+	// already taken from context[0] above whenever context is present.)
+	m["collection"] = fmt.Sprintf("%x", collId)
+	if !withDocs {
+		m["key_only"] = true
+	}
+	m["range"] = r
+	if xattrs {
+		m["include_xattrs"] = true
+	}
+	req.Body, _ = json.Marshal(m)
+
+	c.opaque++
+	return c.Send(req)
+}
+
+// CreateRandomScan starts a KV random-sampling scan on vb returning up to
+// sampleSize documents (or just keys when withDocs is false). The response
+// body carries the scan UUID used by ContinueRangeScan/CancelRangeScan.
+func (c *Client) CreateRandomScan(vb uint16, sampleSize int, withDocs bool, context ...*ClientContext) (
+	*gomemcached.MCResponse, error) {
+
+	req := &gomemcached.MCRequest{
+		Opcode:   gomemcached.CREATE_RANGE_SCAN,
+		VBucket:  vb,
+		DataType: JSONDataType,
+		Opaque:   c.opaque,
+	}
+	err := c.setContext(req, context...)
+	if err != nil {
+		return nil, err
+	}
+
+	collId := uint32(0)
+	xattrs := false
+	if len(context) > 0 {
+		collId = context[0].CollId
+		xattrs = withDocs && context[0].IncludeXATTRs
+	}
+	req.CollIdLen = 0 // has to be 0 else op is rejected
+
+	// The sampling seed must be non-zero; fall back to a fixed seed on the
+	// (unlikely) zero draw.
+	s := make(map[string]interface{})
+	seed := uint32(rand.Int())
+	if seed == 0 {
+		seed = RandomScanSeed
+	}
+	s["seed"] = seed
+	s["samples"] = sampleSize
+	m := make(map[string]interface{})
+	// Collection is named in the JSON body (hex id). (A redundant second
+	// "collId == 0 && len(context) > 0" re-assignment was removed: collId is
+	// already taken from context[0] above whenever context is present.)
+	m["collection"] = fmt.Sprintf("%x", collId)
+	if !withDocs {
+		m["key_only"] = true
+	}
+	m["sampling"] = s
+	if xattrs {
+		m["include_xattrs"] = true
+	}
+	req.Body, _ = json.Marshal(m)
+
+	c.opaque++
+	return c.Send(req)
+}
+
+// ContinueRangeScan requests the next batch of results for the scan
+// identified by uuid, limited by items, timeout (ms) and maxSize (bytes).
+// It only transmits the request; responses are read separately by the caller.
+func (c *Client) ContinueRangeScan(vb uint16, uuid []byte, opaque uint32, items uint32, timeout uint32, maxSize uint32,
+	context ...*ClientContext) error {
+
+	req := &gomemcached.MCRequest{
+		Opcode:  gomemcached.CONTINUE_RANGE_SCAN,
+		VBucket: vb,
+		Extras:  make([]byte, 28),
+		Opaque:  opaque,
+	}
+	if err := c.setContext(req, context...); err != nil {
+		return err
+	}
+	req.CollIdLen = 0 // has to be 0 else op is rejected
+
+	// Extras: 16-byte scan uuid, then item limit, timeout and size limit.
+	copy(req.Extras, uuid)
+	binary.BigEndian.PutUint32(req.Extras[16:], items)
+	binary.BigEndian.PutUint32(req.Extras[20:], timeout)
+	binary.BigEndian.PutUint32(req.Extras[24:], maxSize)
+	return c.Transmit(req)
+}
+
+// CancelRangeScan aborts the scan identified by uuid on the given vbucket.
+func (c *Client) CancelRangeScan(vb uint16, uuid []byte, opaque uint32, context ...*ClientContext) (
+	*gomemcached.MCResponse, error) {
+
+	req := &gomemcached.MCRequest{
+		Opcode:  gomemcached.CANCEL_RANGE_SCAN,
+		VBucket: vb,
+		Extras:  make([]byte, 16),
+		Opaque:  opaque,
+	}
+	if err := c.setContext(req, context...); err != nil {
+		return nil, err
+	}
+	req.CollIdLen = 0 // has to be 0 else op is rejected
+	copy(req.Extras, uuid) // Extras carry only the 16-byte scan uuid
+	return c.Send(req)
+}
+
+// ValidateKey reports whether key exists in vb. It issues a REPLACE with an
+// impossible CAS, so the document is never actually modified: KEY_EEXISTS
+// means the key is present, KEY_ENOENT means it is not.
+func (c *Client) ValidateKey(vb uint16, key string, context ...*ClientContext) (bool, error) {
+	req := &gomemcached.MCRequest{
+		Opcode:  gomemcached.REPLACE,
+		VBucket: vb,
+		Opaque:  c.opaque,
+		Extras:  make([]byte, 8),
+		Key:     []byte(key),
+		Cas:     0xffffffffffffffff, // can never match a real CAS, so REPLACE cannot succeed
+	}
+	err := c.setContext(req, context...)
+	if err != nil {
+		return false, err
+	}
+	resp, err := c.Send(req)
+	// Send may return a nil response (e.g. a transmit failure); the original
+	// dereferenced resp unconditionally, panicking in that case.
+	if resp != nil {
+		if resp.Status == gomemcached.KEY_EEXISTS {
+			return true, nil
+		} else if resp.Status == gomemcached.KEY_ENOENT {
+			return false, nil
+		}
+	}
+	return false, err
+}
+
+// ObservedStatus is the type reported by the Observe method.
+// Values mirror the status byte returned on the wire by the OBSERVE command.
+type ObservedStatus uint8
+
+// Observation status values.
+const (
+	ObservedNotPersisted     = ObservedStatus(0x00) // found, not persisted
+	ObservedPersisted        = ObservedStatus(0x01) // found, persisted
+	ObservedNotFound         = ObservedStatus(0x80) // not found (or a persisted delete)
+	ObservedLogicallyDeleted = ObservedStatus(0x81) // pending deletion (not persisted yet)
+)
+
+// ObserveResult represents the data obtained by an Observe call.
+type ObserveResult struct {
+	Status          ObservedStatus // Whether the value has been persisted/deleted
+	Cas             uint64         // Current value's CAS
+	PersistenceTime time.Duration  // Node's average time to persist a value
+	ReplicationTime time.Duration  // Node's average time to replicate a value
+}
+
+// Observe gets the persistence/replication/CAS state of a key.
+func (c *Client) Observe(vb uint16, key string) (result ObserveResult, err error) {
+	// http://www.couchbase.com/wiki/display/couchbase/Observe
+	// Request body: vbucket id (2 bytes BE), key length (2 bytes BE), key.
+	body := make([]byte, 4+len(key))
+	binary.BigEndian.PutUint16(body[0:2], vb)
+	binary.BigEndian.PutUint16(body[2:4], uint16(len(key)))
+	copy(body[4:4+len(key)], key)
+
+	res, err := c.Send(&gomemcached.MCRequest{
+		Opcode:  gomemcached.OBSERVE,
+		VBucket: vb,
+		Body:    body,
+	})
+	if err != nil {
+		return
+	}
+
+	// Parse the response data from the body:
+	// vbucket (2), keylen (2), key (n), status (1), cas (8).
+	if len(res.Body) < 2+2+1 {
+		err = io.ErrUnexpectedEOF
+		return
+	}
+	outVb := binary.BigEndian.Uint16(res.Body[0:2])
+	keyLen := binary.BigEndian.Uint16(res.Body[2:4])
+	// Re-check length now that the variable-length key size is known.
+	if len(res.Body) < 2+2+int(keyLen)+1+8 {
+		err = io.ErrUnexpectedEOF
+		return
+	}
+	outKey := string(res.Body[4 : 4+keyLen])
+	if outVb != vb || outKey != key {
+		err = fmt.Errorf("observe returned wrong vbucket/key: %d/%q", outVb, outKey)
+		return
+	}
+	result.Status = ObservedStatus(res.Body[4+keyLen])
+	result.Cas = binary.BigEndian.Uint64(res.Body[5+keyLen:])
+	// The response reuses the Cas field to store time statistics:
+	// high 32 bits = persistence time (ms), low 32 bits = replication time (ms).
+	result.PersistenceTime = time.Duration(res.Cas>>32) * time.Millisecond
+	result.ReplicationTime = time.Duration(res.Cas&math.MaxUint32) * time.Millisecond
+	return
+}
+
+// CheckPersistence interprets an Observe result for a write (or deletion)
+// issued with the given cas: persisted reports whether it has reached disk,
+// overwritten whether a different write has since replaced it.
+func (result ObserveResult) CheckPersistence(cas uint64, deletion bool) (persisted bool, overwritten bool) {
+	if result.Status == ObservedNotFound && deletion {
+		// A deletion that is no longer observable has been persisted.
+		return true, false
+	}
+	if result.Cas != cas {
+		// Someone else has written over our value.
+		return false, true
+	}
+	return result.Status == ObservedPersisted, false
+}
+
+// ObserveSeqResult holds the sequence-number based Observe (OBSERVE_SEQNO) result.
+type ObserveSeqResult struct {
+	Failover           uint8  // Set to 1 if a failover took place
+	VbId               uint16 // vbucket id
+	Vbuuid             uint64 // vbucket uuid
+	LastPersistedSeqNo uint64 // last persisted sequence number
+	CurrentSeqNo       uint64 // current sequence number
+	OldVbuuid          uint64 // Old vbucket vbuuid (set only when Failover == 1)
+	LastSeqNo          uint64 // last sequence number received before failover
+}
+
+// ObserveSeq issues OBSERVE_SEQNO for the given vbucket/vbuuid and parses
+// the sequence-number snapshot from the response body.
+func (c *Client) ObserveSeq(vb uint16, vbuuid uint64) (result *ObserveSeqResult, err error) {
+	// http://www.couchbase.com/wiki/display/couchbase/Observe
+	// Request body is just the 8-byte vbucket uuid.
+	body := make([]byte, 8)
+	binary.BigEndian.PutUint64(body[0:8], vbuuid)
+
+	res, err := c.Send(&gomemcached.MCRequest{
+		Opcode:  gomemcached.OBSERVE_SEQNO,
+		VBucket: vb,
+		Body:    body,
+		Opaque:  0x01,
+	})
+	if err != nil {
+		return
+	}
+
+	if res.Status != gomemcached.SUCCESS {
+		return nil, fmt.Errorf(" Observe returned error %v", res.Status)
+	}
+
+	// Parse the response data from the body:
+	// failover flag (1), vbid (2), vbuuid (8), last-persisted seqno (8),
+	// current seqno (8).
+	if len(res.Body) < (1 + 2 + 8 + 8 + 8) {
+		err = io.ErrUnexpectedEOF
+		return
+	}
+
+	result = &ObserveSeqResult{}
+	result.Failover = res.Body[0]
+	result.VbId = binary.BigEndian.Uint16(res.Body[1:3])
+	result.Vbuuid = binary.BigEndian.Uint64(res.Body[3:11])
+	result.LastPersistedSeqNo = binary.BigEndian.Uint64(res.Body[11:19])
+	result.CurrentSeqNo = binary.BigEndian.Uint64(res.Body[19:27])
+
+	// in case of failover processing we can have old vbuuid and the last persisted seq number
+	if result.Failover == 1 && len(res.Body) >= (1+2+8+8+8+8+8) {
+		result.OldVbuuid = binary.BigEndian.Uint64(res.Body[27:35])
+		result.LastSeqNo = binary.BigEndian.Uint64(res.Body[35:43])
+	}
+
+	return
+}
+
+// CasOp is the type of operation to perform on this CAS loop.
+// It also implements error: CAS returns the CasOp itself when the user
+// terminates the loop (see Error below).
+type CasOp uint8
+
+const (
+	// CASStore instructs the server to store the new value normally
+	CASStore = CasOp(iota)
+	// CASQuit instructs the client to stop attempting to CAS, leaving value untouched
+	CASQuit
+	// CASDelete instructs the server to delete the current value
+	CASDelete
+)
+
+// User specified termination is returned as an error.
+func (c CasOp) Error() string {
+	switch c {
+	case CASStore:
+		return "CAS store"
+	case CASQuit:
+		return "CAS quit"
+	case CASDelete:
+		return "CAS delete"
+	}
+	// Unreachable for the three declared values; any other CasOp is a
+	// programmer bug.
+	panic("Unhandled value")
+}
+
+//////// CAS TRANSFORM
+
+// CASState tracks the state of CAS over several operations.
+//
+// This is used directly by CASNext and indirectly by CAS
+type CASState struct {
+	initialized bool   // false on the first call to CASNext, then true
+	Value       []byte // Current value of key; update in place to new value
+	Cas         uint64 // Current CAS value of key
+	Exists      bool   // Does a value exist for the key? (If not, Value will be nil)
+	Err         error  // Error, if any, after CASNext returns false
+	resp        *gomemcached.MCResponse // last server response seen by CASNext
+}
+
+// CASNext is a non-callback, loop-based version of CAS method.
+//
+// Usage is like this:
+//
+// var state memcached.CASState
+//
+// for client.CASNext(vb, key, exp, &state) {
+//     state.Value = some_mutation(state.Value)
+// }
+//
+// if state.Err != nil { ... }
+func (c *Client) CASNext(vb uint16, k string, exp int, state *CASState) bool {
+	if state.initialized {
+		if !state.Exists {
+			// Adding a new key:
+			if state.Value == nil {
+				state.Cas = 0
+				return false // no-op (delete of non-existent value)
+			}
+			state.resp, state.Err = c.Add(vb, k, 0, exp, state.Value)
+		} else {
+			// Updating / deleting a key:
+			req := &gomemcached.MCRequest{
+				Opcode:  gomemcached.DELETE,
+				VBucket: vb,
+				Key:     []byte(k),
+				Cas:     state.Cas}
+			if state.Value != nil {
+				req.Opcode = gomemcached.SET
+				req.Opaque = 0
+				req.Extras = []byte{0, 0, 0, 0, 0, 0, 0, 0}
+				req.Body = state.Value
+
+				flags := 0
+				// SET extras: flags in the high 32 bits, expiry in the low 32.
+				binary.BigEndian.PutUint64(req.Extras, uint64(flags)<<32|uint64(exp))
+			}
+			state.resp, state.Err = c.Send(req)
+		}
+
+		// If the response status is KEY_EEXISTS or NOT_STORED there's a conflict and we'll need to
+		// get the new value (below). Otherwise, we're done (either success or failure) so return:
+		if !(state.resp != nil && (state.resp.Status == gomemcached.KEY_EEXISTS ||
+			state.resp.Status == gomemcached.NOT_STORED)) {
+			// Guard against a nil response (e.g. a transmit error from Add/Send):
+			// the original dereferenced state.resp unconditionally here.
+			if state.resp != nil {
+				state.Cas = state.resp.Cas
+			}
+			return false // either success or fatal error
+		}
+	}
+
+	// Initial call, or after a conflict: GET the current value and CAS and return them:
+	state.initialized = true
+	if state.resp, state.Err = c.Get(vb, k); state.Err == nil {
+		state.Exists = true
+		state.Value = state.resp.Body
+		state.Cas = state.resp.Cas
+	} else if state.resp != nil && state.resp.Status == gomemcached.KEY_ENOENT {
+		// Missing key is not an error for the CAS loop: treat it as "no value".
+		state.Err = nil
+		state.Exists = false
+		state.Value = nil
+		state.Cas = 0
+	} else {
+		return false // fatal error
+	}
+	return true // keep going...
+}
+
+// CasFunc is the type of function used to perform a CAS transform.
+//
+// Input is the current value, or nil if no value exists.
+// The function should return the new value (if any) to set, and the store/quit/delete operation.
+type CasFunc func(current []byte) ([]byte, CasOp)
+
+// CAS performs a CAS transform with the given function.
+//
+// If the value does not exist, a nil current value will be sent to f.
+func (c *Client) CAS(vb uint16, k string, f CasFunc,
+	initexp int) (*gomemcached.MCResponse, error) {
+	var state CASState
+	for c.CASNext(vb, k, initexp, &state) {
+		newValue, op := f(state.Value)
+		// Stop without writing when asked to quit, or when asked to delete a
+		// value that doesn't exist; the CasOp itself serves as the error.
+		if op == CASQuit || (op == CASDelete && state.Value == nil) {
+			return nil, op
+		}
+		state.Value = newValue
+	}
+	return state.resp, state.Err
+}
+
+// StatValue is one of the stats returned from the Stats method.
+// Both key and value arrive from the server as raw bytes and are stored as strings.
+type StatValue struct {
+	// The stat key
+	Key string
+	// The stat value
+	Val string
+}
+
+// Stats requests server-side stats.
+//
+// Use "" as the stat key for toplevel stats.
+func (c *Client) Stats(key string) ([]StatValue, error) {
+	rv := make([]StatValue, 0, 128)
+
+	req := &gomemcached.MCRequest{
+		Opcode: gomemcached.STAT,
+		Key:    []byte(key),
+		Opaque: 918494,
+	}
+
+	if err := c.Transmit(req); err != nil {
+		return rv, err
+	}
+
+	// The server streams one response per stat and terminates the
+	// sequence with an empty key.
+	for {
+		res, _, err := getResponse(c.conn, c.hdrBuf)
+		if err != nil {
+			return rv, err
+		}
+		name := string(res.Key)
+		if name == "" {
+			return rv, nil
+		}
+		rv = append(rv, StatValue{Key: name, Val: string(res.Body)})
+	}
+}
+
+// StatsFunc requests server-side stats and invokes fn once per stat with its
+// raw key and value, avoiding any intermediate allocation.
+//
+// Use "" as the stat key for toplevel stats.
+func (c *Client) StatsFunc(key string, fn func(key, val []byte)) error {
+	req := &gomemcached.MCRequest{
+		Opcode: gomemcached.STAT,
+		Key:    []byte(key),
+		Opaque: 918494,
+	}
+
+	err := c.Transmit(req)
+	if err != nil {
+		return err
+	}
+
+	// Stat responses stream until one arrives with an empty key.
+	for {
+		res, _, err := getResponse(c.conn, c.hdrBuf)
+		if err != nil {
+			return err
+		}
+		if len(res.Key) == 0 {
+			break
+		}
+		fn(res.Key, res.Body)
+	}
+	return nil
+}
+
+// StatsMap requests server-side stats similarly to Stats, but returns
+// them as a map.
+//
+// Use "" as the stat key for toplevel stats.
+func (c *Client) StatsMap(key string) (map[string]string, error) {
+	rv := make(map[string]string)
+
+	req := &gomemcached.MCRequest{
+		Opcode: gomemcached.STAT,
+		Key:    []byte(key),
+		Opaque: 918494,
+	}
+
+	if err := c.Transmit(req); err != nil {
+		return rv, err
+	}
+
+	// Stat responses stream until one arrives with an empty key.
+	for {
+		res, _, err := getResponse(c.conn, c.hdrBuf)
+		if err != nil {
+			return rv, err
+		}
+		name := string(res.Key)
+		if name == "" {
+			return rv, nil
+		}
+		rv[name] = string(res.Body)
+	}
+}
+
+// StatsMapForSpecifiedStats populates the passed-in statsMap, whose keys name
+// the stats to retrieve; any other stat the server streams back is discarded.
+// This avoids allocating a fresh map per call.
+func (c *Client) StatsMapForSpecifiedStats(key string, statsMap map[string]string) error {
+	// Reset previous values so stats absent from this response read as "".
+	for k := range statsMap {
+		statsMap[k] = ""
+	}
+
+	req := &gomemcached.MCRequest{
+		Opcode: gomemcached.STAT,
+		Key:    []byte(key),
+		Opaque: 918494,
+	}
+
+	if err := c.Transmit(req); err != nil {
+		return err
+	}
+
+	// Stat responses stream until one arrives with an empty key.
+	for {
+		res, _, err := getResponse(c.conn, c.hdrBuf)
+		if err != nil {
+			return err
+		}
+		name := string(res.Key)
+		if name == "" {
+			return nil
+		}
+		if _, wanted := statsMap[name]; wanted {
+			statsMap[name] = string(res.Body)
+		}
+	}
+}
+
+// UprGetFailoverLog fetches the failover log for each vbucket in vb,
+// returning a map from vbucket id to its parsed log.
+func (mc *Client) UprGetFailoverLog(vb []uint16) (map[uint16]*FailoverLog, error) {
+
+	rq := &gomemcached.MCRequest{
+		Opcode: gomemcached.UPR_FAILOVERLOG,
+		Opaque: opaqueFailover,
+	}
+
+	failoverLogs := make(map[uint16]*FailoverLog)
+	// One request/response round-trip per vbucket, reusing the same packet.
+	for _, vBucket := range vb {
+		rq.VBucket = vBucket
+		if err := mc.Transmit(rq); err != nil {
+			return nil, err
+		}
+		res, err := mc.Receive()
+
+		if err != nil {
+			return nil, fmt.Errorf("failed to receive %s", err.Error())
+		} else if res.Opcode != gomemcached.UPR_FAILOVERLOG || res.Status != gomemcached.SUCCESS {
+			return nil, fmt.Errorf("unexpected #opcode %v", res.Opcode)
+		}
+
+		flog, err := parseFailoverLog(res.Body)
+		if err != nil {
+			// Report the vbucket that failed (the original formatted the whole
+			// []uint16 slice "vb" with %d, producing garbage output).
+			return nil, fmt.Errorf("unable to parse failover logs for vb %d", vBucket)
+		}
+		failoverLogs[vBucket] = flog
+	}
+
+	return failoverLogs, nil
+}
+
+// Hijack exposes the underlying connection from this client.
+//
+// It also marks the connection as unhealthy since the client will
+// have lost control over the connection and can't otherwise verify
+// things are in good shape for connection pools.
+func (c *Client) Hijack() MemcachedConnection {
+	// Poisoning health first ensures a pool never reuses this client.
+	c.setHealthy(false)
+	return c.conn
+}
+
+// setHealthy atomically records the connection's health state so pool code
+// reading c.healthy concurrently sees a consistent value.
+func (c *Client) setHealthy(healthy bool) {
+	state := UnHealthy
+	if healthy {
+		state = Healthy
+	}
+	atomic.StoreUint32(&c.healthy, state)
+}
+
+// IfResStatusError reports whether response represents a genuine error:
+// either no response at all, or any status other than the subdoc partial
+// results that callers still treat as usable.
+func IfResStatusError(response *gomemcached.MCResponse) bool {
+	if response == nil {
+		return true
+	}
+	switch response.Status {
+	case gomemcached.SUBDOC_BAD_MULTI,
+		gomemcached.SUBDOC_PATH_NOT_FOUND,
+		gomemcached.SUBDOC_MULTI_PATH_FAILURE_DELETED:
+		return false
+	default:
+		return true
+	}
+}
+
+// Conn exposes the client's underlying connection. Unlike Hijack it does
+// not mark the client unhealthy.
+func (c *Client) Conn() io.ReadWriteCloser {
+	return c.conn
+}
+
+// Since the binary request supports only a single collection at a time, it is possible
+// that this may be called multiple times in succession by callers to get vbSeqnos for
+// multiple collections. Thus, caller could pass in a non-nil map so the gomemcached
+// client won't need to allocate new map for each call to prevent too much GC
+// NOTE: If collection is enabled and context is not given, KV will still return stats for default collection
+func (c *Client) GetAllVbSeqnos(vbSeqnoMap map[uint16]uint64, context ...*ClientContext) (map[uint16]uint64, error) {
+	rq := &gomemcached.MCRequest{
+		Opcode: gomemcached.GET_ALL_VB_SEQNOS,
+		Opaque: opaqueGetSeqno,
+	}
+
+	// Encodes the (optional) collection filter into the request.
+	err := c.setVbSeqnoContext(rq, context...)
+	if err != nil {
+		return vbSeqnoMap, err
+	}
+
+	err = c.Transmit(rq)
+	if err != nil {
+		return vbSeqnoMap, err
+	}
+
+	res, err := c.Receive()
+	if err != nil {
+		return vbSeqnoMap, fmt.Errorf("failed to receive: %v", err)
+	}
+
+	vbSeqnosList, err := parseGetSeqnoResp(res.Body)
+	if err != nil {
+		logging.Errorf("Unable to parse : err: %v\n", err)
+		return vbSeqnoMap, err
+	}
+
+	// Allocate on the caller's behalf only when no reusable map was passed in.
+	if vbSeqnoMap == nil {
+		vbSeqnoMap = make(map[uint16]uint64)
+	}
+
+	combineMapWithReturnedList(vbSeqnoMap, vbSeqnosList)
+	return vbSeqnoMap, nil
+}
+
+// combineMapWithReturnedList reconciles a reused vbSeqnoMap with the
+// freshly-returned list of (vbno, seqno) pairs: stale vbuckets are removed
+// and current values are written in, so the map exactly reflects the list.
+func combineMapWithReturnedList(vbSeqnoMap map[uint16]uint64, list *VBSeqnos) {
+	if list == nil {
+		return
+	}
+
+	// If the map contains exactly the existing vbs in the list, no need to modify
+	needToCleanupMap := true
+	if len(vbSeqnoMap) == 0 {
+		needToCleanupMap = false
+	} else if len(vbSeqnoMap) == len(*list) {
+		needToCleanupMap = false
+		for _, pair := range *list {
+			_, vbExists := vbSeqnoMap[uint16(pair[0])]
+			if !vbExists {
+				needToCleanupMap = true
+				break
+			}
+		}
+	}
+
+	if needToCleanupMap {
+		// Collect first, then delete, to avoid mutating the map mid-iteration.
+		var vbsToDelete []uint16
+		for vbInSeqnoMap, _ := range vbSeqnoMap {
+			// If a vb in the seqno map doesn't exist in the returned list, need to clean up
+			// to ensure returning an accurate result
+			found := false
+			var vbno uint16
+			for _, pair := range *list {
+				vbno = uint16(pair[0])
+				if vbno == vbInSeqnoMap {
+					found = true
+					break
+				} else if vbno > vbInSeqnoMap {
+					// definitely not in the list
+					// NOTE(review): this early break assumes the list is sorted
+					// ascending by vbno — confirm against parseGetSeqnoResp.
+					break
+				}
+			}
+			if !found {
+				vbsToDelete = append(vbsToDelete, vbInSeqnoMap)
+			}
+		}
+
+		for _, vbno := range vbsToDelete {
+			delete(vbSeqnoMap, vbno)
+		}
+	}
+
+	// Set the map with data from the list
+	for _, pair := range *list {
+		vbno := uint16(pair[0])
+		seqno := pair[1]
+		vbSeqnoMap[vbno] = seqno
+	}
+}
+
+// GetErrorMap fetches the server's error map — a JSON description of every
+// status code — for the requested error-map version.
+func (c *Client) GetErrorMap(errMapVersion gomemcached.ErrorMapVersion) (map[string]interface{}, error) {
+	if errMapVersion == gomemcached.ErrorMapInvalidVersion {
+		return nil, fmt.Errorf("Invalid version used")
+	}
+
+	// Body is the requested version as a big-endian uint16.
+	payload := make([]byte, 2)
+	binary.BigEndian.PutUint16(payload, uint16(errMapVersion))
+
+	res, err := c.Send(&gomemcached.MCRequest{
+		Opcode: gomemcached.GET_ERROR_MAP,
+		Body:   payload,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	errMap := make(map[string]interface{})
+	if err = json.Unmarshal(res.Body, &errMap); err != nil {
+		return nil, err
+	}
+	return errMap, nil
+}
+
+// EnableDataPool switches the client to pooled receive buffers: getter
+// supplies a buffer of at least the requested size, doneCb returns it.
+// It may succeed only once per client; later calls return an error.
+func (c *Client) EnableDataPool(getter func(uint64) ([]byte, error), doneCb func([]byte)) error {
+	// Two-phase CAS: datapoolInit marks setup in progress (so concurrent
+	// callers fail fast), datapoolInitDone publishes the installed callbacks.
+	if atomic.CompareAndSwapUint32(&c.objPoolEnabled, datapoolDisabled, datapoolInit) {
+		c.datapoolGetter = getter
+		c.datapoolDone = doneCb
+		atomic.CompareAndSwapUint32(&c.objPoolEnabled, datapoolInit, datapoolInitDone)
+		return nil
+	}
+	return fmt.Errorf("Already enabled")
+}
diff --git a/vendor/github.com/couchbase/gomemcached/client/tap_feed.go b/vendor/github.com/couchbase/gomemcached/client/tap_feed.go
new file mode 100644
index 00000000..fd628c5d
--- /dev/null
+++ b/vendor/github.com/couchbase/gomemcached/client/tap_feed.go
@@ -0,0 +1,333 @@
+package memcached
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math"
+
+ "github.com/couchbase/gomemcached"
+ "github.com/couchbase/goutils/logging"
+)
+
+// TAP protocol docs:
+
+// TapOpcode is the tap operation type (found in TapEvent)
+type TapOpcode uint8
+
+// Tap opcode values.
+const (
+	TapBeginBackfill = TapOpcode(iota)
+	TapEndBackfill
+	TapMutation
+	TapDeletion
+	TapCheckpointStart
+	TapCheckpointEnd
+	// tapEndStream is internal-only: runFeed terminates the feed on it
+	// instead of delivering it to the consumer.
+	tapEndStream
+)
+
+// tapMutationExtraLen is the extras length on TAP mutation/deletion packets
+// that carry item flags and expiry.
+const tapMutationExtraLen = 16
+
+// tapOpcodeNames maps tap opcodes to display names for TapOpcode.String.
+// Initialized with a composite literal instead of the former init()
+// function; package-level variables are initialized before init() runs,
+// so behavior is unchanged.
+var tapOpcodeNames = map[TapOpcode]string{
+	TapBeginBackfill:   "BeginBackfill",
+	TapEndBackfill:     "EndBackfill",
+	TapMutation:        "Mutation",
+	TapDeletion:        "Deletion",
+	TapCheckpointStart: "TapCheckpointStart",
+	TapCheckpointEnd:   "TapCheckpointEnd",
+	tapEndStream:       "EndStream",
+}
+
+// String returns the opcode's human-readable name, or "#<n>" for an
+// opcode with no registered name.
+func (opcode TapOpcode) String() string {
+	if name, ok := tapOpcodeNames[opcode]; ok && name != "" {
+		return name
+	}
+	return fmt.Sprintf("#%d", opcode)
+}
+
+// TapEvent is a TAP notification of an operation on the server.
+type TapEvent struct {
+	Opcode     TapOpcode // Type of event
+	VBucket    uint16    // VBucket this event applies to
+	Flags      uint32    // Item flags
+	Expiry     uint32    // Item expiration time
+	Key, Value []byte    // Item key/value
+	Cas        uint64    // Item CAS (set for mutations and deletions)
+}
+
+// makeTapEvent translates a raw TAP request packet into a TapEvent, or
+// returns nil for packets that should not be surfaced to the consumer
+// (NOOPs, protocol-internal opaques, unknown opcodes).
+func makeTapEvent(req gomemcached.MCRequest) *TapEvent {
+	event := TapEvent{
+		VBucket: req.VBucket,
+	}
+	switch req.Opcode {
+	case gomemcached.TAP_MUTATION:
+		event.Opcode = TapMutation
+		event.Key = req.Key
+		event.Value = req.Body
+		event.Cas = req.Cas
+	case gomemcached.TAP_DELETE:
+		event.Opcode = TapDeletion
+		event.Key = req.Key
+		event.Cas = req.Cas
+	case gomemcached.TAP_CHECKPOINT_START:
+		event.Opcode = TapCheckpointStart
+	case gomemcached.TAP_CHECKPOINT_END:
+		event.Opcode = TapCheckpointEnd
+	case gomemcached.TAP_OPAQUE:
+		// Opaque sub-opcode lives at extras offset 8 (4 bytes big-endian).
+		if len(req.Extras) < 8+4 {
+			return nil
+		}
+		switch op := int(binary.BigEndian.Uint32(req.Extras[8:])); op {
+		case gomemcached.TAP_OPAQUE_INITIAL_VBUCKET_STREAM:
+			event.Opcode = TapBeginBackfill
+		case gomemcached.TAP_OPAQUE_CLOSE_BACKFILL:
+			event.Opcode = TapEndBackfill
+		case gomemcached.TAP_OPAQUE_CLOSE_TAP_STREAM:
+			event.Opcode = tapEndStream
+		case gomemcached.TAP_OPAQUE_ENABLE_AUTO_NACK:
+			return nil
+		case gomemcached.TAP_OPAQUE_ENABLE_CHECKPOINT_SYNC:
+			return nil
+		default:
+			logging.Infof("TapFeed: Ignoring TAP_OPAQUE/%d", op)
+			return nil // unknown opaque event
+		}
+	case gomemcached.NOOP:
+		return nil // ignore
+	default:
+		logging.Infof("TapFeed: Ignoring %s", req.Opcode)
+		return nil // unknown event
+	}
+
+	// Mutations/deletions with full-size extras also carry item flags at
+	// offset 8 and expiry at offset 12.
+	if len(req.Extras) >= tapMutationExtraLen &&
+		(event.Opcode == TapMutation || event.Opcode == TapDeletion) {
+
+		event.Flags = binary.BigEndian.Uint32(req.Extras[8:])
+		event.Expiry = binary.BigEndian.Uint32(req.Extras[12:])
+	}
+
+	return &event
+}
+
+// String renders the event for logging/debugging. The original code passed
+// empty format strings to Sprintf with 2 and 5 arguments respectively,
+// producing only "%!(EXTRA ...)" noise; the format strings are restored here.
+func (event TapEvent) String() string {
+	switch event.Opcode {
+	case TapBeginBackfill, TapEndBackfill, TapCheckpointStart, TapCheckpointEnd:
+		return fmt.Sprintf("<TapEvent %s, vbucket=%d>",
+			event.Opcode, event.VBucket)
+	default:
+		return fmt.Sprintf("<TapEvent %s, key=%q (%d bytes) flags=%x, exp=%d>",
+			event.Opcode, event.Key, len(event.Value),
+			event.Flags, event.Expiry)
+	}
+}
+
+// TapArguments are parameters for requesting a TAP feed.
+//
+// Call DefaultTapArguments to get a default one; the zero value requests a
+// full backfill (Backfill == 0) rather than "no backfill".
+type TapArguments struct {
+	// Timestamp of oldest item to send.
+	//
+	// Use TapNoBackfill to suppress all past items.
+	Backfill uint64
+	// If set, server will disconnect after sending existing items.
+	Dump bool
+	// The indices of the vbuckets to watch; empty/nil to watch all.
+	VBuckets []uint16
+	// Transfers ownership of vbuckets during cluster rebalance.
+	Takeover bool
+	// If true, server will wait for client ACK after every notification.
+	SupportAck bool
+	// If true, client doesn't want values so server shouldn't send them.
+	KeysOnly bool
+	// If true, client wants the server to send checkpoint events.
+	Checkpoint bool
+	// Optional identifier to use for this client, to allow reconnects
+	ClientName string
+	// Registers this client (by name) till explicitly deregistered.
+	RegisteredClient bool
+}
+
+// Value for TapArguments.Backfill denoting that no past events at all
+// should be sent.
+const TapNoBackfill = math.MaxUint64
+
+// DefaultTapArguments returns a default set of parameter values to
+// pass to StartTapFeed. It differs from the zero value only in that
+// backfill is disabled.
+func DefaultTapArguments() TapArguments {
+	return TapArguments{
+		Backfill: TapNoBackfill,
+	}
+}
+
+// flags encodes the TAP_CONNECT flag word (4 bytes big-endian) implied by
+// these arguments.
+func (args *TapArguments) flags() []byte {
+	var flags gomemcached.TapConnectFlag
+	// Each boolean option maps to one protocol flag bit.
+	opts := []struct {
+		set  bool
+		flag gomemcached.TapConnectFlag
+	}{
+		{args.Backfill != 0, gomemcached.BACKFILL},
+		{args.Dump, gomemcached.DUMP},
+		{len(args.VBuckets) > 0, gomemcached.LIST_VBUCKETS},
+		{args.Takeover, gomemcached.TAKEOVER_VBUCKETS},
+		{args.SupportAck, gomemcached.SUPPORT_ACK},
+		{args.KeysOnly, gomemcached.REQUEST_KEYS_ONLY},
+		{args.Checkpoint, gomemcached.CHECKPOINT},
+		{args.RegisteredClient, gomemcached.REGISTERED_CLIENT},
+	}
+	for _, o := range opts {
+		if o.set {
+			flags |= o.flag
+		}
+	}
+	encoded := make([]byte, 4)
+	binary.BigEndian.PutUint32(encoded, uint32(flags))
+	return encoded
+}
+
+// must panics on a non-nil error; used for writes into an in-memory
+// buffer, which cannot realistically fail.
+func must(err error) {
+	if err == nil {
+		return
+	}
+	panic(err)
+}
+
+func (args *TapArguments) bytes() (rv []byte) {
+ buf := bytes.NewBuffer([]byte{})
+
+ if args.Backfill > 0 {
+ must(binary.Write(buf, binary.BigEndian, uint64(args.Backfill)))
+ }
+
+ if len(args.VBuckets) > 0 {
+ must(binary.Write(buf, binary.BigEndian, uint16(len(args.VBuckets))))
+ for i := 0; i < len(args.VBuckets); i++ {
+ must(binary.Write(buf, binary.BigEndian, uint16(args.VBuckets[i])))
+ }
+ }
+ return buf.Bytes()
+}
+
+// TapFeed represents a stream of events from a server.
+type TapFeed struct {
+	C      <-chan TapEvent // incoming events; closed when the feed ends
+	Error  error           // terminal error, set before C is closed
+	closer chan bool       // closed by Close() to stop the feed goroutine
+}
+
+// StartTapFeed starts a TAP feed on a client connection.
+//
+// The events can be read from the returned channel. The connection
+// can no longer be used for other purposes; it's now reserved for
+// receiving the TAP messages. To stop receiving events, close the
+// client connection.
+func (mc *Client) StartTapFeed(args TapArguments) (*TapFeed, error) {
+	// TAP_CONNECT carries the flag word in extras and the optional
+	// backfill/vbucket-list payload in the body.
+	rq := &gomemcached.MCRequest{
+		Opcode: gomemcached.TAP_CONNECT,
+		Key:    []byte(args.ClientName),
+		Extras: args.flags(),
+		Body:   args.bytes()}
+
+	err := mc.Transmit(rq)
+	if err != nil {
+		return nil, err
+	}
+
+	// Events are pumped into ch by a dedicated goroutine; see runFeed.
+	ch := make(chan TapEvent)
+	feed := &TapFeed{
+		C:      ch,
+		closer: make(chan bool),
+	}
+	go mc.runFeed(ch, feed)
+	return feed, nil
+}
+
+// TapRecvHook is called after every incoming tap packet is received.
+var TapRecvHook func(*gomemcached.MCRequest, int, error)
+
+// Internal goroutine that reads from the socket and writes events to
+// the channel. It runs until the connection fails, the stream ends, or
+// the feed is closed, and always closes ch and the connection on exit.
+func (mc *Client) runFeed(ch chan TapEvent, feed *TapFeed) {
+	defer close(ch)
+	var headerBuf [gomemcached.HDR_LEN]byte
+loop:
+	for {
+		// Read the next request from the server.
+		//
+		// (Can't call mc.Receive() because it reads a
+		// _response_ not a request.)
+		var pkt gomemcached.MCRequest
+		n, err := pkt.Receive(mc.conn, headerBuf[:])
+		if TapRecvHook != nil {
+			TapRecvHook(&pkt, n, err)
+		}
+
+		if err != nil {
+			// A clean EOF is a normal end of stream, not an error.
+			if err != io.EOF {
+				feed.Error = err
+			}
+			break loop
+		}
+
+		//logging.Infof("** TapFeed received %#v : %q", pkt, pkt.Body)
+
+		if pkt.Opcode == gomemcached.TAP_CONNECT {
+			// This is not an event from the server; it's
+			// an error response to my connect request.
+			feed.Error = fmt.Errorf("tap connection failed: %s", pkt.Body)
+			break loop
+		}
+
+		event := makeTapEvent(pkt)
+		if event != nil {
+			if event.Opcode == tapEndStream {
+				break loop
+			}
+
+			// Deliver the event, or bail out if the feed was closed.
+			select {
+			case ch <- *event:
+			case <-feed.closer:
+				break loop
+			}
+		}
+
+		// If the server requested an ACK (flag bits at Extras[2:4]),
+		// acknowledge before reading the next packet.
+		if len(pkt.Extras) >= 4 {
+			reqFlags := binary.BigEndian.Uint16(pkt.Extras[2:])
+			if reqFlags&gomemcached.TAP_ACK != 0 {
+				if _, err := mc.sendAck(&pkt); err != nil {
+					feed.Error = err
+					break loop
+				}
+			}
+		}
+	}
+	if err := mc.Close(); err != nil {
+		logging.Errorf("Error closing memcached client: %v", err)
+	}
+}
+
+// sendAck acknowledges a server-initiated TAP packet that requested an ACK,
+// echoing its opcode and opaque with a SUCCESS status.
+func (mc *Client) sendAck(pkt *gomemcached.MCRequest) (int, error) {
+	ack := gomemcached.MCResponse{
+		Opcode: pkt.Opcode,
+		Opaque: pkt.Opaque,
+		Status: gomemcached.SUCCESS,
+	}
+	return ack.Transmit(mc.conn)
+}
+
+// Close terminates a TapFeed.
+//
+// Call this if you stop using a TapFeed before its channel ends.
+// Close must be called at most once: it closes the internal channel,
+// and closing it a second time would panic.
+func (feed *TapFeed) Close() {
+	close(feed.closer)
+}
diff --git a/vendor/github.com/couchbase/gomemcached/client/transport.go b/vendor/github.com/couchbase/gomemcached/client/transport.go
new file mode 100644
index 00000000..a47a6ddd
--- /dev/null
+++ b/vendor/github.com/couchbase/gomemcached/client/transport.go
@@ -0,0 +1,85 @@
+package memcached
+
+import (
+ "errors"
+ "io"
+
+ "github.com/couchbase/gomemcached"
+)
+
+var errNoConn = errors.New("no connection")
+
+// UnwrapMemcachedError converts memcached errors to normal responses.
+//
+// If the error is a memcached response, declare the error to be nil
+// so a client can handle the status without worrying about whether it
+// indicates success or failure.
+func UnwrapMemcachedError(rv *gomemcached.MCResponse,
+ err error) (*gomemcached.MCResponse, error) {
+
+ if rv == err {
+ return rv, nil
+ }
+ return rv, err
+}
+
+// ReceiveHook is called after every packet is received (or attempted to be)
+var ReceiveHook func(*gomemcached.MCResponse, int, error)
+
+func getResponse(s io.Reader, hdrBytes []byte) (rv *gomemcached.MCResponse, n int, err error) {
+ if s == nil {
+ return nil, 0, errNoConn
+ }
+
+ rv = &gomemcached.MCResponse{}
+ n, err = rv.Receive(s, hdrBytes)
+
+ if ReceiveHook != nil {
+ ReceiveHook(rv, n, err)
+ }
+
+ if err == nil && (rv.Status != gomemcached.SUCCESS && rv.Status != gomemcached.AUTH_CONTINUE) {
+ err = rv
+ }
+ return rv, n, err
+}
+
+func getResponseWithPool(s io.Reader, hdrBytes []byte, getter func(uint64) ([]byte, error), done func([]byte)) (rv *gomemcached.MCResponse, n int, err error) {
+ if s == nil {
+ return nil, 0, errNoConn
+ }
+
+ rv = &gomemcached.MCResponse{}
+ n, err = rv.ReceiveWithDatapool(s, hdrBytes, getter, done)
+
+ if ReceiveHook != nil {
+ ReceiveHook(rv, n, err)
+ }
+
+ if err == nil && (rv.Status != gomemcached.SUCCESS && rv.Status != gomemcached.AUTH_CONTINUE) {
+ err = rv
+ }
+ return rv, n, err
+}
+
+// TransmitHook is called after each packet is transmitted.
+var TransmitHook func(*gomemcached.MCRequest, int, error)
+
+func transmitRequest(o io.Writer, req *gomemcached.MCRequest) (int, error) {
+ if o == nil {
+ return 0, errNoConn
+ }
+ n, err := req.Transmit(o)
+ if TransmitHook != nil {
+ TransmitHook(req, n, err)
+ }
+ return n, err
+}
+
+func transmitResponse(o io.Writer, res *gomemcached.MCResponse) (int, error) {
+ if o == nil {
+ return 0, errNoConn
+ }
+ n, err := res.Transmit(o)
+ return n, err
+}
diff --git a/vendor/github.com/couchbase/gomemcached/client/upr_event.go b/vendor/github.com/couchbase/gomemcached/client/upr_event.go
new file mode 100644
index 00000000..edf0499d
--- /dev/null
+++ b/vendor/github.com/couchbase/gomemcached/client/upr_event.go
@@ -0,0 +1,477 @@
+package memcached
+
+import (
+ "encoding/binary"
+ "fmt"
+ "math"
+
+ "github.com/couchbase/gomemcached"
+ "github.com/couchbase/gomemcached/internal/flatbuffers/systemevents"
+)
+
+type SystemEventType int
+
+const InvalidSysEvent SystemEventType = -1
+
+const (
+ CollectionCreate SystemEventType = 0
+ CollectionDrop SystemEventType = iota
+ CollectionFlush SystemEventType = iota // KV did not implement
+ ScopeCreate SystemEventType = iota
+ ScopeDrop SystemEventType = iota
+ CollectionChanged SystemEventType = iota
+)
+
+type ScopeCreateEvent interface {
+ GetSystemEventName() (string, error)
+ GetScopeId() (uint32, error)
+ GetManifestId() (uint64, error)
+}
+
+type CollectionCreateEvent interface {
+ GetSystemEventName() (string, error)
+ GetScopeId() (uint32, error)
+ GetCollectionId() (uint32, error)
+ GetManifestId() (uint64, error)
+ GetMaxTTL() (uint32, error)
+}
+
+type CollectionDropEvent interface {
+ GetScopeId() (uint32, error)
+ GetCollectionId() (uint32, error)
+ GetManifestId() (uint64, error)
+}
+
+type ScopeDropEvent interface {
+ GetScopeId() (uint32, error)
+ GetManifestId() (uint64, error)
+}
+
+type CollectionChangedEvent interface {
+ GetCollectionId() (uint32, error)
+ GetManifestId() (uint64, error)
+ GetMaxTTL() (uint32, error)
+}
+
+var ErrorInvalidOp error = fmt.Errorf("Invalid Operation")
+var ErrorInvalidVersion error = fmt.Errorf("Invalid version for parsing")
+var ErrorValueTooShort error = fmt.Errorf("Value length is too short")
+var ErrorNoMaxTTL error = fmt.Errorf("This event has no max TTL")
+
+// UprEvent memcached events for UPR streams.
+type UprEvent struct {
+ Opcode gomemcached.CommandCode // Type of event
+ Status gomemcached.Status // Response status
+ VBucket uint16 // VBucket this event applies to
+ DataType uint8 // data type
+ Opaque uint16 // 16 MSB of opaque
+ VBuuid uint64 // This field is set by downstream
+ Flags uint32 // Item flags
+ Expiry uint32 // Item expiration time
+ Key, Value []byte // Item key/value
+ OldValue []byte // TODO: TBD: old document value
+ Cas uint64 // CAS value of the item
+ Seqno uint64 // sequence number of the mutation
+ RevSeqno uint64 // rev sequence number : deletions
+ LockTime uint32 // Lock time
+ MetadataSize uint16 // Metadata size
+ SnapstartSeq uint64 // start sequence number of this snapshot
+ SnapendSeq uint64 // End sequence number of the snapshot
+ SnapshotType uint32 // 0: disk 1: memory
+ FailoverLog *FailoverLog // Failover log containing vvuid and sequnce number
+ Error error // Error value in case of a failure
+ ExtMeta []byte // Extended Metadata
+ AckSize uint32 // The number of bytes that can be Acked to DCP
+ SystemEvent SystemEventType // Only valid if IsSystemEvent() is true
+ SysEventVersion uint8 // Based on the version, the way Extra bytes is parsed is different
+ ValueLen int // Cache it to avoid len() calls for performance
+ CollectionId uint32 // Valid if Collection is in use
+ StreamId *uint16 // Nil if not in use
+}
+
+// FailoverLog containing vbuuid and sequence number
+type FailoverLog [][2]uint64
+
+// Containing a pair of vbno and the high seqno
+type VBSeqnos [][2]uint64
+
+func makeUprEvent(rq gomemcached.MCRequest, stream *UprStream, bytesReceivedFromDCP int) *UprEvent {
+ event := &UprEvent{
+ Opcode: rq.Opcode,
+ Value: rq.Body,
+ Cas: rq.Cas,
+ ExtMeta: rq.ExtMeta,
+ DataType: rq.DataType,
+ ValueLen: len(rq.Body),
+ SystemEvent: InvalidSysEvent,
+ CollectionId: math.MaxUint32,
+ }
+
+ if stream != nil {
+ event.VBucket = stream.Vbucket
+ event.VBuuid = stream.Vbuuid
+ event.PopulateFieldsBasedOnStreamType(rq, stream.StreamType)
+ } else {
+ event.VBucket = vbOpaque(rq.Opaque)
+ }
+
+ // set AckSize for events that need to be acked to DCP,
+ // i.e., events with CommandCodes that need to be buffered in DCP
+ if _, ok := gomemcached.BufferedCommandCodeMap[rq.Opcode]; ok {
+ event.AckSize = uint32(bytesReceivedFromDCP)
+ }
+
+ // 16 LSBits are used by client library to encode vbucket number.
+ // 16 MSBits are left for application to multiplex on opaque value.
+ event.Opaque = appOpaque(rq.Opaque)
+
+ if len(rq.Extras) >= uprMutationExtraLen &&
+ event.Opcode == gomemcached.UPR_MUTATION {
+
+ event.Seqno = binary.BigEndian.Uint64(rq.Extras[:8])
+ event.RevSeqno = binary.BigEndian.Uint64(rq.Extras[8:16])
+ event.Flags = binary.BigEndian.Uint32(rq.Extras[16:20])
+ event.Expiry = binary.BigEndian.Uint32(rq.Extras[20:24])
+ event.LockTime = binary.BigEndian.Uint32(rq.Extras[24:28])
+ event.MetadataSize = binary.BigEndian.Uint16(rq.Extras[28:30])
+
+ } else if len(rq.Extras) >= uprDeletetionWithDeletionTimeExtraLen &&
+ event.Opcode == gomemcached.UPR_DELETION {
+
+ event.Seqno = binary.BigEndian.Uint64(rq.Extras[:8])
+ event.RevSeqno = binary.BigEndian.Uint64(rq.Extras[8:16])
+ event.Expiry = binary.BigEndian.Uint32(rq.Extras[16:20])
+
+ } else if len(rq.Extras) >= uprDeletetionExtraLen &&
+ event.Opcode == gomemcached.UPR_DELETION ||
+ event.Opcode == gomemcached.UPR_EXPIRATION {
+
+ event.Seqno = binary.BigEndian.Uint64(rq.Extras[:8])
+ event.RevSeqno = binary.BigEndian.Uint64(rq.Extras[8:16])
+ event.MetadataSize = binary.BigEndian.Uint16(rq.Extras[16:18])
+
+ } else if len(rq.Extras) >= uprSnapshotExtraLen &&
+ event.Opcode == gomemcached.UPR_SNAPSHOT {
+
+ event.SnapstartSeq = binary.BigEndian.Uint64(rq.Extras[:8])
+ event.SnapendSeq = binary.BigEndian.Uint64(rq.Extras[8:16])
+ event.SnapshotType = binary.BigEndian.Uint32(rq.Extras[16:20])
+ } else if event.IsSystemEvent() {
+ event.PopulateEvent(rq.Extras)
+ } else if event.IsSeqnoAdv() {
+ event.PopulateSeqnoAdv(rq.Extras)
+ } else if event.IsOsoSnapshot() {
+ event.PopulateOso(rq.Extras)
+ } else if event.IsStreamEnd() {
+ event.PopulateStreamEndFlags(rq.Extras)
+ }
+
+ return event
+}
+
+func (event *UprEvent) PopulateFieldsBasedOnStreamType(rq gomemcached.MCRequest, streamType DcpStreamType) {
+ switch streamType {
+ case CollectionsStreamId:
+ for _, extra := range rq.FramingExtras {
+ streamId, streamIdErr := extra.GetStreamId()
+ if streamIdErr == nil {
+ event.StreamId = &streamId
+ }
+ }
+ // After parsing streamID, still need to populate regular collectionID
+ fallthrough
+ case CollectionsNonStreamId:
+ switch rq.Opcode {
+ // Only these will have CID encoded within the key
+ case gomemcached.UPR_MUTATION,
+ gomemcached.UPR_DELETION,
+ gomemcached.UPR_EXPIRATION:
+ uleb128 := Uleb128(rq.Key)
+ result, bytesShifted := uleb128.ToUint32(rq.Keylen)
+ event.CollectionId = result
+ event.Key = rq.Key[bytesShifted:]
+ default:
+ event.Key = rq.Key
+ }
+ case NonCollectionStream:
+ // Let default behavior be legacy stream type
+ fallthrough
+ default:
+ event.Key = rq.Key
+ }
+}
+
+func (event *UprEvent) String() string {
+ name := gomemcached.CommandNames[event.Opcode]
+ if name == "" {
+ name = fmt.Sprintf("#%d", event.Opcode)
+ }
+ return name
+}
+
+func (event *UprEvent) IsSnappyDataType() bool {
+ return event.Opcode == gomemcached.UPR_MUTATION && (event.DataType&SnappyDataType > 0)
+}
+
+func (event *UprEvent) IsCollectionType() bool {
+ return event.IsSystemEvent() || event.CollectionId <= math.MaxUint32
+}
+
+func (event *UprEvent) IsSystemEvent() bool {
+ return event.Opcode == gomemcached.DCP_SYSTEM_EVENT
+}
+
+func (event *UprEvent) IsSeqnoAdv() bool {
+ return event.Opcode == gomemcached.DCP_SEQNO_ADV
+}
+
+func (event *UprEvent) IsOsoSnapshot() bool {
+ return event.Opcode == gomemcached.DCP_OSO_SNAPSHOT
+}
+
+func (event *UprEvent) IsStreamEnd() bool {
+ return event.Opcode == gomemcached.UPR_STREAMEND
+}
+
+func (event *UprEvent) PopulateEvent(extras []byte) {
+ if len(extras) < dcpSystemEventExtraLen {
+ // Wrong length, don't parse
+ return
+ }
+
+ event.Seqno = binary.BigEndian.Uint64(extras[:8])
+ event.SystemEvent = SystemEventType(binary.BigEndian.Uint32(extras[8:12]))
+ event.SysEventVersion = extras[12]
+}
+
+func (event *UprEvent) PopulateSeqnoAdv(extras []byte) {
+ if len(extras) < dcpSeqnoAdvExtraLen {
+ // Wrong length, don't parse
+ return
+ }
+
+ event.Seqno = binary.BigEndian.Uint64(extras[:8])
+}
+
+func (event *UprEvent) PopulateOso(extras []byte) {
+ if len(extras) < dcpOsoExtraLen {
+ // Wrong length, don't parse
+ return
+ }
+ event.Flags = binary.BigEndian.Uint32(extras[:4])
+}
+
+func (event *UprEvent) PopulateStreamEndFlags(extras []byte) {
+ if len(extras) < dcpStreamEndExtraLen {
+ // Wrong length, don't parse
+ return
+ }
+ event.Flags = binary.BigEndian.Uint32(extras[:4])
+}
+
+func (event *UprEvent) GetSystemEventName() (string, error) {
+ switch event.SystemEvent {
+ case CollectionCreate:
+ fallthrough
+ case ScopeCreate:
+ return string(event.Key), nil
+ default:
+ return "", ErrorInvalidOp
+ }
+}
+
+func (event *UprEvent) GetManifestId() (uint64, error) {
+ // non-flatbuffer-serialised data
+ if event.SysEventVersion < 2 {
+ switch event.SystemEvent {
+ case ScopeDrop:
+ fallthrough
+ case ScopeCreate:
+ fallthrough
+ case CollectionDrop:
+ if event.SysEventVersion > 0 { // "Version 0 only" check for this and the above event types
+ return 0, ErrorInvalidVersion
+ }
+ fallthrough
+ case CollectionCreate:
+ if event.SysEventVersion > 1 { // CollectionCreate supports version 0 & 1
+ return 0, ErrorInvalidVersion
+ }
+ if event.ValueLen < 8 {
+ return 0, ErrorValueTooShort
+ }
+ return binary.BigEndian.Uint64(event.Value[0:8]), nil
+ default:
+ return 0, ErrorInvalidOp
+ }
+ }
+
+ // 'version 2' system events are all flatbuffer serialised
+ switch event.SystemEvent {
+ case ScopeDrop:
+ fb := systemevents.GetRootAsDroppedScope(event.Value, 0)
+ return fb.Uid(), nil
+ case ScopeCreate:
+ fb := systemevents.GetRootAsScope(event.Value, 0)
+ return fb.Uid(), nil
+ case CollectionDrop:
+ fb := systemevents.GetRootAsDroppedCollection(event.Value, 0)
+ return fb.Uid(), nil
+ case CollectionCreate:
+ fallthrough
+ case CollectionChanged:
+ fb := systemevents.GetRootAsCollection(event.Value, 0)
+ return fb.Uid(), nil
+ default:
+ return 0, ErrorInvalidOp
+ }
+}
+
+func (event *UprEvent) GetCollectionId() (uint32, error) {
+ // non-flatbuffer-serialised data
+ if event.SysEventVersion < 2 {
+ switch event.SystemEvent {
+ case CollectionDrop:
+ if event.SysEventVersion > 0 {
+ return 0, ErrorInvalidVersion
+ }
+ fallthrough
+ case CollectionCreate:
+ if event.SysEventVersion > 1 {
+ return 0, ErrorInvalidVersion
+ }
+ if event.ValueLen < 16 {
+ return 0, ErrorValueTooShort
+ }
+ return binary.BigEndian.Uint32(event.Value[12:16]), nil
+ default:
+ return 0, ErrorInvalidOp
+ }
+ }
+
+ // 'version 2' system events are all flatbuffer serialised
+ switch event.SystemEvent {
+ case CollectionDrop:
+ fb := systemevents.GetRootAsDroppedCollection(event.Value, 0)
+ return fb.CollectionId(), nil
+ case CollectionCreate:
+ fallthrough
+ case CollectionChanged:
+ fb := systemevents.GetRootAsCollection(event.Value, 0)
+ return fb.CollectionId(), nil
+ default:
+ return 0, ErrorInvalidOp
+ }
+}
+
+func (event *UprEvent) GetScopeId() (uint32, error) {
+ // non-flatbuffer-serialised data
+ if event.SysEventVersion < 2 {
+ switch event.SystemEvent {
+ case ScopeCreate:
+ fallthrough
+ case ScopeDrop:
+ fallthrough
+ case CollectionDrop:
+ if event.SysEventVersion > 0 { // "Version 0 only" check for this and the above event types
+ return 0, ErrorInvalidVersion
+ }
+ fallthrough
+ case CollectionCreate:
+ if event.SysEventVersion > 1 { // CollectionCreate supports version 0 & 1
+ return 0, ErrorInvalidVersion
+ }
+ if event.ValueLen < 12 {
+ return 0, ErrorValueTooShort
+ }
+ return binary.BigEndian.Uint32(event.Value[8:12]), nil
+ default:
+ return 0, ErrorInvalidOp
+ }
+ }
+
+ // 'version 2' system events are all flatbuffer serialised
+ switch event.SystemEvent {
+ case ScopeDrop:
+ fb := systemevents.GetRootAsDroppedScope(event.Value, 0)
+ return fb.ScopeId(), nil
+ case ScopeCreate:
+ fb := systemevents.GetRootAsScope(event.Value, 0)
+ return fb.ScopeId(), nil
+ case CollectionDrop:
+ fb := systemevents.GetRootAsDroppedCollection(event.Value, 0)
+ return fb.ScopeId(), nil
+ case CollectionCreate:
+ fallthrough
+ case CollectionChanged:
+ fb := systemevents.GetRootAsCollection(event.Value, 0)
+ return fb.ScopeId(), nil
+ default:
+ return 0, ErrorInvalidOp
+ }
+}
+
+func (event *UprEvent) GetMaxTTL() (uint32, error) {
+ // non-flatbuffer-serialised data
+ if event.SysEventVersion < 2 {
+ switch event.SystemEvent {
+ case CollectionCreate:
+ if event.SysEventVersion < 1 {
+ return 0, ErrorNoMaxTTL
+ }
+ if event.ValueLen < 20 {
+ return 0, ErrorValueTooShort
+ }
+ return binary.BigEndian.Uint32(event.Value[16:20]), nil
+ default:
+ return 0, ErrorInvalidOp
+ }
+ }
+
+ // 'version 2' system events are all flatbuffer serialised
+ switch event.SystemEvent {
+ case CollectionCreate:
+ fallthrough
+ case CollectionChanged:
+ fb := systemevents.GetRootAsCollection(event.Value, 0)
+ return fb.MaxTtl(), nil
+ default:
+ return 0, ErrorInvalidOp
+ }
+}
+
+// Only if error is nil:
+// Returns true if event states oso begins
+// Return false if event states oso ends
+func (event *UprEvent) GetOsoBegin() (bool, error) {
+ if !event.IsOsoSnapshot() {
+ return false, ErrorInvalidOp
+ }
+
+ if event.Flags == 1 {
+ return true, nil
+ } else if event.Flags == 2 {
+ return false, nil
+ } else {
+ return false, ErrorInvalidOp
+ }
+}
+
+type Uleb128 []byte
+
+func (u Uleb128) ToUint32(cachedLen int) (result uint32, bytesShifted int) {
+ var shift uint = 0
+
+ for curByte := 0; curByte < cachedLen; curByte++ {
+ oneByte := u[curByte]
+ last7Bits := 0x7f & oneByte
+ result |= uint32(last7Bits) << shift
+ bytesShifted++
+ if oneByte&0x80 == 0 {
+ break
+ }
+ shift += 7
+ }
+
+ return
+}
diff --git a/vendor/github.com/couchbase/gomemcached/client/upr_feed.go b/vendor/github.com/couchbase/gomemcached/client/upr_feed.go
new file mode 100644
index 00000000..841809ec
--- /dev/null
+++ b/vendor/github.com/couchbase/gomemcached/client/upr_feed.go
@@ -0,0 +1,1213 @@
+// go implementation of upr client.
+// See https://github.com/couchbaselabs/cbupr/blob/master/transport-spec.md
+// TODO
+// 1. Use a pool allocator to avoid garbage
+package memcached
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "os"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/couchbase/gomemcached"
+ "github.com/couchbase/goutils/logging"
+)
+
+const UPRDefaultNoopIntervalSeconds = 120
+
+const uprMutationExtraLen = 30
+const uprDeletetionExtraLen = 18
+const uprDeletetionWithDeletionTimeExtraLen = 21
+const uprSnapshotExtraLen = 20
+const dcpSystemEventExtraLen = 13
+const dcpSeqnoAdvExtraLen = 8
+const bufferAckThreshold = 0.2
+const opaqueOpen = 0xBEAF0001
+const opaqueFailover = 0xDEADBEEF
+const opaqueGetSeqno = 0xDEADBEEF
+const dcpOsoExtraLen = 4
+const dcpStreamEndExtraLen = 4
+
+// Counter on top of opaqueOpen that others can draw from for open and control msgs
+var opaqueOpenCtrlWell uint32 = opaqueOpen
+
+type PriorityType string
+
+// high > medium > disabled > low
+const (
+ PriorityDisabled PriorityType = ""
+ PriorityLow PriorityType = "low"
+ PriorityMed PriorityType = "medium"
+ PriorityHigh PriorityType = "high"
+)
+
+type DcpStreamType int32
+
+var UninitializedStream DcpStreamType = -1
+
+const (
+ NonCollectionStream DcpStreamType = 0
+ CollectionsNonStreamId DcpStreamType = iota
+ CollectionsStreamId DcpStreamType = iota
+)
+
+func (t DcpStreamType) String() string {
+ switch t {
+ case UninitializedStream:
+ return "Un-Initialized Stream"
+ case NonCollectionStream:
+ return "Traditional Non-Collection Stream"
+ case CollectionsNonStreamId:
+ return "Collections Stream without StreamID"
+ case CollectionsStreamId:
+ return "Collection Stream with StreamID"
+ default:
+ return "Unknown Stream Type"
+ }
+}
+
+// UprStream is per stream data structure over an UPR Connection.
+type UprStream struct {
+ Vbucket uint16 // Vbucket id
+ Vbuuid uint64 // vbucket uuid
+ StartSeq uint64 // start sequence number
+ EndSeq uint64 // end sequence number
+ connected bool
+ StreamType DcpStreamType
+}
+
+type FeedState int
+
+const (
+ FeedStateInitial = iota
+ FeedStateOpened = iota
+ FeedStateClosed = iota
+)
+
+func (fs FeedState) String() string {
+ switch fs {
+ case FeedStateInitial:
+ return "Initial"
+ case FeedStateOpened:
+ return "Opened"
+ case FeedStateClosed:
+ return "Closed"
+ default:
+ return "Unknown"
+ }
+}
+
+const (
+ CompressionTypeStartMarker = iota // also means invalid
+ CompressionTypeNone = iota
+ CompressionTypeSnappy = iota
+ CompressionTypeEndMarker = iota // also means invalid
+)
+
+// kv_engine/include/mcbp/protocol/datatype.h
+const (
+ JSONDataType uint8 = 1
+ SnappyDataType uint8 = 2
+ XattrDataType uint8 = 4
+)
+
+type UprFeatures struct {
+ Xattribute bool
+ CompressionType int
+ IncludeDeletionTime bool
+ DcpPriority PriorityType
+ EnableExpiry bool
+ EnableStreamId bool
+ EnableOso bool
+ SendStreamEndOnClose bool
+ EnableFlatbuffersSysEvents bool
+ clientReadThreshold int // set by EnableDeadConnDetection()
+}
+
+// Enables client-side dead connection detection. `threshold` should have a minimum value of (2*UPRDefaultNoopInterval).
+//
+// Refer https://github.com/couchbase/kv_engine/blob/df1df5e3986dbca368834e6e32c98103deeeec1b/docs/dcp/documentation/dead-connections.md
+func (f *UprFeatures) EnableDeadConnDetection(thresholdSeconds int) error {
+ minThreshold := 2 * UPRDefaultNoopIntervalSeconds
+
+ if thresholdSeconds < minThreshold {
+ return fmt.Errorf("threshold value (%v) is too low, needs to be atleast %v", thresholdSeconds, minThreshold)
+ }
+
+ f.clientReadThreshold = thresholdSeconds
+ return nil
+}
+
+/**
+ * Used to handle multiple concurrent calls to UprRequestStream() by UprFeed clients
+ * It is expected that a client that calls UprRequestStream() more than once should issue
+ * different "opaque" (version) numbers
+ */
+type opaqueStreamMap map[uint16]*UprStream // opaque -> stream
+
+type vbStreamNegotiator struct {
+ vbHandshakeMap map[uint16]opaqueStreamMap // vbno -> opaqueStreamMap
+ mutex sync.RWMutex
+}
+
+func (negotiator *vbStreamNegotiator) initialize() {
+ negotiator.mutex.Lock()
+ negotiator.vbHandshakeMap = make(map[uint16]opaqueStreamMap)
+ negotiator.mutex.Unlock()
+}
+
+func (negotiator *vbStreamNegotiator) registerRequest(vbno, opaque uint16, vbuuid, startSequence, endSequence uint64) {
+ negotiator.mutex.Lock()
+ defer negotiator.mutex.Unlock()
+
+ var osMap opaqueStreamMap
+ var ok bool
+ if osMap, ok = negotiator.vbHandshakeMap[vbno]; !ok {
+ osMap = make(opaqueStreamMap)
+ negotiator.vbHandshakeMap[vbno] = osMap
+ }
+
+ if _, ok = osMap[opaque]; !ok {
+ osMap[opaque] = &UprStream{
+ Vbucket: vbno,
+ Vbuuid: vbuuid,
+ StartSeq: startSequence,
+ EndSeq: endSequence,
+ }
+ }
+}
+
+func (negotiator *vbStreamNegotiator) getStreamsCntFromMap(vbno uint16) int {
+ negotiator.mutex.RLock()
+ defer negotiator.mutex.RUnlock()
+
+ osmap, ok := negotiator.vbHandshakeMap[vbno]
+ if !ok {
+ return 0
+ } else {
+ return len(osmap)
+ }
+}
+
+func (negotiator *vbStreamNegotiator) getStreamFromMap(vbno, opaque uint16) (*UprStream, error) {
+ negotiator.mutex.RLock()
+ defer negotiator.mutex.RUnlock()
+
+ osmap, ok := negotiator.vbHandshakeMap[vbno]
+ if !ok {
+ return nil, fmt.Errorf("Error: stream for vb: %v does not exist", vbno)
+ }
+
+ stream, ok := osmap[opaque]
+ if !ok {
+ return nil, fmt.Errorf("Error: stream for vb: %v opaque: %v does not exist", vbno, opaque)
+ }
+ return stream, nil
+}
+
+func (negotiator *vbStreamNegotiator) deleteStreamFromMap(vbno, opaque uint16) {
+ negotiator.mutex.Lock()
+ defer negotiator.mutex.Unlock()
+
+ osmap, ok := negotiator.vbHandshakeMap[vbno]
+ if !ok {
+ return
+ }
+
+ delete(osmap, opaque)
+ if len(osmap) == 0 {
+ delete(negotiator.vbHandshakeMap, vbno)
+ }
+}
+
+func (negotiator *vbStreamNegotiator) handleStreamRequest(feed *UprFeed,
+ headerBuf [gomemcached.HDR_LEN]byte, pktPtr *gomemcached.MCRequest, bytesReceivedFromDCP int,
+ response *gomemcached.MCResponse) (*UprEvent, error) {
+ var event *UprEvent
+
+ if feed == nil || response == nil || pktPtr == nil {
+ return nil, errors.New("Invalid inputs")
+ }
+
+ // Get Stream from negotiator map
+ vbno := vbOpaque(response.Opaque)
+ opaque := appOpaque(response.Opaque)
+
+ stream, err := negotiator.getStreamFromMap(vbno, opaque)
+ if err != nil {
+ err = fmt.Errorf("Stream not found for vb %d: %#v", vbno, *pktPtr)
+ logging.Errorf(err.Error())
+ return nil, err
+ }
+
+ status, rb, flog, err := handleStreamRequest(response, headerBuf[:])
+
+ if status == gomemcached.ROLLBACK {
+ event = makeUprEvent(*pktPtr, stream, bytesReceivedFromDCP)
+ event.Status = status
+ // rollback stream
+ logging.Infof("UPR_STREAMREQ with rollback %d for vb %d Failed: %v", rb, vbno, err)
+ negotiator.deleteStreamFromMap(vbno, opaque)
+ } else if status == gomemcached.SUCCESS {
+ event = makeUprEvent(*pktPtr, stream, bytesReceivedFromDCP)
+ event.Seqno = stream.StartSeq
+ event.FailoverLog = flog
+ event.Status = status
+ feed.activateStream(vbno, opaque, stream)
+ feed.negotiator.deleteStreamFromMap(vbno, opaque)
+ logging.Infof("UPR_STREAMREQ for vb %d successful", vbno)
+
+ } else if err != nil {
+ logging.Errorf("UPR_STREAMREQ for vbucket %d erro %s", vbno, err.Error())
+ event = &UprEvent{
+ Opcode: gomemcached.UPR_STREAMREQ,
+ Status: status,
+ VBucket: vbno,
+ Error: err,
+ }
+ negotiator.deleteStreamFromMap(vbno, opaque)
+ }
+ return event, err
+}
+
+func (negotiator *vbStreamNegotiator) cleanUpVbStreams(vbno uint16) {
+ negotiator.mutex.Lock()
+ defer negotiator.mutex.Unlock()
+
+ delete(negotiator.vbHandshakeMap, vbno)
+}
+
+// UprFeed represents an UPR feed. A feed contains a connection to a single
+// host and multiple vBuckets
+type UprFeed struct {
+ // lock for feed.vbstreams
+ muVbstreams sync.RWMutex
+ C <-chan *UprEvent // Exported channel for receiving UPR events
+ negotiator vbStreamNegotiator // Used for pre-vbstreams, concurrent vb stream negotiation
+ vbstreams map[uint16]*UprStream // official live vb->stream mapping
+ closer chan bool // closer
+ conn *Client // connection to UPR producer
+ Error error // error
+ bytesRead uint64 // total bytes read on this connection
+ toAckBytes uint32 // bytes client has read
+ maxAckBytes uint32 // Max buffer control ack bytes
+ stats UprStats // Stats for upr client
+ transmitCh chan *gomemcached.MCRequest // transmit command channel
+ transmitCl chan bool // closer channel for transmit go-routine
+ // if flag is true, upr feed will use ack from client to determine whether/when to send ack to DCP
+ // if flag is false, upr feed will track how many bytes it has sent to client
+ // and use that to determine whether/when to send ack to DCP
+ ackByClient bool
+ feedState FeedState
+ muFeedState sync.RWMutex
+ activatedFeatures UprFeatures
+ collectionEnabled bool // This is needed separately because parsing depends on this
+ // DCP StreamID allows multiple filtered collection streams to share a single DCP Stream
+ // It is not allowed once a regular/legacy stream was started originally
+ streamsType DcpStreamType
+ initStreamTypeOnce sync.Once
+
+ transmitCloseOnce sync.Once
+
+ closeStreamRequested map[uint16]bool
+ closeStreamReqMtx sync.RWMutex
+
+ // Client will wait for `clientReadThreshold` seconds to receive a message
+ // from the Producer before considering the connection to be dead, and disconnect.
+ clientReadThreshold int
+}
+
+// Exported interface - to allow for mocking
+type UprFeedIface interface {
+ Close()
+ Closed() bool
+ CloseStream(vbno, opaqueMSB uint16) error
+ GetError() error
+ GetUprStats() *UprStats
+ ClientAck(event *UprEvent) error
+ GetUprEventCh() <-chan *UprEvent
+ StartFeed() error
+ StartFeedWithConfig(datachan_len int) error
+ UprOpen(name string, sequence uint32, bufSize uint32) error
+ UprOpenWithXATTR(name string, sequence uint32, bufSize uint32) error
+ UprOpenWithFeatures(name string, sequence uint32, bufSize uint32, features UprFeatures) (error, UprFeatures)
+ UprRequestStream(vbno, opaqueMSB uint16, flags uint32, vuuid, startSequence, endSequence, snapStart, snapEnd uint64) error
+ // Set DCP priority on an existing DCP connection. The command is sent asynchronously without waiting for a response
+ SetPriorityAsync(p PriorityType) error
+
+ // Various Collection-Type RequestStreams
+ UprRequestCollectionsStream(vbno, opaqueMSB uint16, flags uint32, vbuuid, startSeq, endSeq, snapStart, snapEnd uint64, filter *CollectionsFilter) error
+}
+
+type UprStats struct {
+ TotalBytes uint64
+ TotalMutation uint64
+ TotalBufferAckSent uint64
+ TotalSnapShot uint64
+}
+
+// error codes
+var ErrorInvalidLog = errors.New("couchbase.errorInvalidLog")
+
+func (flogp *FailoverLog) Latest() (vbuuid, seqno uint64, err error) {
+ if flogp != nil {
+ flog := *flogp
+ latest := flog[len(flog)-1]
+ return latest[0], latest[1], nil
+ }
+ return vbuuid, seqno, ErrorInvalidLog
+}
+
+func (feed *UprFeed) sendCommands(mc *Client) {
+ transmitCh := feed.transmitCh
+ transmitCl := feed.transmitCl
+loop:
+ for {
+ select {
+ case command := <-transmitCh:
+ if err := mc.Transmit(command); err != nil {
+ logging.Errorf("Failed to transmit command %s. Error %s", command.Opcode.String(), err.Error())
+ // get feed to close and runFeed routine to exit
+ feed.Close()
+ break loop
+ }
+
+ case <-transmitCl:
+ break loop
+ }
+ }
+
+ // After sendCommands exits, write to transmitCh will block forever
+ // when we write to transmitCh, e.g., at CloseStream(), we need to check feed closure to have an exit route
+
+ logging.Infof("sendCommands exiting")
+}
+
+// Sets the specified stream as the connected stream for this vbno, and also cleans up negotiator
+func (feed *UprFeed) activateStream(vbno, opaque uint16, stream *UprStream) error {
+ feed.muVbstreams.Lock()
+ defer feed.muVbstreams.Unlock()
+
+ if feed.collectionEnabled {
+ stream.StreamType = feed.streamsType
+ }
+
+ // Set this stream as the officially connected stream for this vb
+ stream.connected = true
+ feed.vbstreams[vbno] = stream
+ return nil
+}
+
+func (feed *UprFeed) cleanUpVbStream(vbno uint16) {
+ feed.muVbstreams.Lock()
+ defer feed.muVbstreams.Unlock()
+
+ delete(feed.vbstreams, vbno)
+}
+
+// NewUprFeed creates a new UPR Feed.
+// TODO: Describe side-effects on bucket instance and its connection pool.
+func (mc *Client) NewUprFeed() (*UprFeed, error) {
+ return mc.NewUprFeedWithConfig(false /*ackByClient*/)
+}
+
+func (mc *Client) NewUprFeedWithConfig(ackByClient bool) (*UprFeed, error) {
+ feed := &UprFeed{
+ conn: mc,
+ closer: make(chan bool, 1),
+ vbstreams: make(map[uint16]*UprStream),
+ transmitCh: make(chan *gomemcached.MCRequest),
+ transmitCl: make(chan bool),
+ ackByClient: ackByClient,
+ collectionEnabled: mc.CollectionEnabled(),
+ streamsType: UninitializedStream,
+ closeStreamRequested: map[uint16]bool{},
+ }
+
+ feed.negotiator.initialize()
+
+ go feed.sendCommands(mc)
+ return feed, nil
+}
+
+func (mc *Client) NewUprFeedIface() (UprFeedIface, error) {
+ return mc.NewUprFeed()
+}
+
+func (mc *Client) NewUprFeedWithConfigIface(ackByClient bool) (UprFeedIface, error) {
+ return mc.NewUprFeedWithConfig(ackByClient)
+}
+
+func doUprOpen(mc *Client, name string, sequence uint32, features UprFeatures) error {
+ rq := &gomemcached.MCRequest{
+ Opcode: gomemcached.UPR_OPEN,
+ Key: []byte(name),
+ Opaque: getUprOpenCtrlOpaque(),
+ }
+
+ rq.Extras = make([]byte, 8)
+ binary.BigEndian.PutUint32(rq.Extras[:4], sequence)
+
+ // opens a producer type connection
+ flags := gomemcached.DCP_PRODUCER
+ if features.Xattribute {
+ flags = flags | gomemcached.DCP_OPEN_INCLUDE_XATTRS
+ }
+ if features.IncludeDeletionTime {
+ flags = flags | gomemcached.DCP_OPEN_INCLUDE_DELETE_TIMES
+ }
+ binary.BigEndian.PutUint32(rq.Extras[4:], flags)
+
+ return sendMcRequestSync(mc, rq)
+}
+
+// Synchronously send a memcached request and wait for the response
+func sendMcRequestSync(mc *Client, req *gomemcached.MCRequest) error {
+ if err := mc.Transmit(req); err != nil {
+ return err
+ }
+
+ if res, err := mc.Receive(); err != nil {
+ return err
+ } else if req.Opcode != res.Opcode {
+ return fmt.Errorf("unexpected #opcode sent %v received %v", req.Opcode, res.Opaque)
+ } else if req.Opaque != res.Opaque {
+ return fmt.Errorf("opaque mismatch, sent %v received %v", req.Opaque, res.Opaque)
+ } else if res.Status != gomemcached.SUCCESS {
+ return fmt.Errorf("error %v", res.Status)
+ }
+ return nil
+}
+
+// UprOpen to connect with a UPR producer.
+// Name: name of the UPR connection
+// sequence: sequence number for the connection
+// bufsize: max size of the application
+func (feed *UprFeed) UprOpen(name string, sequence uint32, bufSize uint32) error {
+ var allFeaturesDisabled UprFeatures
+ err, _ := feed.uprOpen(name, sequence, bufSize, allFeaturesDisabled)
+ return err
+}
+
+// UprOpen with XATTR enabled.
+func (feed *UprFeed) UprOpenWithXATTR(name string, sequence uint32, bufSize uint32) error {
+ var onlyXattrEnabled UprFeatures
+ onlyXattrEnabled.Xattribute = true
+ err, _ := feed.uprOpen(name, sequence, bufSize, onlyXattrEnabled)
+ return err
+}
+
+func (feed *UprFeed) UprOpenWithFeatures(name string, sequence uint32, bufSize uint32, features UprFeatures) (error, UprFeatures) {
+ return feed.uprOpen(name, sequence, bufSize, features)
+}
+
+func (feed *UprFeed) SetPriorityAsync(p PriorityType) error {
+ if !feed.isOpen() {
+ // do not send this command if upr feed is not yet open, otherwise it may interfere with
+ // feed start up process, which relies on synchronous message exchange with DCP.
+ return fmt.Errorf("Upr feed is not open. State=%v", feed.getState())
+ }
+
+ return feed.setPriority(p, false /*sync*/)
+}
+
+func (feed *UprFeed) setPriority(p PriorityType, sync bool) error {
+ rq := &gomemcached.MCRequest{
+ Opcode: gomemcached.UPR_CONTROL,
+ Key: []byte("set_priority"),
+ Body: []byte(p),
+ Opaque: getUprOpenCtrlOpaque(),
+ }
+ if sync {
+ return sendMcRequestSync(feed.conn, rq)
+ } else {
+ return feed.writeToTransmitCh(rq)
+
+ }
+}
+
+func (feed *UprFeed) uprOpen(name string, sequence uint32, bufSize uint32, features UprFeatures) (err error, activatedFeatures UprFeatures) {
+ mc := feed.conn
+
+ // First set this to an invalid value to state that the method hasn't gotten to executing this control yet
+ activatedFeatures.CompressionType = CompressionTypeEndMarker
+
+ if err = doUprOpen(mc, name, sequence, features); err != nil {
+ return
+ }
+
+ activatedFeatures.Xattribute = features.Xattribute
+
+ // send a UPR control message to set the window size for this connection
+ if bufSize > 0 {
+ rq := &gomemcached.MCRequest{
+ Opcode: gomemcached.UPR_CONTROL,
+ Key: []byte("connection_buffer_size"),
+ Body: []byte(strconv.Itoa(int(bufSize))),
+ Opaque: getUprOpenCtrlOpaque(),
+ }
+ err = sendMcRequestSync(feed.conn, rq)
+ if err != nil {
+ return
+ }
+ feed.maxAckBytes = uint32(bufferAckThreshold * float32(bufSize))
+ }
+
+ // enable noop and set noop interval
+ rq := &gomemcached.MCRequest{
+ Opcode: gomemcached.UPR_CONTROL,
+ Key: []byte("enable_noop"),
+ Body: []byte("true"),
+ Opaque: getUprOpenCtrlOpaque(),
+ }
+ err = sendMcRequestSync(feed.conn, rq)
+ if err != nil {
+ return
+ }
+
+ rq = &gomemcached.MCRequest{
+ Opcode: gomemcached.UPR_CONTROL,
+ Key: []byte("set_noop_interval"),
+ Body: []byte(strconv.Itoa(int(UPRDefaultNoopIntervalSeconds))),
+ Opaque: getUprOpenCtrlOpaque(),
+ }
+ err = sendMcRequestSync(feed.conn, rq)
+ if err != nil {
+ return
+ }
+
+ feed.clientReadThreshold = features.clientReadThreshold
+
+ if features.EnableFlatbuffersSysEvents {
+ rq = &gomemcached.MCRequest{
+ Opcode: gomemcached.UPR_CONTROL,
+ Key: []byte("flatbuffers_system_events"),
+ Body: []byte("true"),
+ Opaque: getUprOpenCtrlOpaque(),
+ }
+ err = sendMcRequestSync(feed.conn, rq)
+ if err != nil {
+ return
+ }
+ }
+
+ if features.CompressionType == CompressionTypeSnappy {
+ activatedFeatures.CompressionType = CompressionTypeNone
+ rq = &gomemcached.MCRequest{
+ Opcode: gomemcached.UPR_CONTROL,
+ Key: []byte("force_value_compression"),
+ Body: []byte("true"),
+ Opaque: getUprOpenCtrlOpaque(),
+ }
+ err = sendMcRequestSync(feed.conn, rq)
+ } else if features.CompressionType == CompressionTypeEndMarker {
+ err = fmt.Errorf("UPR_CONTROL Failed - Invalid CompressionType: %v", features.CompressionType)
+ }
+ if err != nil {
+ return
+ }
+ activatedFeatures.CompressionType = features.CompressionType
+
+ if features.DcpPriority != PriorityDisabled {
+ err = feed.setPriority(features.DcpPriority, true /*sync*/)
+ if err == nil {
+ activatedFeatures.DcpPriority = features.DcpPriority
+ } else {
+ return
+ }
+ }
+
+ if features.EnableExpiry {
+ rq := &gomemcached.MCRequest{
+ Opcode: gomemcached.UPR_CONTROL,
+ Key: []byte("enable_expiry_opcode"),
+ Body: []byte("true"),
+ Opaque: getUprOpenCtrlOpaque(),
+ }
+ err = sendMcRequestSync(feed.conn, rq)
+ if err != nil {
+ return
+ }
+ activatedFeatures.EnableExpiry = true
+ }
+
+ if features.EnableStreamId {
+ rq := &gomemcached.MCRequest{
+ Opcode: gomemcached.UPR_CONTROL,
+ Key: []byte("enable_stream_id"),
+ Body: []byte("true"),
+ Opaque: getUprOpenCtrlOpaque(),
+ }
+ err = sendMcRequestSync(feed.conn, rq)
+ if err != nil {
+ return
+ }
+ activatedFeatures.EnableStreamId = true
+ }
+
+ if features.EnableOso {
+ rq := &gomemcached.MCRequest{
+ Opcode: gomemcached.UPR_CONTROL,
+ Key: []byte("enable_out_of_order_snapshots"),
+ Body: []byte("true"),
+ Opaque: getUprOpenCtrlOpaque(),
+ }
+ err = sendMcRequestSync(feed.conn, rq)
+ if err != nil {
+ return
+ }
+ activatedFeatures.EnableOso = true
+ }
+
+ if features.SendStreamEndOnClose {
+ rq := &gomemcached.MCRequest{
+ Opcode: gomemcached.UPR_CONTROL,
+ Key: []byte("send_stream_end_on_client_close_stream"),
+ Body: []byte("true"),
+ Opaque: getUprOpenCtrlOpaque(),
+ }
+ err = sendMcRequestSync(feed.conn, rq)
+ if err != nil {
+ return
+ }
+ activatedFeatures.SendStreamEndOnClose = true
+ }
+
+ // everything is ok so far, set upr feed to open state
+ feed.activatedFeatures = activatedFeatures
+ feed.setOpen()
+ return
+}
+
+// UprRequestStream for a single vbucket.
+func (feed *UprFeed) UprRequestStream(vbno, opaqueMSB uint16, flags uint32,
+ vuuid, startSequence, endSequence, snapStart, snapEnd uint64) error {
+
+ return feed.UprRequestCollectionsStream(vbno, opaqueMSB, flags, vuuid, startSequence, endSequence, snapStart, snapEnd, nil)
+}
+
+func (feed *UprFeed) initStreamType(filter *CollectionsFilter) (err error) {
+ if filter != nil && filter.UseStreamId && !feed.activatedFeatures.EnableStreamId {
+ err = fmt.Errorf("Cannot use streamID based filter if the feed was not started with the streamID feature")
+ return
+ }
+
+ streamInitFunc := func() {
+ if feed.streamsType != UninitializedStream {
+ // Shouldn't happen
+ err = fmt.Errorf("The current feed has already been started in %v mode", feed.streamsType.String())
+ } else {
+ if !feed.collectionEnabled {
+ feed.streamsType = NonCollectionStream
+ } else {
+ if filter != nil && filter.UseStreamId {
+ feed.streamsType = CollectionsStreamId
+ } else {
+ feed.streamsType = CollectionsNonStreamId
+ }
+ }
+ }
+ }
+ feed.initStreamTypeOnce.Do(streamInitFunc)
+ return
+}
+
+func (feed *UprFeed) UprRequestCollectionsStream(vbno, opaqueMSB uint16, flags uint32,
+ vbuuid, startSequence, endSequence, snapStart, snapEnd uint64, filter *CollectionsFilter) error {
+
+ err := feed.initStreamType(filter)
+ if err != nil {
+ return err
+ }
+
+ var mcRequestBody []byte
+ if filter != nil {
+ err = filter.IsValid()
+ if err != nil {
+ return err
+ }
+ mcRequestBody, err = filter.ToStreamReqBody()
+ if err != nil {
+ return err
+ }
+ }
+
+ rq := &gomemcached.MCRequest{
+ Opcode: gomemcached.UPR_STREAMREQ,
+ VBucket: vbno,
+ Opaque: composeOpaque(vbno, opaqueMSB),
+ Body: mcRequestBody,
+ }
+
+ rq.Extras = make([]byte, 48) // #Extras
+ binary.BigEndian.PutUint32(rq.Extras[:4], flags)
+ binary.BigEndian.PutUint32(rq.Extras[4:8], uint32(0))
+ binary.BigEndian.PutUint64(rq.Extras[8:16], startSequence)
+ binary.BigEndian.PutUint64(rq.Extras[16:24], endSequence)
+ binary.BigEndian.PutUint64(rq.Extras[24:32], vbuuid)
+ binary.BigEndian.PutUint64(rq.Extras[32:40], snapStart)
+ binary.BigEndian.PutUint64(rq.Extras[40:48], snapEnd)
+
+ feed.negotiator.registerRequest(vbno, opaqueMSB, vbuuid, startSequence, endSequence)
+ // Any client that has ever called this method, regardless of return code,
+ // should expect a potential UPR_CLOSESTREAM message due to this new map entry prior to Transmit.
+
+ if err = feed.conn.Transmit(rq); err != nil {
+ logging.Errorf("Error in StreamRequest %s", err.Error())
+ // If an error occurs during transmit, then the UPRFeed will keep the stream
+ // in the vbstreams map. This is to prevent nil lookup from any previously
+ // sent stream requests.
+ return err
+ }
+
+ return nil
+}
+
+// CloseStream for specified vbucket.
+func (feed *UprFeed) CloseStream(vbno, opaqueMSB uint16) error {
+
+ err := feed.validateCloseStream(vbno)
+ if err != nil {
+ logging.Infof("CloseStream for %v has been skipped because of error %v", vbno, err)
+ return err
+ }
+
+ closeStream := &gomemcached.MCRequest{
+ Opcode: gomemcached.UPR_CLOSESTREAM,
+ VBucket: vbno,
+ Opaque: composeOpaque(vbno, opaqueMSB),
+ }
+
+ feed.writeToTransmitCh(closeStream)
+
+ feed.closeStreamReqMtx.Lock()
+ feed.closeStreamRequested[vbno] = true
+ feed.closeStreamReqMtx.Unlock()
+ return nil
+}
+
+func (feed *UprFeed) GetUprEventCh() <-chan *UprEvent {
+ return feed.C
+}
+
+func (feed *UprFeed) GetError() error {
+ return feed.Error
+}
+
+const StreamNotRequested = "has not been requested"
+
+func (feed *UprFeed) validateCloseStream(vbno uint16) error {
+ feed.muVbstreams.RLock()
+ nilVbStream := feed.vbstreams[vbno] == nil
+ feed.muVbstreams.RUnlock()
+
+ if nilVbStream && (feed.negotiator.getStreamsCntFromMap(vbno) == 0) {
+ return fmt.Errorf("Stream for vb %d %v", vbno, StreamNotRequested)
+ }
+
+ return nil
+}
+
+func (feed *UprFeed) writeToTransmitCh(rq *gomemcached.MCRequest) error {
+ // write to transmitCh may block forever if sendCommands has exited
+ // check for feed closure to have an exit route in this case
+ select {
+ case <-feed.closer:
+ errMsg := fmt.Sprintf("Abort sending request to transmitCh because feed has been closed. request=%v", rq)
+ logging.Infof(errMsg)
+ return errors.New(errMsg)
+ case feed.transmitCh <- rq:
+ }
+ return nil
+}
+
+// StartFeed to start the UPR feed.
+func (feed *UprFeed) StartFeed() error {
+ return feed.StartFeedWithConfig(10)
+}
+
+func (feed *UprFeed) StartFeedWithConfig(datachan_len int) error {
+ ch := make(chan *UprEvent, datachan_len)
+ feed.C = ch
+ go feed.runFeed(ch)
+ return nil
+}
+
+func parseFailoverLog(body []byte) (*FailoverLog, error) {
+ if len(body)%16 != 0 {
+ err := fmt.Errorf("invalid body length %v, in failover-log", len(body))
+ return nil, err
+ }
+ log := make(FailoverLog, len(body)/16)
+ for i, j := 0, 0; i < len(body); i += 16 {
+ vuuid := binary.BigEndian.Uint64(body[i : i+8])
+ seqno := binary.BigEndian.Uint64(body[i+8 : i+16])
+ log[j] = [2]uint64{vuuid, seqno}
+ j++
+ }
+ return &log, nil
+}
+
+func parseGetSeqnoResp(body []byte) (*VBSeqnos, error) {
+ // vbno of 2 bytes + seqno of 8 bytes
+ var entryLen int = 10
+
+ if len(body)%entryLen != 0 {
+ err := fmt.Errorf("invalid body length %v, in getVbSeqno", len(body))
+ return nil, err
+ }
+ vbSeqnos := make(VBSeqnos, len(body)/entryLen)
+ for i, j := 0, 0; i < len(body); i += entryLen {
+ vbno := binary.BigEndian.Uint16(body[i : i+2])
+ seqno := binary.BigEndian.Uint64(body[i+2 : i+10])
+ vbSeqnos[j] = [2]uint64{uint64(vbno), seqno}
+ j++
+ }
+ return &vbSeqnos, nil
+}
+
+func handleStreamRequest(
+ res *gomemcached.MCResponse,
+ headerBuf []byte,
+) (gomemcached.Status, uint64, *FailoverLog, error) {
+
+ var rollback uint64
+ var err error
+
+ switch {
+ case res.Status == gomemcached.ROLLBACK:
+ logging.Infof("Rollback response. body=%v, headerBuf=%v\n", res.Body, headerBuf)
+ rollback = binary.BigEndian.Uint64(res.Body)
+ logging.Infof("Rollback seqno is %v for response with opaque %v\n", rollback, res.Opaque)
+ return res.Status, rollback, nil, nil
+
+ case res.Status != gomemcached.SUCCESS:
+ err = fmt.Errorf("unexpected status %v for response with opaque %v", res.Status, res.Opaque)
+ return res.Status, 0, nil, err
+ }
+
+ flog, err := parseFailoverLog(res.Body[:])
+ return res.Status, rollback, flog, err
+}
+
+// generate stream end responses for all active vb streams
+func (feed *UprFeed) doStreamClose(ch chan *UprEvent) {
+ feed.muVbstreams.RLock()
+
+ uprEvents := make([]*UprEvent, len(feed.vbstreams))
+ index := 0
+ for vbno, stream := range feed.vbstreams {
+ uprEvent := &UprEvent{
+ VBucket: vbno,
+ VBuuid: stream.Vbuuid,
+ Opcode: gomemcached.UPR_STREAMEND,
+ }
+ uprEvents[index] = uprEvent
+ index++
+ }
+
+ // release the lock before sending uprEvents to ch, which may block
+ feed.muVbstreams.RUnlock()
+
+loop:
+ for _, uprEvent := range uprEvents {
+ select {
+ case ch <- uprEvent:
+ case <-feed.closer:
+ logging.Infof("Feed has been closed. Aborting doStreamClose.")
+ break loop
+ }
+ }
+}
+
+func (feed *UprFeed) runFeed(ch chan *UprEvent) {
+ defer close(ch)
+ defer logging.Infof("runFeed exiting")
+ defer feed.Close()
+
+ var headerBuf [gomemcached.HDR_LEN]byte
+ var pkt gomemcached.MCRequest
+ var event *UprEvent
+
+ rawConn := feed.conn.Hijack()
+ uprStats := &feed.stats
+
+ for {
+ select {
+ case <-feed.closer:
+ logging.Infof("Feed has been closed. Exiting.")
+ return
+ default:
+ if feed.clientReadThreshold > 0 {
+ // refreshing the connection by extending the deadline to receive the next event by;
+ // after which the Producer will be considered "stuck" & disconnected from
+ rawConn.SetReadDeadline(time.Now().Add(time.Duration(feed.clientReadThreshold) * time.Second))
+ }
+ bytes, err := pkt.Receive(rawConn, headerBuf[:])
+
+ if err != nil {
+ if errors.Is(err, os.ErrDeadlineExceeded) {
+ logging.Errorf("failed to receive a message from the DCP Producer before crossing the dead-connection threshold (%vs)", feed.clientReadThreshold)
+ } else {
+ logging.Errorf("Error in receive %s", err.Error())
+ }
+ feed.Error = err
+ // send all the stream close messages to the client
+ feed.doStreamClose(ch)
+ return
+ }
+
+ event = nil
+ res := &gomemcached.MCResponse{
+ Opcode: pkt.Opcode,
+ Cas: pkt.Cas,
+ Opaque: pkt.Opaque,
+ Status: gomemcached.Status(pkt.VBucket),
+ Extras: pkt.Extras,
+ Key: pkt.Key,
+ Body: pkt.Body,
+ }
+
+ vb := vbOpaque(pkt.Opaque)
+ appOpaque := appOpaque(pkt.Opaque)
+ uprStats.TotalBytes = uint64(bytes)
+
+ feed.muVbstreams.RLock()
+ stream := feed.vbstreams[vb]
+ feed.muVbstreams.RUnlock()
+
+ switch pkt.Opcode {
+ case gomemcached.UPR_STREAMREQ:
+ event, err = feed.negotiator.handleStreamRequest(feed, headerBuf, &pkt, bytes, res)
+ if err != nil {
+ logging.Infof(err.Error())
+ return
+ }
+ case gomemcached.UPR_MUTATION,
+ gomemcached.UPR_DELETION,
+ gomemcached.UPR_EXPIRATION:
+ if stream == nil {
+ logging.Infof("Stream not found for vb %d: %#v", vb, pkt)
+ return
+ }
+ event = makeUprEvent(pkt, stream, bytes)
+ uprStats.TotalMutation++
+
+ case gomemcached.UPR_STREAMEND:
+ feed.closeStreamReqMtx.RLock()
+ closeStreamRequested := feed.closeStreamRequested[vb]
+ feed.closeStreamReqMtx.RUnlock()
+ if stream == nil && (!closeStreamRequested && !feed.activatedFeatures.SendStreamEndOnClose) {
+ logging.Infof("Stream not found for vb %d: %#v", vb, pkt)
+ return
+ }
+ //stream has ended
+ event = makeUprEvent(pkt, stream, bytes)
+ logging.Infof("Stream Ended for vb %d", vb)
+
+ feed.negotiator.deleteStreamFromMap(vb, appOpaque)
+ feed.cleanUpVbStream(vb)
+
+ case gomemcached.UPR_SNAPSHOT:
+ if stream == nil {
+ logging.Infof("Stream not found for vb %d: %#v", vb, pkt)
+ return
+ }
+ // snapshot marker
+ event = makeUprEvent(pkt, stream, bytes)
+ uprStats.TotalSnapShot++
+
+ case gomemcached.UPR_FLUSH:
+ if stream == nil {
+ logging.Infof("Stream not found for vb %d: %#v", vb, pkt)
+ return
+ }
+ // special processing for flush ?
+ event = makeUprEvent(pkt, stream, bytes)
+
+ case gomemcached.UPR_CLOSESTREAM:
+ feed.closeStreamReqMtx.RLock()
+ closeStreamRequested := feed.closeStreamRequested[vb]
+ feed.closeStreamReqMtx.RUnlock()
+ if stream == nil && (!closeStreamRequested && !feed.activatedFeatures.SendStreamEndOnClose) {
+ logging.Infof("Stream not found for vb %d: %#v", vb, pkt)
+ return
+ }
+ event = makeUprEvent(pkt, stream, bytes)
+ event.Opcode = gomemcached.UPR_STREAMEND // opcode re-write !!
+ logging.Infof("Stream Closed for vb %d StreamEnd simulated", vb)
+
+ feed.negotiator.deleteStreamFromMap(vb, appOpaque)
+ feed.cleanUpVbStream(vb)
+
+ case gomemcached.UPR_ADDSTREAM:
+ logging.Infof("Opcode %v not implemented", pkt.Opcode)
+
+ case gomemcached.UPR_CONTROL, gomemcached.UPR_BUFFERACK:
+ if res.Status != gomemcached.SUCCESS {
+ logging.Infof("Opcode %v received status %d", pkt.Opcode.String(), res.Status)
+ }
+
+ case gomemcached.UPR_NOOP:
+ // send a NOOP back
+ noop := &gomemcached.MCResponse{
+ Opcode: gomemcached.UPR_NOOP,
+ Opaque: pkt.Opaque,
+ }
+
+ if err := feed.conn.TransmitResponse(noop); err != nil {
+ logging.Warnf("failed to transmit command %s. Error %s", noop.Opcode.String(), err.Error())
+ }
+ case gomemcached.DCP_SYSTEM_EVENT:
+ if stream == nil {
+ logging.Infof("Stream not found for vb %d: %#v", vb, pkt)
+ return
+ }
+ event = makeUprEvent(pkt, stream, bytes)
+ case gomemcached.UPR_FAILOVERLOG:
+ logging.Infof("Failover log for vb %d received: %v", vb, pkt)
+ case gomemcached.DCP_SEQNO_ADV:
+ if stream == nil {
+ logging.Infof("Stream not found for vb %d: %#v", vb, pkt)
+ return
+ }
+ event = makeUprEvent(pkt, stream, bytes)
+ case gomemcached.DCP_OSO_SNAPSHOT:
+ if stream == nil {
+ logging.Infof("Stream not found for vb %d: %#v", vb, pkt)
+ return
+ }
+ event = makeUprEvent(pkt, stream, bytes)
+ default:
+ logging.Infof("Recived an unknown response for vbucket %d", vb)
+ }
+
+ if event != nil {
+ select {
+ case ch <- event:
+ case <-feed.closer:
+ logging.Infof("Feed has been closed. Skip sending events. Exiting.")
+ return
+ }
+
+ feed.muVbstreams.RLock()
+ l := len(feed.vbstreams)
+ feed.muVbstreams.RUnlock()
+
+ if event.Opcode == gomemcached.UPR_CLOSESTREAM && l == 0 {
+ logging.Infof("No more streams")
+ }
+ }
+
+ if !feed.ackByClient {
+ // if client does not ack, do the ack check now
+ feed.sendBufferAckIfNeeded(event)
+ }
+ }
+ }
+}
+
+// Client, after completing processing of an UprEvent, need to call this API to notify UprFeed,
+// so that UprFeed can update its ack bytes stats and send ack to DCP if needed
+// Client needs to set ackByClient flag to true in NewUprFeedWithConfig() call as a prerequisite for this call to work
+// This API is not thread safe. Caller should NOT have more than one goroutine calling this API
+func (feed *UprFeed) ClientAck(event *UprEvent) error {
+ if !feed.ackByClient {
+ return errors.New("Upr feed does not have ackByclient flag set")
+ }
+ feed.sendBufferAckIfNeeded(event)
+ return nil
+}
+
+// increment ack bytes if the event needs to be acked to DCP
+// send buffer ack if enough ack bytes have been accumulated
+func (feed *UprFeed) sendBufferAckIfNeeded(event *UprEvent) {
+ if event == nil || event.AckSize == 0 {
+ // this indicates that there is no need to ack to DCP
+ return
+ }
+
+ totalBytes := feed.toAckBytes + event.AckSize
+ if totalBytes > feed.maxAckBytes {
+ feed.toAckBytes = 0
+ feed.sendBufferAck(totalBytes)
+ } else {
+ feed.toAckBytes = totalBytes
+ }
+}
+
+// send buffer ack to dcp
+func (feed *UprFeed) sendBufferAck(sendSize uint32) {
+ bufferAck := &gomemcached.MCRequest{
+ Opcode: gomemcached.UPR_BUFFERACK,
+ }
+ bufferAck.Extras = make([]byte, 4)
+ binary.BigEndian.PutUint32(bufferAck.Extras[:4], uint32(sendSize))
+ feed.writeToTransmitCh(bufferAck)
+ feed.stats.TotalBufferAckSent++
+}
+
+func (feed *UprFeed) GetUprStats() *UprStats {
+ return &feed.stats
+}
+
+func composeOpaque(vbno, opaqueMSB uint16) uint32 {
+ return (uint32(opaqueMSB) << 16) | uint32(vbno)
+}
+
+func getUprOpenCtrlOpaque() uint32 {
+ return atomic.AddUint32(&opaqueOpenCtrlWell, 1)
+}
+
+func appOpaque(opq32 uint32) uint16 {
+ return uint16((opq32 & 0xFFFF0000) >> 16)
+}
+
+func vbOpaque(opq32 uint32) uint16 {
+ return uint16(opq32 & 0xFFFF)
+}
+
+// Close this UprFeed.
+func (feed *UprFeed) Close() {
+ feed.muFeedState.Lock()
+ if feed.feedState != FeedStateClosed {
+ close(feed.closer)
+ feed.feedState = FeedStateClosed
+ feed.negotiator.initialize()
+ }
+ feed.muFeedState.Unlock()
+ feed.transmitCloseOnce.Do(func() {
+ close(feed.transmitCl)
+ })
+}
+
+// check if the UprFeed has been closed
+func (feed *UprFeed) Closed() bool {
+ feed.muFeedState.RLock()
+ defer feed.muFeedState.RUnlock()
+ return feed.feedState == FeedStateClosed
+}
+
+// set upr feed to opened state after initialization is done
+func (feed *UprFeed) setOpen() {
+ feed.muFeedState.Lock()
+ defer feed.muFeedState.Unlock()
+ feed.feedState = FeedStateOpened
+}
+
+func (feed *UprFeed) isOpen() bool {
+ feed.muFeedState.RLock()
+ defer feed.muFeedState.RUnlock()
+ return feed.feedState == FeedStateOpened
+}
+
+func (feed *UprFeed) getState() FeedState {
+ feed.muFeedState.RLock()
+ defer feed.muFeedState.RUnlock()
+ return feed.feedState
+}
diff --git a/vendor/github.com/couchbase/gomemcached/flexibleFraming.go b/vendor/github.com/couchbase/gomemcached/flexibleFraming.go
new file mode 100644
index 00000000..82ea6ac5
--- /dev/null
+++ b/vendor/github.com/couchbase/gomemcached/flexibleFraming.go
@@ -0,0 +1,416 @@
+package gomemcached
+
+import (
+ "encoding/binary"
+ "fmt"
+)
+
+type FrameObjType int
+
+const (
+ FrameBarrier FrameObjType = iota // 0
+ FrameDurability FrameObjType = iota // 1
+ FrameDcpStreamId FrameObjType = iota // 2
+ FrameOpenTracing FrameObjType = iota // 3
+ FrameImpersonate FrameObjType = iota // 4
+ FramePreserveExpiry FrameObjType = iota // 5
+)
+
+const MAX_USER_LEN = 128
+const FAST_FRAME_LEN = 15
+
+type FrameInfo struct {
+ ObjId FrameObjType
+ ObjLen int
+ ObjData []byte
+}
+
+var ErrorInvalidOp error = fmt.Errorf("Specified method is not applicable")
+var ErrorObjLenNotMatch error = fmt.Errorf("Object length does not match data")
+
+func (f *FrameInfo) Validate() error {
+ switch f.ObjId {
+ case FrameBarrier:
+ if f.ObjLen != 0 {
+ return fmt.Errorf("Invalid FrameBarrier - length is %v\n", f.ObjLen)
+ } else if f.ObjLen != len(f.ObjData) {
+ return ErrorObjLenNotMatch
+ }
+ case FrameDurability:
+ if f.ObjLen != 1 && f.ObjLen != 3 {
+ return fmt.Errorf("Invalid FrameDurability - length is %v\n", f.ObjLen)
+ } else if f.ObjLen != len(f.ObjData) {
+ return ErrorObjLenNotMatch
+ }
+ case FrameDcpStreamId:
+ if f.ObjLen != 2 {
+ return fmt.Errorf("Invalid FrameDcpStreamId - length is %v\n", f.ObjLen)
+ } else if f.ObjLen != len(f.ObjData) {
+ return ErrorObjLenNotMatch
+ }
+ case FrameOpenTracing:
+ if f.ObjLen != 1 {
+ return fmt.Errorf("Invalid FrameOpenTracing - length is %v\n", f.ObjLen)
+ } else if f.ObjLen != len(f.ObjData) {
+ return ErrorObjLenNotMatch
+ }
+ case FrameImpersonate:
+ case FramePreserveExpiry:
+ if f.ObjLen != 0 {
+ return fmt.Errorf("Invalid FramePreserveExpiry - length is %v\n", f.ObjLen)
+ } else if f.ObjLen != len(f.ObjData) {
+ return ErrorObjLenNotMatch
+ }
+ default:
+ return fmt.Errorf("Unknown FrameInfo type")
+ }
+ return nil
+}
+
+func (f *FrameInfo) GetStreamId() (uint16, error) {
+ if f.ObjId != FrameDcpStreamId {
+ return 0, ErrorInvalidOp
+ }
+
+ var output uint16
+ output = uint16(f.ObjData[0])
+ output = output << 8
+ output |= uint16(f.ObjData[1])
+ return output, nil
+}
+
+type DurabilityLvl uint8
+
+const (
+ DuraInvalid DurabilityLvl = iota // Not used (0x0)
+ DuraMajority DurabilityLvl = iota // (0x01)
+ DuraMajorityAndPersistOnMaster DurabilityLvl = iota // (0x02)
+ DuraPersistToMajority DurabilityLvl = iota // (0x03)
+)
+
+func (f *FrameInfo) GetDurabilityRequirements() (lvl DurabilityLvl, timeoutProvided bool, timeoutMs uint16, err error) {
+ if f.ObjId != FrameDurability {
+ err = ErrorInvalidOp
+ return
+ }
+ if f.ObjLen != 1 && f.ObjLen != 3 {
+ err = ErrorObjLenNotMatch
+ return
+ }
+
+ lvl = DurabilityLvl(uint8(f.ObjData[0]))
+
+ if f.ObjLen == 3 {
+ timeoutProvided = true
+ timeoutMs = binary.BigEndian.Uint16(f.ObjData[1:2])
+ }
+
+ return
+}
+
+func incrementMarker(bitsToBeIncremented, byteIncrementCnt *int, framingElen, curObjIdx int) (int, error) {
+ for *bitsToBeIncremented >= 8 {
+ *byteIncrementCnt++
+ *bitsToBeIncremented -= 8
+ }
+ marker := curObjIdx + *byteIncrementCnt
+ if marker > framingElen {
+ return -1, fmt.Errorf("Out of bounds")
+ }
+ return marker, nil
+}
+
+func (f *FrameInfo) Bytes() ([]byte, bool) {
+ return obj2Bytes(f.ObjId, f.ObjLen, f.ObjData)
+}
+
+func frameLen(len int) int {
+ if len < FAST_FRAME_LEN {
+ return len + 1
+ }
+ return len + 2
+}
+
+// halfByteRemaining will always be false, handled internally
+func obj2Bytes(id FrameObjType, len int, data []byte) (output []byte, halfByteRemaining bool) {
+ if len < FAST_FRAME_LEN {
+
+ // ObjIdentifier - 4 bits + ObjLength - 4 bits
+ var idAndLen uint8
+ idAndLen |= uint8(id) << 4
+ idAndLen |= uint8(len)
+ output = append(output, byte(idAndLen))
+
+ } else {
+
+ // ObjIdentifier - 4 bits + ObjLength - 4 bits
+ var idAndLen uint8
+ idAndLen |= uint8(id) << 4
+ idAndLen |= uint8(FAST_FRAME_LEN)
+ output = append(output, byte(idAndLen))
+ output = append(output, byte(len-FAST_FRAME_LEN))
+ }
+
+ // Rest is Data
+ output = append(output, data[:len]...)
+ return
+}
+
+func parseFrameInfoObjects(buf []byte, framingElen int) (objs []FrameInfo, err error, halfByteRemaining bool) {
+ var curObjIdx int
+ var byteIncrementCnt int
+ var bitsToBeIncremented int
+ var marker int
+
+ // Parse frameInfo objects
+ for curObjIdx = 0; curObjIdx < framingElen; curObjIdx += byteIncrementCnt {
+ byteIncrementCnt = 0
+ var oneFrameObj FrameInfo
+
+ // First get the objId
+ // -------------------------
+ var objId int
+ var objHeader uint8 = buf[curObjIdx]
+ var objIdentifierRaw uint8
+ if bitsToBeIncremented == 0 {
+ // ObjHeader
+ // 0 1 2 3 4 5 6 7
+ // ^-----^
+ // ObjIdentifierRaw
+ objIdentifierRaw = (objHeader & 0xf0) >> 4
+ } else {
+ // ObjHeader
+ // 0 1 2 3 4 5 6 7
+ // ^-----^
+ // ObjIdentifierRaw
+ objIdentifierRaw = (objHeader & 0x0f)
+ }
+ bitsToBeIncremented += 4
+
+ marker, err = incrementMarker(&bitsToBeIncremented, &byteIncrementCnt, framingElen, curObjIdx)
+ if err != nil {
+ return
+ }
+
+ // Value is 0-14
+ objId = int(objIdentifierRaw & 0xe)
+ // If bit 15 is set, ID is 15 + value of next byte
+ if objIdentifierRaw&0x1 > 0 {
+ if bitsToBeIncremented > 0 {
+ // ObjHeader
+ // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ // ^-----^ ^---------------^
+ // ObjId1 Extension
+ // ^ marker
+ buffer := uint16(buf[marker])
+ buffer = buffer << 8
+ buffer |= uint16(buf[marker+1])
+ var extension uint8 = uint8(buffer & 0xff0 >> 4)
+ objId += int(extension)
+ } else {
+ // ObjHeader
+ // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ // ^-----^ ^-------------------^
+ // ObjId1 extension
+ // ^ marker
+ var extension uint8 = uint8(buf[marker])
+ objId += int(extension)
+ }
+ bitsToBeIncremented += 8
+ }
+
+ marker, err = incrementMarker(&bitsToBeIncremented, &byteIncrementCnt, framingElen, curObjIdx)
+ if err != nil {
+ return
+ }
+ oneFrameObj.ObjId = FrameObjType(objId)
+
+ // Then get the obj length
+ // -------------------------
+ var objLenRaw uint8
+ var objLen int
+ if bitsToBeIncremented > 0 {
+ // ObjHeader
+ // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ // ^ ^---------^
+ // marker objLen
+ objLenRaw = uint8(buf[marker]) & 0x0f
+ } else {
+ // ObjHeader
+ // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
+ // ^--------^
+ // objLen
+ // ^ marker
+ objLenRaw = uint8(buf[marker]) & 0xf0 >> 4
+ }
+ bitsToBeIncremented += 4
+
+ marker, err = incrementMarker(&bitsToBeIncremented, &byteIncrementCnt, framingElen, curObjIdx)
+ if err != nil {
+ return
+ }
+
+ // Length is 0-14
+ objLen = int(objLenRaw & 0xe)
+ // If bit 15 is set, length is 15 + value of next byte
+ if objLenRaw&0x1 > 0 {
+ if bitsToBeIncremented == 0 {
+ // ObjHeader
+ // 12 13 14 15 16 17 18 19 20 21 22 23
+ // ^---------^ ^--------------------^
+ // objLen extension
+ // ^ marker
+ var extension uint8 = uint8(buf[marker])
+ objLen += int(extension)
+ } else {
+ // ObjHeader
+ // 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ // ^--------^ ^---------------------^
+ // objLen extension
+ // ^ marker
+ buffer := uint16(buf[marker])
+ buffer = buffer << 8
+ buffer |= uint16(buf[marker+1])
+ var extension uint8 = uint8(buffer & 0xff0 >> 4)
+ objLen += int(extension)
+ }
+ bitsToBeIncremented += 8
+ }
+
+ marker, err = incrementMarker(&bitsToBeIncremented, &byteIncrementCnt, framingElen, curObjIdx)
+ if err != nil {
+ return
+ }
+ oneFrameObj.ObjLen = objLen
+
+ // The rest is N-bytes of data based on the length
+ if bitsToBeIncremented == 0 {
+ // No weird alignment needed
+ oneFrameObj.ObjData = buf[marker : marker+objLen]
+ } else {
+ // 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ // ^--------^ ^---------------------^ ^--------->
+ // objLen extension data
+ // ^ marker
+ oneFrameObj.ObjData = ShiftByteSliceLeft4Bits(buf[marker : marker+objLen+1])
+ }
+ err = oneFrameObj.Validate()
+ if err != nil {
+ return
+ }
+ objs = append(objs, oneFrameObj)
+
+ bitsToBeIncremented += 8 * objLen
+ marker, err = incrementMarker(&bitsToBeIncremented, &byteIncrementCnt, framingElen, curObjIdx)
+ }
+
+ if bitsToBeIncremented > 0 {
+ halfByteRemaining = true
+ }
+ return
+}
+
+func ShiftByteSliceLeft4Bits(slice []byte) (replacement []byte) {
+ var buffer uint16
+ var i int
+ sliceLen := len(slice)
+
+ if sliceLen < 2 {
+ // Let's not shift less than 16 bits
+ return
+ }
+
+ replacement = make([]byte, sliceLen, cap(slice))
+
+ for i = 0; i < sliceLen-1; i++ {
+ // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ // ^-----^ ^---------------^ ^-----------
+ // garbage data byte 0 data byte 1
+ buffer = uint16(slice[i])
+ buffer = buffer << 8
+ buffer |= uint16(slice[i+1])
+ replacement[i] = uint8(buffer & 0xff0 >> 4)
+ }
+
+ if i < sliceLen {
+ lastByte := slice[sliceLen-1]
+ lastByte = lastByte << 4
+ replacement[i] = lastByte
+ }
+ return
+}
+
+// The following is used to theoretically support frameInfo ObjID extensions
+// for completeness, but they are not very efficient though
+func ShiftByteSliceRight4Bits(slice []byte) (replacement []byte) {
+ var buffer uint16
+ var i int
+ var leftovers uint8 // 4 bits only
+ var replacementUnit uint16
+ var first bool = true
+ var firstLeftovers uint8
+ var lastLeftovers uint8
+ sliceLen := len(slice)
+
+ if sliceLen < 2 {
+ // Let's not shift less than 16 bits
+ return
+ }
+
+ if slice[sliceLen-1]&0xf == 0 {
+ replacement = make([]byte, sliceLen, cap(slice))
+ } else {
+ replacement = make([]byte, sliceLen+1, cap(slice)+1)
+ }
+
+ for i = 0; i < sliceLen-1; i++ {
+ buffer = binary.BigEndian.Uint16(slice[i : i+2])
+ // (buffer)
+ // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ // ^-------------^ ^-------------------^
+ // data byte 0 data byte 1
+ //
+ // into
+ //
+ // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
+ // ^-----^ ^---------------^ ^--------------------^ ^----------^
+ // zeroes data byte 0 data byte 1 zeroes
+
+ if first {
+ // The leftover OR'ing will overwrite the first 4 bits of data byte 0. Save them
+ firstLeftovers = uint8(buffer & 0xf000 >> 12)
+ first = false
+ }
+ replacementUnit = 0
+ replacementUnit |= uint16(leftovers) << 12
+ replacementUnit |= (buffer & 0xff00) >> 4 // data byte 0
+ replacementUnit |= buffer & 0xff >> 4 // data byte 1 first 4 bits
+ lastLeftovers = uint8(buffer&0xf) << 4
+
+ replacement[i+1] = byte(replacementUnit)
+
+ leftovers = uint8((buffer & 0x000f) << 4)
+ }
+
+ replacement[0] = byte(uint8(replacement[0]) | firstLeftovers)
+ if lastLeftovers > 0 {
+ replacement[sliceLen] = byte(lastLeftovers)
+ }
+ return
+}
+
+func Merge2HalfByteSlices(src1, src2 []byte) (output []byte) {
+ src1Len := len(src1)
+ src2Len := len(src2)
+ output = make([]byte, src1Len+src2Len-1)
+
+ var mergeByte uint8 = src1[src1Len-1]
+ mergeByte |= uint8(src2[0])
+
+ copy(output, src1)
+ copy(output[src1Len:], src2[1:])
+
+ output[src1Len-1] = byte(mergeByte)
+
+ return
+}
diff --git a/vendor/github.com/couchbase/gomemcached/internal/flatbuffers/systemevents/Collection.go b/vendor/github.com/couchbase/gomemcached/internal/flatbuffers/systemevents/Collection.go
new file mode 100644
index 00000000..9242f7f4
--- /dev/null
+++ b/vendor/github.com/couchbase/gomemcached/internal/flatbuffers/systemevents/Collection.go
@@ -0,0 +1,195 @@
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package systemevents
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+type Collection struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsCollection(buf []byte, offset flatbuffers.UOffsetT) *Collection {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &Collection{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func FinishCollectionBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) {
+ builder.Finish(offset)
+}
+
+func GetSizePrefixedRootAsCollection(buf []byte, offset flatbuffers.UOffsetT) *Collection {
+ n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
+ x := &Collection{}
+ x.Init(buf, n+offset+flatbuffers.SizeUint32)
+ return x
+}
+
+func FinishSizePrefixedCollectionBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) {
+ builder.FinishSizePrefixed(offset)
+}
+
+func (rcv *Collection) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *Collection) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func (rcv *Collection) Uid() uint64 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ return rcv._tab.GetUint64(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+func (rcv *Collection) MutateUid(n uint64) bool {
+ return rcv._tab.MutateUint64Slot(4, n)
+}
+
+func (rcv *Collection) ScopeId() uint32 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
+ if o != 0 {
+ return rcv._tab.GetUint32(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+func (rcv *Collection) MutateScopeId(n uint32) bool {
+ return rcv._tab.MutateUint32Slot(6, n)
+}
+
+func (rcv *Collection) CollectionId() uint32 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+ if o != 0 {
+ return rcv._tab.GetUint32(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+func (rcv *Collection) MutateCollectionId(n uint32) bool {
+ return rcv._tab.MutateUint32Slot(8, n)
+}
+
+func (rcv *Collection) TtlValid() bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
+ if o != 0 {
+ return rcv._tab.GetBool(o + rcv._tab.Pos)
+ }
+ return false
+}
+
+func (rcv *Collection) MutateTtlValid(n bool) bool {
+ return rcv._tab.MutateBoolSlot(10, n)
+}
+
+func (rcv *Collection) MaxTtl() uint32 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
+ if o != 0 {
+ return rcv._tab.GetUint32(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+func (rcv *Collection) MutateMaxTtl(n uint32) bool {
+ return rcv._tab.MutateUint32Slot(12, n)
+}
+
+func (rcv *Collection) Name() []byte {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(14))
+ if o != 0 {
+ return rcv._tab.ByteVector(o + rcv._tab.Pos)
+ }
+ return nil
+}
+
+func (rcv *Collection) History() bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(16))
+ if o != 0 {
+ return rcv._tab.GetBool(o + rcv._tab.Pos)
+ }
+ return false
+}
+
+func (rcv *Collection) MutateHistory(n bool) bool {
+ return rcv._tab.MutateBoolSlot(16, n)
+}
+
+func (rcv *Collection) DefaultCollectionMvs() uint64 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(18))
+ if o != 0 {
+ return rcv._tab.GetUint64(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+func (rcv *Collection) MutateDefaultCollectionMvs(n uint64) bool {
+ return rcv._tab.MutateUint64Slot(18, n)
+}
+
+func (rcv *Collection) Metered() bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(20))
+ if o != 0 {
+ return rcv._tab.GetBool(o + rcv._tab.Pos)
+ }
+ return false
+}
+
+func (rcv *Collection) MutateMetered(n bool) bool {
+ return rcv._tab.MutateBoolSlot(20, n)
+}
+
+func (rcv *Collection) FlushUid() uint64 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(22))
+ if o != 0 {
+ return rcv._tab.GetUint64(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+func (rcv *Collection) MutateFlushUid(n uint64) bool {
+ return rcv._tab.MutateUint64Slot(22, n)
+}
+
+func CollectionStart(builder *flatbuffers.Builder) {
+ builder.StartObject(10)
+}
+func CollectionAddUid(builder *flatbuffers.Builder, uid uint64) {
+ builder.PrependUint64Slot(0, uid, 0)
+}
+func CollectionAddScopeId(builder *flatbuffers.Builder, scopeId uint32) {
+ builder.PrependUint32Slot(1, scopeId, 0)
+}
+func CollectionAddCollectionId(builder *flatbuffers.Builder, collectionId uint32) {
+ builder.PrependUint32Slot(2, collectionId, 0)
+}
+func CollectionAddTtlValid(builder *flatbuffers.Builder, ttlValid bool) {
+ builder.PrependBoolSlot(3, ttlValid, false)
+}
+func CollectionAddMaxTtl(builder *flatbuffers.Builder, maxTtl uint32) {
+ builder.PrependUint32Slot(4, maxTtl, 0)
+}
+func CollectionAddName(builder *flatbuffers.Builder, name flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(5, flatbuffers.UOffsetT(name), 0)
+}
+func CollectionAddHistory(builder *flatbuffers.Builder, history bool) {
+ builder.PrependBoolSlot(6, history, false)
+}
+func CollectionAddDefaultCollectionMvs(builder *flatbuffers.Builder, defaultCollectionMvs uint64) {
+ builder.PrependUint64Slot(7, defaultCollectionMvs, 0)
+}
+func CollectionAddMetered(builder *flatbuffers.Builder, metered bool) {
+ builder.PrependBoolSlot(8, metered, false)
+}
+func CollectionAddFlushUid(builder *flatbuffers.Builder, flushUid uint64) {
+ builder.PrependUint64Slot(9, flushUid, 0)
+}
+func CollectionEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/couchbase/gomemcached/internal/flatbuffers/systemevents/DroppedCollection.go b/vendor/github.com/couchbase/gomemcached/internal/flatbuffers/systemevents/DroppedCollection.go
new file mode 100644
index 00000000..51468617
--- /dev/null
+++ b/vendor/github.com/couchbase/gomemcached/internal/flatbuffers/systemevents/DroppedCollection.go
@@ -0,0 +1,109 @@
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package systemevents
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+type DroppedCollection struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsDroppedCollection(buf []byte, offset flatbuffers.UOffsetT) *DroppedCollection {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &DroppedCollection{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func FinishDroppedCollectionBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) {
+ builder.Finish(offset)
+}
+
+func GetSizePrefixedRootAsDroppedCollection(buf []byte, offset flatbuffers.UOffsetT) *DroppedCollection {
+ n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
+ x := &DroppedCollection{}
+ x.Init(buf, n+offset+flatbuffers.SizeUint32)
+ return x
+}
+
+func FinishSizePrefixedDroppedCollectionBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) {
+ builder.FinishSizePrefixed(offset)
+}
+
+func (rcv *DroppedCollection) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *DroppedCollection) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func (rcv *DroppedCollection) Uid() uint64 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ return rcv._tab.GetUint64(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+func (rcv *DroppedCollection) MutateUid(n uint64) bool {
+ return rcv._tab.MutateUint64Slot(4, n)
+}
+
+func (rcv *DroppedCollection) ScopeId() uint32 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
+ if o != 0 {
+ return rcv._tab.GetUint32(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+func (rcv *DroppedCollection) MutateScopeId(n uint32) bool {
+ return rcv._tab.MutateUint32Slot(6, n)
+}
+
+func (rcv *DroppedCollection) CollectionId() uint32 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+ if o != 0 {
+ return rcv._tab.GetUint32(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+func (rcv *DroppedCollection) MutateCollectionId(n uint32) bool {
+ return rcv._tab.MutateUint32Slot(8, n)
+}
+
+func (rcv *DroppedCollection) SystemCollection() bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
+ if o != 0 {
+ return rcv._tab.GetBool(o + rcv._tab.Pos)
+ }
+ return false
+}
+
+func (rcv *DroppedCollection) MutateSystemCollection(n bool) bool {
+ return rcv._tab.MutateBoolSlot(10, n)
+}
+
+func DroppedCollectionStart(builder *flatbuffers.Builder) {
+ builder.StartObject(4)
+}
+func DroppedCollectionAddUid(builder *flatbuffers.Builder, uid uint64) {
+ builder.PrependUint64Slot(0, uid, 0)
+}
+func DroppedCollectionAddScopeId(builder *flatbuffers.Builder, scopeId uint32) {
+ builder.PrependUint32Slot(1, scopeId, 0)
+}
+func DroppedCollectionAddCollectionId(builder *flatbuffers.Builder, collectionId uint32) {
+ builder.PrependUint32Slot(2, collectionId, 0)
+}
+func DroppedCollectionAddSystemCollection(builder *flatbuffers.Builder, systemCollection bool) {
+ builder.PrependBoolSlot(3, systemCollection, false)
+}
+func DroppedCollectionEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/couchbase/gomemcached/internal/flatbuffers/systemevents/DroppedScope.go b/vendor/github.com/couchbase/gomemcached/internal/flatbuffers/systemevents/DroppedScope.go
new file mode 100644
index 00000000..f14e99a3
--- /dev/null
+++ b/vendor/github.com/couchbase/gomemcached/internal/flatbuffers/systemevents/DroppedScope.go
@@ -0,0 +1,94 @@
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package systemevents
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+type DroppedScope struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsDroppedScope(buf []byte, offset flatbuffers.UOffsetT) *DroppedScope {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &DroppedScope{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func FinishDroppedScopeBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) {
+ builder.Finish(offset)
+}
+
+func GetSizePrefixedRootAsDroppedScope(buf []byte, offset flatbuffers.UOffsetT) *DroppedScope {
+ n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
+ x := &DroppedScope{}
+ x.Init(buf, n+offset+flatbuffers.SizeUint32)
+ return x
+}
+
+func FinishSizePrefixedDroppedScopeBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) {
+ builder.FinishSizePrefixed(offset)
+}
+
+func (rcv *DroppedScope) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *DroppedScope) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func (rcv *DroppedScope) Uid() uint64 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ return rcv._tab.GetUint64(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+func (rcv *DroppedScope) MutateUid(n uint64) bool {
+ return rcv._tab.MutateUint64Slot(4, n)
+}
+
+func (rcv *DroppedScope) ScopeId() uint32 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
+ if o != 0 {
+ return rcv._tab.GetUint32(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+func (rcv *DroppedScope) MutateScopeId(n uint32) bool {
+ return rcv._tab.MutateUint32Slot(6, n)
+}
+
+func (rcv *DroppedScope) SystemScope() bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+ if o != 0 {
+ return rcv._tab.GetBool(o + rcv._tab.Pos)
+ }
+ return false
+}
+
+func (rcv *DroppedScope) MutateSystemScope(n bool) bool {
+ return rcv._tab.MutateBoolSlot(8, n)
+}
+
+func DroppedScopeStart(builder *flatbuffers.Builder) {
+ builder.StartObject(3)
+}
+func DroppedScopeAddUid(builder *flatbuffers.Builder, uid uint64) {
+ builder.PrependUint64Slot(0, uid, 0)
+}
+func DroppedScopeAddScopeId(builder *flatbuffers.Builder, scopeId uint32) {
+ builder.PrependUint32Slot(1, scopeId, 0)
+}
+func DroppedScopeAddSystemScope(builder *flatbuffers.Builder, systemScope bool) {
+ builder.PrependBoolSlot(2, systemScope, false)
+}
+func DroppedScopeEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/couchbase/gomemcached/internal/flatbuffers/systemevents/Scope.go b/vendor/github.com/couchbase/gomemcached/internal/flatbuffers/systemevents/Scope.go
new file mode 100644
index 00000000..80da1076
--- /dev/null
+++ b/vendor/github.com/couchbase/gomemcached/internal/flatbuffers/systemevents/Scope.go
@@ -0,0 +1,90 @@
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package systemevents
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+type Scope struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsScope(buf []byte, offset flatbuffers.UOffsetT) *Scope {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &Scope{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func FinishScopeBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) {
+ builder.Finish(offset)
+}
+
+func GetSizePrefixedRootAsScope(buf []byte, offset flatbuffers.UOffsetT) *Scope {
+ n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
+ x := &Scope{}
+ x.Init(buf, n+offset+flatbuffers.SizeUint32)
+ return x
+}
+
+func FinishSizePrefixedScopeBuffer(builder *flatbuffers.Builder, offset flatbuffers.UOffsetT) {
+ builder.FinishSizePrefixed(offset)
+}
+
+func (rcv *Scope) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *Scope) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func (rcv *Scope) Uid() uint64 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ return rcv._tab.GetUint64(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+func (rcv *Scope) MutateUid(n uint64) bool {
+ return rcv._tab.MutateUint64Slot(4, n)
+}
+
+func (rcv *Scope) ScopeId() uint32 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
+ if o != 0 {
+ return rcv._tab.GetUint32(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+func (rcv *Scope) MutateScopeId(n uint32) bool {
+ return rcv._tab.MutateUint32Slot(6, n)
+}
+
+func (rcv *Scope) Name() []byte {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+ if o != 0 {
+ return rcv._tab.ByteVector(o + rcv._tab.Pos)
+ }
+ return nil
+}
+
+func ScopeStart(builder *flatbuffers.Builder) {
+ builder.StartObject(3)
+}
+func ScopeAddUid(builder *flatbuffers.Builder, uid uint64) {
+ builder.PrependUint64Slot(0, uid, 0)
+}
+func ScopeAddScopeId(builder *flatbuffers.Builder, scopeId uint32) {
+ builder.PrependUint32Slot(1, scopeId, 0)
+}
+func ScopeAddName(builder *flatbuffers.Builder, name flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(name), 0)
+}
+func ScopeEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/couchbase/gomemcached/mc_constants.go b/vendor/github.com/couchbase/gomemcached/mc_constants.go
new file mode 100644
index 00000000..d4e31127
--- /dev/null
+++ b/vendor/github.com/couchbase/gomemcached/mc_constants.go
@@ -0,0 +1,479 @@
+// Package gomemcached is binary protocol packet formats and constants.
+package gomemcached
+
+import (
+ "fmt"
+)
+
+const (
+ REQ_MAGIC = 0x80
+ RES_MAGIC = 0x81
+ FLEX_MAGIC = 0x08
+ FLEX_RES_MAGIC = 0x18
+)
+
+// CommandCode for memcached packets.
+type CommandCode uint8
+
+const (
+ GET = CommandCode(0x00)
+ SET = CommandCode(0x01)
+ ADD = CommandCode(0x02)
+ REPLACE = CommandCode(0x03)
+ DELETE = CommandCode(0x04)
+ INCREMENT = CommandCode(0x05)
+ DECREMENT = CommandCode(0x06)
+ QUIT = CommandCode(0x07)
+ FLUSH = CommandCode(0x08)
+ GETQ = CommandCode(0x09)
+ NOOP = CommandCode(0x0a)
+ VERSION = CommandCode(0x0b)
+ GETK = CommandCode(0x0c)
+ GETKQ = CommandCode(0x0d)
+ APPEND = CommandCode(0x0e)
+ PREPEND = CommandCode(0x0f)
+ STAT = CommandCode(0x10)
+ SETQ = CommandCode(0x11)
+ ADDQ = CommandCode(0x12)
+ REPLACEQ = CommandCode(0x13)
+ DELETEQ = CommandCode(0x14)
+ INCREMENTQ = CommandCode(0x15)
+ DECREMENTQ = CommandCode(0x16)
+ QUITQ = CommandCode(0x17)
+ FLUSHQ = CommandCode(0x18)
+ APPENDQ = CommandCode(0x19)
+ AUDIT = CommandCode(0x27)
+ PREPENDQ = CommandCode(0x1a)
+ GAT = CommandCode(0x1d)
+ HELLO = CommandCode(0x1f)
+ RGET = CommandCode(0x30)
+ RSET = CommandCode(0x31)
+ RSETQ = CommandCode(0x32)
+ RAPPEND = CommandCode(0x33)
+ RAPPENDQ = CommandCode(0x34)
+ RPREPEND = CommandCode(0x35)
+ RPREPENDQ = CommandCode(0x36)
+ RDELETE = CommandCode(0x37)
+ RDELETEQ = CommandCode(0x38)
+ RINCR = CommandCode(0x39)
+ RINCRQ = CommandCode(0x3a)
+ RDECR = CommandCode(0x3b)
+ RDECRQ = CommandCode(0x3c)
+
+ SASL_LIST_MECHS = CommandCode(0x20)
+ SASL_AUTH = CommandCode(0x21)
+ SASL_STEP = CommandCode(0x22)
+
+ SET_BUCKET_THROTTLE_PROPERTIES = CommandCode(0x2a)
+ SET_BUCKET_DATA_LIMIT_EXCEEDED = CommandCode(0x2b)
+ SET_NODE_THROTTLE_PROPERTIES = CommandCode(0x2c)
+
+ SET_VBUCKET = CommandCode(0x3d)
+
+ TAP_CONNECT = CommandCode(0x40) // Client-sent request to initiate Tap feed
+ TAP_MUTATION = CommandCode(0x41) // Notification of a SET/ADD/REPLACE/etc. on the server
+ TAP_DELETE = CommandCode(0x42) // Notification of a DELETE on the server
+ TAP_FLUSH = CommandCode(0x43) // Replicates a flush_all command
+ TAP_OPAQUE = CommandCode(0x44) // Opaque control data from the engine
+ TAP_VBUCKET_SET = CommandCode(0x45) // Sets state of vbucket in receiver (used in takeover)
+ TAP_CHECKPOINT_START = CommandCode(0x46) // Notifies start of new checkpoint
+ TAP_CHECKPOINT_END = CommandCode(0x47) // Notifies end of checkpoint
+ GET_ALL_VB_SEQNOS = CommandCode(0x48) // Get current high sequence numbers from all vbuckets located on the server
+
+ // Like GET, but returns the document with the entire xattr section prefixed.
+ // Needs JSON, XATTR and SnappyEverywhere features enabled in the server.
+ GETEX = CommandCode(0x49)
+ GETEX_REPLICA = CommandCode(0x4a)
+
+ UPR_OPEN = CommandCode(0x50) // Open a UPR connection with a name
+ UPR_ADDSTREAM = CommandCode(0x51) // Sent by ebucketMigrator to UPR Consumer
+ UPR_CLOSESTREAM = CommandCode(0x52) // Sent by eBucketMigrator to UPR Consumer
+ UPR_FAILOVERLOG = CommandCode(0x54) // Request failover logs
+ UPR_STREAMREQ = CommandCode(0x53) // Stream request from consumer to producer
+ UPR_STREAMEND = CommandCode(0x55) // Sent by producer when it has no more messages to stream
+ UPR_SNAPSHOT = CommandCode(0x56) // Start of a new snapshot
+ UPR_MUTATION = CommandCode(0x57) // Key mutation
+ UPR_DELETION = CommandCode(0x58) // Key deletion
+ UPR_EXPIRATION = CommandCode(0x59) // Key expiration
+ UPR_FLUSH = CommandCode(0x5a) // Delete all the data for a vbucket
+ UPR_NOOP = CommandCode(0x5c) // UPR NOOP
+ UPR_BUFFERACK = CommandCode(0x5d) // UPR Buffer Acknowledgement
+ UPR_CONTROL = CommandCode(0x5e) // Set flow control params
+
+ GET_REPLICA = CommandCode(0x83) // Get from replica
+ SELECT_BUCKET = CommandCode(0x89) // Select bucket
+
+ OBSERVE_SEQNO = CommandCode(0x91) // Sequence Number based Observe
+ OBSERVE = CommandCode(0x92)
+
+ GET_META = CommandCode(0xA0) // Get meta. returns with expiry, flags, cas etc
+ SET_WITH_META = CommandCode(0xa2)
+ ADD_WITH_META = CommandCode(0xa4)
+ DELETE_WITH_META = CommandCode(0xa8)
+ GET_RANDOM_KEY = CommandCode(0xb6)
+ GET_COLLECTIONS_MANIFEST = CommandCode(0xba) // Get entire collections manifest.
+ COLLECTIONS_GET_CID = CommandCode(0xbb) // Get collection id.
+ SET_TIME_SYNC = CommandCode(0xc1)
+
+ SUBDOC_GET = CommandCode(0xc5) // Get subdoc. Returns with xattrs
+ SUBDOC_DICT_UPSERT = CommandCode(0xc8)
+ SUBDOC_DELETE = CommandCode(0xc9) // Delete a path
+ SUBDOC_COUNTER = CommandCode(0xcf)
+ SUBDOC_MULTI_LOOKUP = CommandCode(0xd0) // Multi lookup. Doc xattrs and meta.
+ SUBDOC_MULTI_MUTATION = CommandCode(0xd1) // Multi mutation. Doc and xattr
+
+ DCP_SYSTEM_EVENT = CommandCode(0x5f) // A system event has occurred
+ DCP_SEQNO_ADV = CommandCode(0x64) // Sent when the vb seqno has advanced due to an unsubscribed event
+ DCP_OSO_SNAPSHOT = CommandCode(0x65) // Marks the begin and end of out-of-sequence-number stream
+
+ CREATE_RANGE_SCAN = CommandCode(0xda) // Range scans
+ CONTINUE_RANGE_SCAN = CommandCode(0xdb) // Range scans
+ CANCEL_RANGE_SCAN = CommandCode(0xdc) // Range scans
+
+ GET_ERROR_MAP = CommandCode(0xfe)
+)
+
+// command codes that are counted toward DCP control buffer
+// when DCP clients receive DCP messages with these command codes, they need to provide acknowledgement
+var BufferedCommandCodeMap = map[CommandCode]bool{
+ SET_VBUCKET: true,
+ UPR_STREAMEND: true,
+ UPR_SNAPSHOT: true,
+ UPR_MUTATION: true,
+ UPR_DELETION: true,
+ UPR_EXPIRATION: true,
+ DCP_SYSTEM_EVENT: true,
+ DCP_SEQNO_ADV: true,
+ DCP_OSO_SNAPSHOT: true,
+}
+
+// Status field for memcached response.
+type Status uint16
+
+// Matches with protocol/status.h as source of truth
+const (
+ SUCCESS = Status(0x00)
+ KEY_ENOENT = Status(0x01)
+ KEY_EEXISTS = Status(0x02)
+ E2BIG = Status(0x03)
+ EINVAL = Status(0x04)
+ NOT_STORED = Status(0x05)
+ DELTA_BADVAL = Status(0x06)
+ NOT_MY_VBUCKET = Status(0x07)
+ NO_BUCKET = Status(0x08)
+ LOCKED = Status(0x09)
+ WOULD_THROTTLE = Status(0x0c)
+ CONFIG_ONLY = Status(0x0d)
+ CAS_VALUE_INVALID = Status(0x0f)
+ AUTH_STALE = Status(0x1f)
+ AUTH_ERROR = Status(0x20)
+ AUTH_CONTINUE = Status(0x21)
+ ERANGE = Status(0x22)
+ ROLLBACK = Status(0x23)
+ EACCESS = Status(0x24)
+ NOT_INITIALIZED = Status(0x25)
+
+ RATE_LIMITED_NETWORK_INGRESS = Status(0x30)
+ RATE_LIMITED_NETWORK_EGRESS = Status(0x31)
+ RATE_LIMITED_MAX_CONNECTIONS = Status(0x32)
+ RATE_LIMITED_MAX_COMMANDS = Status(0x33)
+ SCOPE_SIZE_LIMIT_EXCEEDED = Status(0x34)
+ BUCKET_SIZE_LIMIT_EXCEEDED = Status(0x35)
+
+ BUCKET_RESIDENT_RATIO_TOO_LOW = Status(0x36)
+ BUCKET_DATA_SIZE_TOO_BIG = Status(0x37)
+ BUCKET_DISK_SPACE_TOO_LOW = Status(0x38)
+
+ UNKNOWN_COMMAND = Status(0x81)
+ ENOMEM = Status(0x82)
+ NOT_SUPPORTED = Status(0x83)
+ EINTERNAL = Status(0x84)
+ EBUSY = Status(0x85)
+ TMPFAIL = Status(0x86)
+ XATTR_EINVAL = Status(0x87)
+ UNKNOWN_COLLECTION = Status(0x88)
+
+ DURABILITY_INVALID_LEVEL = Status(0xa0)
+ DURABILITY_IMPOSSIBLE = Status(0xa1)
+ SYNC_WRITE_IN_PROGRESS = Status(0xa2)
+ SYNC_WRITE_AMBIGUOUS = Status(0xa3)
+ SYNC_WRITE_RECOMMITINPROGRESS = Status(0xa4)
+
+ RANGE_SCAN_MORE = Status(0xa6)
+ RANGE_SCAN_COMPLETE = Status(0xa7)
+
+ // SUBDOC
+ SUBDOC_PATH_NOT_FOUND = Status(0xc0)
+ SUBDOC_INVALID_COMBO = Status(0xcb)
+ SUBDOC_BAD_MULTI = Status(0xcc) // SubdocMultiPathFailure
+ SUBDOC_SUCCESS_DELETED = Status(0xcd)
+ SUBDOC_MULTI_PATH_FAILURE_DELETED = Status(0xd3)
+ // Not a Memcached status
+ UNKNOWN_STATUS = Status(0xffff)
+)
+
+const (
+ // doc level flags for subdoc commands
+ SUBDOC_FLAG_NONE uint8 = 0x00
+ SUBDOC_FLAG_MKDOC uint8 = 0x01 // Create if it does not exist
+ SUBDOC_FLAG_ADD uint8 = 0x02 // Add doc only if it does not exist.
+ SUBDOC_FLAG_ACCESS_DELETED uint8 = 0x04 // allow access to XATTRs for deleted document
+ SUBDOC_FLAG_CREATE_AS_DELETED uint8 = 0x08 // Used with mkdoc/add
+ SUBDOC_FLAG_REVIVED_DOC uint8 = 0x10
+
+ // path level flags for subdoc commands
+ SUBDOC_FLAG_NONE_PATH uint8 = 0x00
+ SUBDOC_FLAG_MKDIR_PATH uint8 = 0x01 // create path
+ SUBDOC_FLAG_XATTR_PATH uint8 = 0x04 // if set, the path refers to an XATTR
+ SUBDOC_FLAG_EXPAND_MACRRO_PATH uint8 = 0x10 // Expand macro value inside XATTR
+)
+
+// for log redaction
+const (
+ UdTagBegin = ""
+ UdTagEnd = ""
+)
+
+var isFatal = map[Status]bool{
+ DELTA_BADVAL: true,
+ NO_BUCKET: true,
+ AUTH_STALE: true,
+ AUTH_ERROR: true,
+ ERANGE: true,
+ ROLLBACK: true,
+ EACCESS: true,
+ ENOMEM: true,
+ NOT_SUPPORTED: true,
+
+ // consider statuses coming from outside couchbase (eg OS errors) as fatal for the connection
+ // as there might be unread data left over on the wire
+ UNKNOWN_STATUS: true,
+}
+
+// the producer/consumer bit in dcp flags
+var DCP_PRODUCER uint32 = 0x01
+
+// the include XATTRS bit in dcp flags
+var DCP_OPEN_INCLUDE_XATTRS uint32 = 0x04
+
+// the include deletion time bit in dcp flags
+var DCP_OPEN_INCLUDE_DELETE_TIMES uint32 = 0x20
+
+// Datatype to Include XATTRS in SUBDOC GET
+var SUBDOC_FLAG_XATTR uint8 = 0x04
+
+// MCItem is an internal representation of an item.
+type MCItem struct {
+ Cas uint64
+ Flags, Expiration uint32
+ Data []byte
+}
+
+// Number of bytes in a binary protocol header.
+const HDR_LEN = 24
+
+const (
+ ComputeUnitsRead = 1
+ ComputeUnitsWrite = 2
+)
+
+const (
+ DatatypeFlagJSON = uint8(0x01)
+ DatatypeFlagCompressed = uint8(0x02)
+ DatatypeFlagXattrs = uint8(0x04)
+)
+
+// Mapping of CommandCode -> name of command (not exhaustive)
+var CommandNames map[CommandCode]string
+
+// StatusNames human readable names for memcached response.
+var StatusNames map[Status]string
+var StatusDesc map[Status]string
+
+type ErrorMapVersion uint16
+
+const (
+ ErrorMapInvalidVersion = 0 // Unused zero value
+ ErrorMapCB50 = 1 // Used for Couchbase Server 5.0+
+ ErrorMapCB75 = 2 // Used for Couchbase Server 7.5+
+)
+
+func init() {
+ CommandNames = make(map[CommandCode]string)
+ CommandNames[GET] = "GET"
+ CommandNames[SET] = "SET"
+ CommandNames[ADD] = "ADD"
+ CommandNames[REPLACE] = "REPLACE"
+ CommandNames[DELETE] = "DELETE"
+ CommandNames[INCREMENT] = "INCREMENT"
+ CommandNames[DECREMENT] = "DECREMENT"
+ CommandNames[QUIT] = "QUIT"
+ CommandNames[FLUSH] = "FLUSH"
+ CommandNames[GETQ] = "GETQ"
+ CommandNames[NOOP] = "NOOP"
+ CommandNames[VERSION] = "VERSION"
+ CommandNames[GETK] = "GETK"
+ CommandNames[GETKQ] = "GETKQ"
+ CommandNames[APPEND] = "APPEND"
+ CommandNames[PREPEND] = "PREPEND"
+ CommandNames[STAT] = "STAT"
+ CommandNames[SETQ] = "SETQ"
+ CommandNames[ADDQ] = "ADDQ"
+ CommandNames[REPLACEQ] = "REPLACEQ"
+ CommandNames[DELETEQ] = "DELETEQ"
+ CommandNames[INCREMENTQ] = "INCREMENTQ"
+ CommandNames[DECREMENTQ] = "DECREMENTQ"
+ CommandNames[QUITQ] = "QUITQ"
+ CommandNames[FLUSHQ] = "FLUSHQ"
+ CommandNames[APPENDQ] = "APPENDQ"
+ CommandNames[PREPENDQ] = "PREPENDQ"
+ CommandNames[RGET] = "RGET"
+ CommandNames[RSET] = "RSET"
+ CommandNames[RSETQ] = "RSETQ"
+ CommandNames[RAPPEND] = "RAPPEND"
+ CommandNames[RAPPENDQ] = "RAPPENDQ"
+ CommandNames[RPREPEND] = "RPREPEND"
+ CommandNames[RPREPENDQ] = "RPREPENDQ"
+ CommandNames[RDELETE] = "RDELETE"
+ CommandNames[RDELETEQ] = "RDELETEQ"
+ CommandNames[RINCR] = "RINCR"
+ CommandNames[RINCRQ] = "RINCRQ"
+ CommandNames[RDECR] = "RDECR"
+ CommandNames[RDECRQ] = "RDECRQ"
+
+ CommandNames[SASL_LIST_MECHS] = "SASL_LIST_MECHS"
+ CommandNames[SASL_AUTH] = "SASL_AUTH"
+ CommandNames[SASL_STEP] = "SASL_STEP"
+
+ CommandNames[TAP_CONNECT] = "TAP_CONNECT"
+ CommandNames[TAP_MUTATION] = "TAP_MUTATION"
+ CommandNames[TAP_DELETE] = "TAP_DELETE"
+ CommandNames[TAP_FLUSH] = "TAP_FLUSH"
+ CommandNames[TAP_OPAQUE] = "TAP_OPAQUE"
+ CommandNames[TAP_VBUCKET_SET] = "TAP_VBUCKET_SET"
+ CommandNames[TAP_CHECKPOINT_START] = "TAP_CHECKPOINT_START"
+ CommandNames[TAP_CHECKPOINT_END] = "TAP_CHECKPOINT_END"
+
+ CommandNames[UPR_OPEN] = "UPR_OPEN"
+ CommandNames[UPR_ADDSTREAM] = "UPR_ADDSTREAM"
+ CommandNames[UPR_CLOSESTREAM] = "UPR_CLOSESTREAM"
+ CommandNames[UPR_FAILOVERLOG] = "UPR_FAILOVERLOG"
+ CommandNames[UPR_STREAMREQ] = "UPR_STREAMREQ"
+ CommandNames[UPR_STREAMEND] = "UPR_STREAMEND"
+ CommandNames[UPR_SNAPSHOT] = "UPR_SNAPSHOT"
+ CommandNames[UPR_MUTATION] = "UPR_MUTATION"
+ CommandNames[UPR_DELETION] = "UPR_DELETION"
+ CommandNames[UPR_EXPIRATION] = "UPR_EXPIRATION"
+ CommandNames[UPR_FLUSH] = "UPR_FLUSH"
+ CommandNames[UPR_NOOP] = "UPR_NOOP"
+ CommandNames[UPR_BUFFERACK] = "UPR_BUFFERACK"
+ CommandNames[UPR_CONTROL] = "UPR_CONTROL"
+ CommandNames[SUBDOC_GET] = "SUBDOC_GET"
+ CommandNames[SUBDOC_MULTI_LOOKUP] = "SUBDOC_MULTI_LOOKUP"
+ CommandNames[GET_COLLECTIONS_MANIFEST] = "GET_COLLECTIONS_MANIFEST"
+ CommandNames[COLLECTIONS_GET_CID] = "COLLECTIONS_GET_CID"
+ CommandNames[DCP_SYSTEM_EVENT] = "DCP_SYSTEM_EVENT"
+ CommandNames[DCP_SEQNO_ADV] = "DCP_SEQNO_ADV"
+
+ CommandNames[CREATE_RANGE_SCAN] = "CREATE_RANGE_SCAN"
+ CommandNames[CONTINUE_RANGE_SCAN] = "CONTINUE_RANGE_SCAN"
+ CommandNames[CANCEL_RANGE_SCAN] = "CANCEL_RANGE_SCAN"
+
+ StatusNames = make(map[Status]string)
+ StatusNames[SUCCESS] = "SUCCESS"
+ StatusNames[KEY_ENOENT] = "KEY_ENOENT"
+ StatusNames[KEY_EEXISTS] = "KEY_EEXISTS"
+ StatusNames[E2BIG] = "E2BIG"
+ StatusNames[EINVAL] = "EINVAL"
+ StatusNames[NOT_STORED] = "NOT_STORED"
+ StatusNames[DELTA_BADVAL] = "DELTA_BADVAL"
+ StatusNames[NOT_MY_VBUCKET] = "NOT_MY_VBUCKET"
+ StatusNames[NO_BUCKET] = "NO_BUCKET"
+ StatusNames[WOULD_THROTTLE] = "WOULD_THROTTLE"
+ StatusNames[CONFIG_ONLY] = "CONFIG_ONLY"
+ StatusNames[AUTH_STALE] = "AUTH_STALE"
+ StatusNames[AUTH_ERROR] = "AUTH_ERROR"
+ StatusNames[AUTH_CONTINUE] = "AUTH_CONTINUE"
+ StatusNames[ERANGE] = "ERANGE"
+ StatusNames[ROLLBACK] = "ROLLBACK"
+ StatusNames[EACCESS] = "EACCESS"
+ StatusNames[NOT_INITIALIZED] = "NOT_INITIALIZED"
+ StatusNames[RATE_LIMITED_NETWORK_INGRESS] = "RATE_LIMITED_NETWORK_INGRESS"
+ StatusNames[RATE_LIMITED_NETWORK_EGRESS] = "RATE_LIMITED_NETWORK_EGRESS"
+ StatusNames[RATE_LIMITED_MAX_CONNECTIONS] = "RATE_LIMITED_MAX_CONNECTIONS"
+ StatusNames[RATE_LIMITED_MAX_COMMANDS] = "RATE_LIMITED_MAX_COMMANDS"
+ StatusNames[SCOPE_SIZE_LIMIT_EXCEEDED] = "SCOPE_SIZE_LIMIT_EXCEEDED"
+ StatusNames[BUCKET_SIZE_LIMIT_EXCEEDED] = "BUCKET_SIZE_LIMIT_EXCEEDED"
+ StatusNames[BUCKET_RESIDENT_RATIO_TOO_LOW] = "BUCKET_RESIDENT_RATIO_TOO_LOW"
+ StatusNames[BUCKET_DATA_SIZE_TOO_BIG] = "BUCKET_DATA_SIZE_TOO_BIG"
+ StatusNames[BUCKET_DISK_SPACE_TOO_LOW] = "BUCKET_DISK_SPACE_TOO_LOW"
+ StatusNames[UNKNOWN_COMMAND] = "UNKNOWN_COMMAND"
+ StatusNames[ENOMEM] = "ENOMEM"
+ StatusNames[NOT_SUPPORTED] = "NOT_SUPPORTED"
+ StatusNames[EINTERNAL] = "EINTERNAL"
+ StatusNames[EBUSY] = "EBUSY"
+ StatusNames[TMPFAIL] = "TMPFAIL"
+ StatusNames[UNKNOWN_COLLECTION] = "UNKNOWN_COLLECTION"
+ StatusNames[SUBDOC_PATH_NOT_FOUND] = "SUBDOC_PATH_NOT_FOUND"
+ StatusNames[SUBDOC_INVALID_COMBO] = "SUBDOC_INVALID_COMBO"
+ StatusNames[SUBDOC_BAD_MULTI] = "SUBDOC_BAD_MULTI"
+ StatusNames[DURABILITY_INVALID_LEVEL] = "DURABILITY_INVALID_LEVEL"
+ StatusNames[DURABILITY_IMPOSSIBLE] = "DURABILITY_IMPOSSIBLE"
+ StatusNames[SYNC_WRITE_IN_PROGRESS] = "SYNC_WRITE_IN_PROGRESS"
+ StatusNames[SYNC_WRITE_AMBIGUOUS] = "SYNC_WRITE_AMBIGUOUS"
+ StatusNames[SYNC_WRITE_RECOMMITINPROGRESS] = "SYNC_WRITE_RECOMMITINPROGRESS"
+ StatusNames[RANGE_SCAN_MORE] = "RANGE_SCAN_MORE"
+ StatusNames[RANGE_SCAN_COMPLETE] = "RANGE_SCAN_COMPLETE"
+
+ StatusDesc = make(map[Status]string)
+ StatusDesc[RATE_LIMITED_NETWORK_INGRESS] = "Network input rate limit exceeded"
+ StatusDesc[RATE_LIMITED_NETWORK_EGRESS] = "Network output rate limit exceeded"
+ StatusDesc[RATE_LIMITED_MAX_CONNECTIONS] = "Connections limit exceeded"
+ StatusDesc[RATE_LIMITED_MAX_COMMANDS] = "Request rate limit exceeded"
+ StatusDesc[SCOPE_SIZE_LIMIT_EXCEEDED] = "Scope size limit exceeded"
+ StatusDesc[BUCKET_SIZE_LIMIT_EXCEEDED] = "Bucket size limit exceeded"
+}
+
+// String an op code.
+func (o CommandCode) String() (rv string) {
+ rv = CommandNames[o]
+ if rv == "" {
+ rv = fmt.Sprintf("0x%02x", int(o))
+ }
+ return rv
+}
+
+// String an op code.
+func (s Status) String() (rv string) {
+ rv = StatusNames[s]
+ if rv == "" {
+ rv = fmt.Sprintf("0x%02x", int(s))
+ }
+ return rv
+}
+
+// IsQuiet will return true if a command is a "quiet" command.
+func (o CommandCode) IsQuiet() bool {
+ switch o {
+ case GETQ,
+ GETKQ,
+ SETQ,
+ ADDQ,
+ REPLACEQ,
+ DELETEQ,
+ INCREMENTQ,
+ DECREMENTQ,
+ QUITQ,
+ FLUSHQ,
+ APPENDQ,
+ PREPENDQ,
+ RSETQ,
+ RAPPENDQ,
+ RPREPENDQ,
+ RDELETEQ,
+ RINCRQ,
+ RDECRQ:
+ return true
+ }
+ return false
+}
diff --git a/vendor/github.com/couchbase/gomemcached/mc_req.go b/vendor/github.com/couchbase/gomemcached/mc_req.go
new file mode 100644
index 00000000..9aa93e22
--- /dev/null
+++ b/vendor/github.com/couchbase/gomemcached/mc_req.go
@@ -0,0 +1,662 @@
+package gomemcached
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+)
+
+// The maximum reasonable body length to expect.
+// Anything larger than this will result in an error.
+// The current limit, 21MB, is the size limit supported by ep-engine.
+// 20MB for document value, 1MB for system extended attributes
+var MaxBodyLen = int(21 * 1024 * 1024)
+
+const _BUFLEN = 256
+
+// MCRequest is memcached Request
+type MCRequest struct {
+ // The command being issued
+ Opcode CommandCode
+ // The CAS (if applicable, or 0)
+ Cas uint64
+ // An opaque value to be returned with this request
+ Opaque uint32
+ // The vbucket to which this command belongs
+ VBucket uint16
+ // Command extras, key, and body
+ Extras, Key, Body, ExtMeta []byte
+ // Datatype identifier
+ DataType uint8
+ // len() calls are expensive - cache this in case for collection
+ Keylen int
+ // Collection id for collection based operations
+ CollId [binary.MaxVarintLen32]byte
+ // Length of collection id
+ CollIdLen int
+ // Impersonate user name - could go in FramingExtras, but for efficiency
+ Username [MAX_USER_LEN]byte
+ // Length of Impersonate user name
+ UserLen int
+ // Flexible Framing Extras
+ FramingExtras []FrameInfo
+ // Stored length of incoming framing extras
+ FramingElen int
+}
+
+// Size gives the number of bytes this request requires.
+func (req *MCRequest) HdrSize() int {
+ rv := HDR_LEN + len(req.Extras) + req.CollIdLen + len(req.Key)
+ if req.UserLen != 0 {
+ rv += frameLen(req.UserLen)
+ }
+ for _, e := range req.FramingExtras {
+ rv += frameLen(e.ObjLen)
+ }
+ return rv
+}
+
+func (req *MCRequest) Size() int {
+ return req.HdrSize() + len(req.Body) + len(req.ExtMeta)
+}
+
+// A debugging string representation of this request
+func (req MCRequest) String() string {
+ return fmt.Sprintf("{MCRequest opcode=%s, bodylen=%d, key='%s'}",
+ req.Opcode, len(req.Body), req.Key)
+}
+
+func (req *MCRequest) fillRegularHeaderBytes(data []byte) int {
+ // Byte/ 0 | 1 | 2 | 3 |
+ // / | | | |
+ // |0 1 2 3 4 5 6 7|0 1 2 3 4 5 6 7|0 1 2 3 4 5 6 7|0 1 2 3 4 5 6 7|
+ // +---------------+---------------+---------------+---------------+
+ // 0| Magic | Opcode | Key length |
+ // +---------------+---------------+---------------+---------------+
+ // 4| Extras length | Data type | vbucket id |
+ // +---------------+---------------+---------------+---------------+
+ // 8| Total body length |
+ // +---------------+---------------+---------------+---------------+
+ // 12| Opaque |
+ // +---------------+---------------+---------------+---------------+
+ // 16| CAS |
+ // | |
+ // +---------------+---------------+---------------+---------------+
+ // Total 24 bytes
+
+ pos := 0
+ data[pos] = REQ_MAGIC
+ pos++
+ data[pos] = byte(req.Opcode)
+ pos++
+ binary.BigEndian.PutUint16(data[pos:pos+2], uint16(req.CollIdLen+len(req.Key)))
+ pos += 2
+
+ // 4
+ data[pos] = byte(len(req.Extras))
+ pos++
+ // Data type
+ if req.DataType != 0 {
+ data[pos] = byte(req.DataType)
+ }
+ pos++
+ binary.BigEndian.PutUint16(data[pos:pos+2], req.VBucket)
+ pos += 2
+
+ // 8
+ binary.BigEndian.PutUint32(data[pos:pos+4],
+ uint32(len(req.Body)+req.CollIdLen+len(req.Key)+len(req.Extras)+len(req.ExtMeta)))
+ pos += 4
+
+ // 12
+ binary.BigEndian.PutUint32(data[pos:pos+4], req.Opaque)
+ pos += 4
+
+ // 16
+ if req.Cas != 0 {
+ binary.BigEndian.PutUint64(data[pos:pos+8], req.Cas)
+ }
+ pos += 8
+
+ // 24 - extras
+ if len(req.Extras) > 0 {
+ copy(data[pos:pos+len(req.Extras)], req.Extras)
+ pos += len(req.Extras)
+ }
+
+ if len(req.Key) > 0 {
+ if req.CollIdLen > 0 {
+ copy(data[pos:pos+req.CollIdLen], req.CollId[:])
+ pos += req.CollIdLen
+ }
+ copy(data[pos:pos+len(req.Key)], req.Key)
+ pos += len(req.Key)
+ }
+
+ return pos
+}
+
+func (req *MCRequest) fillFastFlexHeaderBytes(data []byte) int {
+ // Byte/ 0 | 1 | 2 | 3 |
+ // / | | | |
+ // |0 1 2 3 4 5 6 7|0 1 2 3 4 5 6 7|0 1 2 3 4 5 6 7|0 1 2 3 4 5 6 7|
+ // +---------------+---------------+---------------+---------------+
+ // 0| Magic | Opcode | Framing extras| Key Length |
+ // +---------------+---------------+---------------+---------------+
+ // 4| Extras length | Data type | vbucket id |
+ // +---------------+---------------+---------------+---------------+
+ // 8| Total body length |
+ // +---------------+---------------+---------------+---------------+
+ // 12| Opaque |
+ // +---------------+---------------+---------------+---------------+
+ // 16| CAS |
+ // | |
+ // +---------------+---------------+---------------+---------------+
+ // Total 24 bytes
+
+ pos := 0
+ data[pos] = FLEX_MAGIC
+ pos++
+ data[pos] = byte(req.Opcode)
+ pos++
+ req.FramingElen = frameLen(req.UserLen)
+ data[pos] = byte(req.FramingElen)
+ pos++
+ data[pos] = byte(len(req.Key) + req.CollIdLen)
+ pos++
+
+ // 4
+ data[pos] = byte(len(req.Extras))
+ pos++
+ // Data type
+ if req.DataType != 0 {
+ data[pos] = byte(req.DataType)
+ }
+ pos++
+ binary.BigEndian.PutUint16(data[pos:pos+2], req.VBucket)
+ pos += 2
+
+ // 8
+ binary.BigEndian.PutUint32(data[pos:pos+4],
+ uint32(len(req.Body)+req.CollIdLen+len(req.Key)+frameLen(req.UserLen)+len(req.Extras)+len(req.ExtMeta)))
+ pos += 4
+
+ // 12
+ binary.BigEndian.PutUint32(data[pos:pos+4], req.Opaque)
+ pos += 4
+
+ // 16
+ if req.Cas != 0 {
+ binary.BigEndian.PutUint64(data[pos:pos+8], req.Cas)
+ }
+ pos += 8
+
+ // 24 Flexible extras
+ if req.UserLen > 0 {
+ if req.UserLen < FAST_FRAME_LEN {
+ data[pos] = byte((uint8(FrameImpersonate) << 4) | uint8(req.UserLen))
+ pos++
+ } else {
+ data[pos] = byte((uint8(FrameImpersonate) << 4) | uint8(FAST_FRAME_LEN))
+ pos++
+ data[pos] = byte(req.UserLen - FAST_FRAME_LEN)
+ pos++
+ }
+ copy(data[pos:pos+req.UserLen], req.Username[:req.UserLen])
+ pos += req.UserLen
+ }
+
+ if len(req.Extras) > 0 {
+ copy(data[pos:pos+len(req.Extras)], req.Extras)
+ pos += len(req.Extras)
+ }
+
+ if len(req.Key) > 0 {
+ if req.CollIdLen > 0 {
+ copy(data[pos:pos+req.CollIdLen], req.CollId[:])
+ pos += req.CollIdLen
+ }
+ copy(data[pos:pos+len(req.Key)], req.Key)
+ pos += len(req.Key)
+ }
+
+ return pos
+}
+
+// Returns pos and if trailing by half byte
+func (req *MCRequest) fillFlexHeaderBytes(data []byte) (int, bool) {
+
+ // Byte/ 0 | 1 | 2 | 3 |
+ // / | | | |
+ // |0 1 2 3 4 5 6 7|0 1 2 3 4 5 6 7|0 1 2 3 4 5 6 7|0 1 2 3 4 5 6 7|
+ // +---------------+---------------+---------------+---------------+
+ // 0| Magic (0x08) | Opcode | Framing extras| Key Length |
+ // +---------------+---------------+---------------+---------------+
+ // 4| Extras length | Data type | vbucket id |
+ // +---------------+---------------+---------------+---------------+
+ // 8| Total body length |
+ // +---------------+---------------+---------------+---------------+
+ // 12| Opaque |
+ // +---------------+---------------+---------------+---------------+
+ // 16| CAS |
+ // | |
+ // +---------------+---------------+---------------+---------------+
+ // Total 24 bytes
+
+ data[0] = FLEX_MAGIC
+ data[1] = byte(req.Opcode)
+ data[3] = byte(len(req.Key) + req.CollIdLen)
+ elen := len(req.Extras)
+ data[4] = byte(elen)
+ if req.DataType != 0 {
+ data[5] = byte(req.DataType)
+ }
+ binary.BigEndian.PutUint16(data[6:8], req.VBucket)
+ binary.BigEndian.PutUint32(data[12:16], req.Opaque)
+ if req.Cas != 0 {
+ binary.BigEndian.PutUint64(data[16:24], req.Cas)
+ }
+ pos := HDR_LEN
+
+ // Add framing infos
+ var framingExtras []byte
+ var outputBytes []byte
+ var mergeModeSrc []byte
+ var frameBytes int
+ var halfByteMode bool
+ var mergeMode bool
+ for _, frameInfo := range req.FramingExtras {
+ if !mergeMode {
+ outputBytes, halfByteMode = frameInfo.Bytes()
+ if !halfByteMode {
+ framingExtras = append(framingExtras, outputBytes...)
+ frameBytes += len(outputBytes)
+ } else {
+ mergeMode = true
+ mergeModeSrc = outputBytes
+ }
+ } else {
+ outputBytes, halfByteMode = frameInfo.Bytes()
+ outputBytes := ShiftByteSliceRight4Bits(outputBytes)
+ if halfByteMode {
+ // Previous halfbyte merge with this halfbyte will result in a complete byte
+ mergeMode = false
+ outputBytes = Merge2HalfByteSlices(mergeModeSrc, outputBytes)
+ framingExtras = append(framingExtras, outputBytes...)
+ frameBytes += len(outputBytes)
+ } else {
+ // Merge half byte with a non-half byte will result in a combined half-byte that will
+ // become the source for the next iteration
+ mergeModeSrc = Merge2HalfByteSlices(mergeModeSrc, outputBytes)
+ }
+ }
+ }
+
+ // fast impersonate Flexible Extra
+ if req.UserLen > 0 {
+ if !mergeMode {
+ outputBytes, halfByteMode = obj2Bytes(FrameImpersonate, req.UserLen, req.Username[:req.UserLen])
+ if !halfByteMode {
+ framingExtras = append(framingExtras, outputBytes...)
+ frameBytes += len(outputBytes)
+ } else {
+ mergeMode = true
+ mergeModeSrc = outputBytes
+ }
+ } else {
+ outputBytes, halfByteMode = obj2Bytes(FrameImpersonate, req.UserLen, req.Username[:req.UserLen])
+ outputBytes := ShiftByteSliceRight4Bits(outputBytes)
+ if halfByteMode {
+ // Previous halfbyte merge with this halfbyte will result in a complete byte
+ mergeMode = false
+ outputBytes = Merge2HalfByteSlices(mergeModeSrc, outputBytes)
+ framingExtras = append(framingExtras, outputBytes...)
+ frameBytes += len(outputBytes)
+ } else {
+ // Merge half byte with a non-half byte will result in a combined half-byte that will
+ // become the source for the next iteration
+ mergeModeSrc = Merge2HalfByteSlices(mergeModeSrc, outputBytes)
+ }
+ }
+ }
+
+ if mergeMode {
+ // Commit the temporary merge area into framingExtras
+ framingExtras = append(framingExtras, mergeModeSrc...)
+ frameBytes += len(mergeModeSrc)
+ }
+
+ req.FramingElen = frameBytes
+
+ // these have to be set after we have worked out the size of the Flexible Extras
+ data[2] = byte(req.FramingElen)
+ binary.BigEndian.PutUint32(data[8:12],
+ uint32(len(req.Body)+len(req.Key)+req.CollIdLen+elen+len(req.ExtMeta)+req.FramingElen))
+ copy(data[pos:pos+frameBytes], framingExtras)
+
+ pos += frameBytes
+
+ // Add Extras
+ if len(req.Extras) > 0 {
+ if mergeMode {
+ outputBytes = ShiftByteSliceRight4Bits(req.Extras)
+ data = Merge2HalfByteSlices(data, outputBytes)
+ } else {
+ copy(data[pos:pos+elen], req.Extras)
+ }
+ pos += elen
+ }
+
+ // Add keys
+ if len(req.Key) > 0 {
+ if mergeMode {
+ var key []byte
+ var keylen int
+
+ if req.CollIdLen == 0 {
+ key = req.Key
+ keylen = len(req.Key)
+ } else {
+ key = append(key, req.CollId[:]...)
+ key = append(key, req.Key...)
+ keylen = len(req.Key) + req.CollIdLen
+ }
+
+ outputBytes = ShiftByteSliceRight4Bits(key)
+ data = Merge2HalfByteSlices(data, outputBytes)
+ pos += keylen
+ } else {
+ if req.CollIdLen > 0 {
+ copy(data[pos:pos+req.CollIdLen], req.CollId[:])
+ pos += req.CollIdLen
+ }
+ copy(data[pos:pos+len(req.Key)], req.Key)
+ pos += len(req.Key)
+ }
+ }
+
+ return pos, mergeMode
+}
+
+func (req *MCRequest) FillHeaderBytes(data []byte) (int, bool) {
+ if len(req.FramingExtras) > 0 {
+ return req.fillFlexHeaderBytes(data)
+ } else if req.UserLen > 0 {
+ return req.fillFastFlexHeaderBytes(data), false
+ } else {
+ return req.fillRegularHeaderBytes(data), false
+ }
+}
+
+// HeaderBytes will return the wire representation of the request header
+// (with the extras and key).
+func (req *MCRequest) HeaderBytes() []byte {
+ data := make([]byte, req.HdrSize())
+
+ req.FillHeaderBytes(data)
+
+ return data
+}
+
+// Bytes will return the wire representation of this request.
+func (req *MCRequest) Bytes() []byte {
+ data := make([]byte, req.Size())
+ req.bytes(data)
+ return data
+}
+
+// BytesPreallocated will fill the data in the preallocated slice
+func (req *MCRequest) BytesPreallocated(out []byte) {
+ for i := 0; i < len(out); i++ {
+ out[i] = 0
+ }
+ req.bytes(out)
+}
+
+func (req *MCRequest) bytes(data []byte) {
+ pos, halfByteMode := req.FillHeaderBytes(data)
+ // TODO - the halfByteMode should be revisited for a more efficient
+ // way of doing things
+
+ if len(req.Body) > 0 {
+ if halfByteMode {
+ shifted := ShiftByteSliceRight4Bits(req.Body)
+ data = Merge2HalfByteSlices(data, shifted)
+ } else {
+ copy(data[pos:pos+len(req.Body)], req.Body)
+ }
+ }
+
+ if len(req.ExtMeta) > 0 {
+ if halfByteMode {
+ shifted := ShiftByteSliceRight4Bits(req.ExtMeta)
+ data = Merge2HalfByteSlices(data, shifted)
+ } else {
+ copy(data[pos+len(req.Body):pos+len(req.Body)+len(req.ExtMeta)], req.ExtMeta)
+ }
+ }
+}
+
+// Transmit will send this request message across a writer.
+func (req *MCRequest) Transmit(w io.Writer) (n int, err error) {
+ l := req.Size()
+ if l < _BUFLEN {
+ data := make([]byte, l)
+ req.bytes(data)
+ n, err = w.Write(data)
+ } else {
+ data := make([]byte, req.HdrSize())
+ req.FillHeaderBytes(data)
+ n, err = w.Write(data)
+ if err == nil {
+ m := 0
+ m, err = w.Write(req.Body)
+ n += m
+ }
+ }
+ return
+}
+
+func (req *MCRequest) receiveHeaderCommon(hdrBytes []byte) (elen, totalBodyLen int) {
+ elen = int(hdrBytes[4])
+ // Data type at 5
+ req.DataType = uint8(hdrBytes[5])
+
+ req.Opcode = CommandCode(hdrBytes[1])
+ // Vbucket at 6:7
+ req.VBucket = binary.BigEndian.Uint16(hdrBytes[6:])
+ totalBodyLen = int(binary.BigEndian.Uint32(hdrBytes[8:]))
+
+ req.Opaque = binary.BigEndian.Uint32(hdrBytes[12:])
+ req.Cas = binary.BigEndian.Uint64(hdrBytes[16:])
+ return
+}
+
+func (req *MCRequest) receiveRegHeader(hdrBytes []byte) (elen, totalBodyLen int) {
+ elen, totalBodyLen = req.receiveHeaderCommon(hdrBytes)
+ req.Keylen = int(binary.BigEndian.Uint16(hdrBytes[2:]))
+ return
+}
+
+func (req *MCRequest) receiveFlexibleFramingHeader(hdrBytes []byte) (elen, totalBodyLen, framingElen int) {
+ elen, totalBodyLen = req.receiveHeaderCommon(hdrBytes)
+
+ // For flexible framing header, key length is a single byte at byte index 3
+ req.Keylen = int(binary.BigEndian.Uint16(hdrBytes[2:]) & 0x0ff)
+ // Flexible framing lengh is a single byte at index 2
+ framingElen = int(binary.BigEndian.Uint16(hdrBytes[2:]) >> 8)
+ req.FramingElen = framingElen
+ return
+}
+
+func (req *MCRequest) populateRegularBody(r io.Reader, totalBodyLen, elen int) (int, error) {
+ var m int
+ var err error
+ if totalBodyLen > 0 {
+ buf := make([]byte, totalBodyLen)
+ m, err = io.ReadFull(r, buf)
+ if err == nil {
+ if req.Opcode >= TAP_MUTATION &&
+ req.Opcode <= TAP_CHECKPOINT_END &&
+ len(buf) > 1 {
+ // In these commands there is "engine private"
+ // data at the end of the extras. The first 2
+ // bytes of extra data give its length.
+ elen += int(binary.BigEndian.Uint16(buf))
+ }
+
+ req.Extras = buf[0:elen]
+ req.Key = buf[elen : req.Keylen+elen]
+
+ // get the length of extended metadata
+ extMetaLen := 0
+ if elen > 29 {
+ extMetaLen = int(binary.BigEndian.Uint16(req.Extras[28:30]))
+ }
+
+ bodyLen := totalBodyLen - req.Keylen - elen - extMetaLen
+ if bodyLen > MaxBodyLen {
+ return m, fmt.Errorf("%d is too big (max %d)",
+ bodyLen, MaxBodyLen)
+ }
+
+ req.Body = buf[req.Keylen+elen : req.Keylen+elen+bodyLen]
+ req.ExtMeta = buf[req.Keylen+elen+bodyLen:]
+ }
+ }
+ return m, err
+}
+
+func (req *MCRequest) populateFlexBody(r io.Reader, totalBodyLen, elen, framingElen int) (int, error) {
+ var m int
+ var err error
+ if totalBodyLen > 0 {
+ buf := make([]byte, totalBodyLen)
+ m, err = io.ReadFull(r, buf)
+ if err != nil {
+ return m, err
+ }
+ err = req.populateFlexBodyInternal(buf, totalBodyLen, elen, framingElen)
+ }
+ return m, err
+}
+
+func (req *MCRequest) populateFlexBodyInternal(buf []byte, totalBodyLen, elen, framingElen int) error {
+ var halfByteOffset bool
+ var err error
+ if framingElen > 0 {
+ var objs []FrameInfo
+ objs, err, halfByteOffset = parseFrameInfoObjects(buf, framingElen)
+ if err != nil {
+ return err
+ }
+ req.FramingExtras = objs
+ }
+
+ err = req.populateFlexBodyAfterFrames(buf, totalBodyLen, elen, framingElen, halfByteOffset)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (req *MCRequest) populateFlexBodyAfterFrames(buf []byte, totalBodyLen, elen, framingElen int, halfByteOffset bool) error {
+ var idxCursor int = framingElen
+ if req.Opcode >= TAP_MUTATION && req.Opcode <= TAP_CHECKPOINT_END && len(buf[idxCursor:]) > 1 {
+ // In these commands there is "engine private"
+ // data at the end of the extras. The first 2
+ // bytes of extra data give its length.
+ if !halfByteOffset {
+ elen += int(binary.BigEndian.Uint16(buf[idxCursor:]))
+ } else {
+ // 0 1 2 3 4 .... 19 20 21 22 ... 32
+ // ^-----^ ^-------^ ^------------^
+ // offset data do not care
+ var buffer uint32 = binary.BigEndian.Uint32(buf[idxCursor:])
+ buffer &= 0xffff000
+ elen += int(buffer >> 12)
+ }
+ }
+
+ // Get the extras
+ if !halfByteOffset {
+ req.Extras = buf[idxCursor : idxCursor+elen]
+ } else {
+ preShift := buf[idxCursor : idxCursor+elen+1]
+ req.Extras = ShiftByteSliceLeft4Bits(preShift)
+ }
+ idxCursor += elen
+
+ // Get the Key
+ if !halfByteOffset {
+ req.Key = buf[idxCursor : idxCursor+req.Keylen]
+ } else {
+ preShift := buf[idxCursor : idxCursor+req.Keylen+1]
+ req.Key = ShiftByteSliceLeft4Bits(preShift)
+ }
+ idxCursor += req.Keylen
+
+ // get the length of extended metadata
+ extMetaLen := 0
+ if elen > 29 {
+ extMetaLen = int(binary.BigEndian.Uint16(req.Extras[28:30]))
+ }
+ idxCursor += extMetaLen
+
+ bodyLen := totalBodyLen - req.Keylen - elen - extMetaLen - framingElen
+ if bodyLen > MaxBodyLen {
+ return fmt.Errorf("%d is too big (max %d)",
+ bodyLen, MaxBodyLen)
+ }
+
+ if !halfByteOffset {
+ req.Body = buf[idxCursor : idxCursor+bodyLen]
+ idxCursor += bodyLen
+ } else {
+ preShift := buf[idxCursor : idxCursor+bodyLen+1]
+ req.Body = ShiftByteSliceLeft4Bits(preShift)
+ idxCursor += bodyLen
+ }
+
+ if extMetaLen > 0 {
+ if !halfByteOffset {
+ req.ExtMeta = buf[idxCursor:]
+ } else {
+ preShift := buf[idxCursor:]
+ req.ExtMeta = ShiftByteSliceLeft4Bits(preShift)
+ }
+ }
+
+ return nil
+}
+
+// Receive will fill this MCRequest with the data from a reader.
+func (req *MCRequest) Receive(r io.Reader, hdrBytes []byte) (int, error) {
+ if len(hdrBytes) < HDR_LEN {
+ hdrBytes = []byte{
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0}
+ }
+ n, err := io.ReadFull(r, hdrBytes)
+ if err != nil {
+ fmt.Printf("Err %v\n", err)
+ return n, err
+ }
+
+ switch hdrBytes[0] {
+ case RES_MAGIC:
+ fallthrough
+ case REQ_MAGIC:
+ elen, totalBodyLen := req.receiveRegHeader(hdrBytes)
+ bodyRead, err := req.populateRegularBody(r, totalBodyLen, elen)
+ return n + bodyRead, err
+ case FLEX_MAGIC:
+ elen, totalBodyLen, framingElen := req.receiveFlexibleFramingHeader(hdrBytes)
+ bodyRead, err := req.populateFlexBody(r, totalBodyLen, elen, framingElen)
+ return n + bodyRead, err
+ default:
+ return n, fmt.Errorf("bad magic: 0x%02x", hdrBytes[0])
+ }
+}
diff --git a/vendor/github.com/couchbase/gomemcached/mc_res.go b/vendor/github.com/couchbase/gomemcached/mc_res.go
new file mode 100644
index 00000000..17bdc234
--- /dev/null
+++ b/vendor/github.com/couchbase/gomemcached/mc_res.go
@@ -0,0 +1,361 @@
+package gomemcached
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+ "sync"
+)
+
+// MCResponse is memcached response
+type MCResponse struct {
+ // The command opcode of the command that sent the request
+ Opcode CommandCode
+ // The status of the response
+ Status Status
+ // The opaque sent in the request
+ Opaque uint32
+ // The CAS identifier (if applicable)
+ Cas uint64
+ // Extras, key, and body for this response
+ Extras, FlexibleExtras, Key, Body []byte
+ // If true, this represents a fatal condition and we should hang up
+ Fatal bool
+ // Datatype identifier
+ DataType uint8
+
+ recycleFunc func()
+ recycleOnce sync.Once
+}
+
+// A debugging string representation of this response
+func (res MCResponse) String() string {
+ return fmt.Sprintf("{MCResponse status=%v keylen=%d, extralen=%d, bodylen=%d, flexible=%v}",
+ res.Status, len(res.Key), len(res.Extras), len(res.Body), res.FlexibleExtras)
+}
+
+// Response as an error.
+func (res *MCResponse) Error() string {
+ return fmt.Sprintf("MCResponse status=%v, opcode=%v, opaque=%v, msg: %s",
+ res.Status, res.Opcode, res.Opaque, string(res.Body))
+}
+
+func errStatus(e error) Status {
+ status := UNKNOWN_STATUS
+ if res, ok := e.(*MCResponse); ok {
+ status = res.Status
+ }
+ return status
+}
+
+// IsNotFound is true if this error represents a "not found" response.
+func IsNotFound(e error) bool {
+ return errStatus(e) == KEY_ENOENT
+}
+
+// IsFatal is false if this error isn't believed to be fatal to a connection.
+func IsFatal(e error) bool {
+ if e == nil {
+ return false
+ }
+ _, ok := isFatal[errStatus(e)]
+ if ok {
+ return true
+ }
+ return false
+}
+
+func IsTenantLimit(e error) bool {
+ s := errStatus(e)
+ return s >= RATE_LIMITED_NETWORK_INGRESS && s <= BUCKET_SIZE_LIMIT_EXCEEDED
+}
+
+// Size is number of bytes this response consumes on the wire.
+func (res *MCResponse) Size() int {
+ return HDR_LEN + len(res.Extras) + len(res.Key) + len(res.Body)
+}
+
+func (res *MCResponse) fillHeaderBytes(data []byte) int {
+ pos := 0
+ data[pos] = RES_MAGIC
+ pos++
+ data[pos] = byte(res.Opcode)
+ pos++
+ binary.BigEndian.PutUint16(data[pos:pos+2],
+ uint16(len(res.Key)))
+ pos += 2
+
+ // 4
+ data[pos] = byte(len(res.Extras))
+ pos++
+ // Data type
+ if res.DataType != 0 {
+ data[pos] = byte(res.DataType)
+ } else {
+ data[pos] = 0
+ }
+ pos++
+ binary.BigEndian.PutUint16(data[pos:pos+2], uint16(res.Status))
+ pos += 2
+
+ // 8
+ binary.BigEndian.PutUint32(data[pos:pos+4],
+ uint32(len(res.Body)+len(res.Key)+len(res.Extras)))
+ pos += 4
+
+ // 12
+ binary.BigEndian.PutUint32(data[pos:pos+4], res.Opaque)
+ pos += 4
+
+ // 16
+ binary.BigEndian.PutUint64(data[pos:pos+8], res.Cas)
+ pos += 8
+
+ if len(res.Extras) > 0 {
+ copy(data[pos:pos+len(res.Extras)], res.Extras)
+ pos += len(res.Extras)
+ }
+
+ if len(res.Key) > 0 {
+ copy(data[pos:pos+len(res.Key)], res.Key)
+ pos += len(res.Key)
+ }
+
+ return pos
+}
+
+// HeaderBytes will get just the header bytes for this response.
+func (res *MCResponse) HeaderBytes() []byte {
+ data := make([]byte, HDR_LEN+len(res.Extras)+len(res.Key))
+
+ res.fillHeaderBytes(data)
+
+ return data
+}
+
+// Bytes will return the actual bytes transmitted for this response.
+func (res *MCResponse) Bytes() []byte {
+ data := make([]byte, res.Size())
+
+ pos := res.fillHeaderBytes(data)
+
+ copy(data[pos:pos+len(res.Body)], res.Body)
+
+ return data
+}
+
+// Transmit will send this response message across a writer.
+func (res *MCResponse) Transmit(w io.Writer) (n int, err error) {
+ if len(res.Body) < 128 {
+ n, err = w.Write(res.Bytes())
+ } else {
+ n, err = w.Write(res.HeaderBytes())
+ if err == nil {
+ m := 0
+ m, err = w.Write(res.Body)
+ m += n
+ }
+ }
+ return
+}
+
+// Receive will fill this MCResponse with the data from this reader.
+func (res *MCResponse) Receive(r io.Reader, hdrBytes []byte) (n int, err error) {
+ return res.ReceiveWithBuf(r, hdrBytes, nil)
+}
+
+func (res *MCResponse) ReceiveWithBuf(r io.Reader, hdrbytes, buf []byte) (n int, err error) {
+ return res.receiveInternal(r, hdrbytes, buf, nil, nil)
+}
+
+func (res *MCResponse) ReceiveWithDatapool(r io.Reader, hdrbytes []byte, getter func(uint64) ([]byte, error), done func([]byte)) (n int, err error) {
+ return res.receiveInternal(r, hdrbytes, nil, getter, done)
+}
+
+// receiveInternal takes an optional pre-allocated []byte buf which
+// will be used if its capacity is large enough, otherwise a new
+// []byte slice is allocated
+// If a getter is provided, it'll attempt to use it before allocating
+func (res *MCResponse) receiveInternal(r io.Reader, hdrBytes, buf []byte, getter func(uint64) ([]byte, error), done func([]byte)) (n int, err error) {
+ if len(hdrBytes) < HDR_LEN {
+ hdrBytes = []byte{
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0}
+ }
+ n, err = io.ReadFull(r, hdrBytes)
+ if err != nil {
+ return n, err
+ }
+
+ var klen, flen int
+ switch hdrBytes[0] {
+ case RES_MAGIC:
+ fallthrough
+ case REQ_MAGIC:
+ klen = int(binary.BigEndian.Uint16(hdrBytes[2:4]))
+ case FLEX_RES_MAGIC:
+ flen = int(hdrBytes[2])
+ klen = int(hdrBytes[3])
+ default:
+ return n, fmt.Errorf("bad magic: 0x%02x", hdrBytes[0])
+ }
+ elen := int(hdrBytes[4])
+
+ res.Opcode = CommandCode(hdrBytes[1])
+ res.DataType = uint8(hdrBytes[5])
+ res.Status = Status(binary.BigEndian.Uint16(hdrBytes[6:8]))
+ res.Opaque = binary.BigEndian.Uint32(hdrBytes[12:16])
+ res.Cas = binary.BigEndian.Uint64(hdrBytes[16:24])
+
+ bodyLen := int(binary.BigEndian.Uint32(hdrBytes[8:12])) - (klen + elen + flen)
+
+ //defer function to debug the panic seen with MB-15557
+ defer func() {
+ if e := recover(); e != nil {
+ err = fmt.Errorf(`Panic in Receive. Response %v \n
+ key len %v extra len %v bodylen %v`, res, klen, elen, bodyLen)
+ }
+ }()
+
+ bufNeed := klen + elen + flen + bodyLen
+ if buf != nil && cap(buf) >= bufNeed {
+ buf = buf[0:bufNeed]
+ } else {
+ if getter != nil {
+ buf, err = getter(uint64(bufNeed))
+ if err != nil {
+ buf = make([]byte, bufNeed)
+ } else {
+ res.recycleFunc = func() {
+ done(buf)
+ }
+ }
+ } else {
+ buf = make([]byte, bufNeed)
+ }
+ }
+
+ m, err := io.ReadFull(r, buf)
+ if err == nil {
+ if flen > 0 {
+ res.FlexibleExtras = buf[0:flen]
+ buf = buf[flen:]
+ }
+ res.Extras = buf[0:elen]
+ res.Key = buf[elen : klen+elen]
+ res.Body = buf[klen+elen:]
+ }
+
+ return n + m, err
+}
+
+func (res *MCResponse) ComputeUnits() (ru uint64, wu uint64) {
+ if res.FlexibleExtras == nil || len(res.FlexibleExtras) == 0 {
+ return
+ }
+ for i := 0; i < len(res.FlexibleExtras); {
+ // TODO check: this seems to be the opposite of the documentation?
+ l := res.FlexibleExtras[i] & 0x0f
+ switch res.FlexibleExtras[i] >> 4 {
+ case ComputeUnitsRead:
+ ru = uint64(binary.BigEndian.Uint16(res.FlexibleExtras[i+1 : i+3]))
+ case ComputeUnitsWrite:
+ wu = uint64(binary.BigEndian.Uint16(res.FlexibleExtras[i+1 : i+3]))
+
+ // ID escape: we need to skip the next byte, and ignore
+ case 15:
+ i++
+ }
+
+ // data len is either 1..14, or 15 + next byte
+ switch l {
+ case 0:
+ panic(fmt.Sprintf("Invalid Flexible Extras length received! %v", l))
+ case 15:
+
+ // rest of length in next byte, which needs to be skipped too
+ i++
+ i += int(l + 1 + res.FlexibleExtras[i])
+ default:
+ i += int(l + 1)
+ }
+ }
+ return
+}
+
+func (res *MCResponse) Recycle() {
+ if res != nil && res.recycleFunc != nil {
+ res.recycleOnce.Do(func() {
+ res.recycleFunc()
+ })
+ }
+}
+
+type MCResponsePool struct {
+ pool *sync.Pool
+}
+
+func NewMCResponsePool() *MCResponsePool {
+ rv := &MCResponsePool{
+ pool: &sync.Pool{
+ New: func() interface{} {
+ return &MCResponse{}
+ },
+ },
+ }
+
+ return rv
+}
+
+func (this *MCResponsePool) Get() *MCResponse {
+ return this.pool.Get().(*MCResponse)
+}
+
+func (this *MCResponsePool) Put(r *MCResponse) {
+ if r == nil {
+ return
+ }
+
+ r.Extras = nil
+ r.Key = nil
+ r.Body = nil
+ r.Fatal = false
+
+ this.pool.Put(r)
+}
+
+type StringMCResponsePool struct {
+ pool *sync.Pool
+ size int
+}
+
+func NewStringMCResponsePool(size int) *StringMCResponsePool {
+ rv := &StringMCResponsePool{
+ pool: &sync.Pool{
+ New: func() interface{} {
+ return make(map[string]*MCResponse, size)
+ },
+ },
+ size: size,
+ }
+
+ return rv
+}
+
+func (this *StringMCResponsePool) Get() map[string]*MCResponse {
+ return this.pool.Get().(map[string]*MCResponse)
+}
+
+func (this *StringMCResponsePool) Put(m map[string]*MCResponse) {
+ if m == nil || len(m) > 2*this.size {
+ return
+ }
+
+ for k := range m {
+ m[k] = nil
+ delete(m, k)
+ }
+
+ this.pool.Put(m)
+}
diff --git a/vendor/github.com/couchbase/gomemcached/tap.go b/vendor/github.com/couchbase/gomemcached/tap.go
new file mode 100644
index 00000000..e4862328
--- /dev/null
+++ b/vendor/github.com/couchbase/gomemcached/tap.go
@@ -0,0 +1,168 @@
+package gomemcached
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "strings"
+)
+
+type TapConnectFlag uint32
+
+// Tap connect option flags
+const (
+ BACKFILL = TapConnectFlag(0x01)
+ DUMP = TapConnectFlag(0x02)
+ LIST_VBUCKETS = TapConnectFlag(0x04)
+ TAKEOVER_VBUCKETS = TapConnectFlag(0x08)
+ SUPPORT_ACK = TapConnectFlag(0x10)
+ REQUEST_KEYS_ONLY = TapConnectFlag(0x20)
+ CHECKPOINT = TapConnectFlag(0x40)
+ REGISTERED_CLIENT = TapConnectFlag(0x80)
+ FIX_FLAG_BYTEORDER = TapConnectFlag(0x100)
+)
+
+// Tap opaque event subtypes
+const (
+ TAP_OPAQUE_ENABLE_AUTO_NACK = 0
+ TAP_OPAQUE_INITIAL_VBUCKET_STREAM = 1
+ TAP_OPAQUE_ENABLE_CHECKPOINT_SYNC = 2
+ TAP_OPAQUE_CLOSE_TAP_STREAM = 7
+ TAP_OPAQUE_CLOSE_BACKFILL = 8
+)
+
+// Tap item flags
+const (
+ TAP_ACK = 1
+ TAP_NO_VALUE = 2
+ TAP_FLAG_NETWORK_BYTE_ORDER = 4
+)
+
+// TapConnectFlagNames for TapConnectFlag
+var TapConnectFlagNames = map[TapConnectFlag]string{
+ BACKFILL: "BACKFILL",
+ DUMP: "DUMP",
+ LIST_VBUCKETS: "LIST_VBUCKETS",
+ TAKEOVER_VBUCKETS: "TAKEOVER_VBUCKETS",
+ SUPPORT_ACK: "SUPPORT_ACK",
+ REQUEST_KEYS_ONLY: "REQUEST_KEYS_ONLY",
+ CHECKPOINT: "CHECKPOINT",
+ REGISTERED_CLIENT: "REGISTERED_CLIENT",
+ FIX_FLAG_BYTEORDER: "FIX_FLAG_BYTEORDER",
+}
+
+// TapItemParser is a function to parse a single tap extra.
+type TapItemParser func(io.Reader) (interface{}, error)
+
+// TapParseUint64 is a function to parse a single tap uint64.
+func TapParseUint64(r io.Reader) (interface{}, error) {
+ var rv uint64
+ err := binary.Read(r, binary.BigEndian, &rv)
+ return rv, err
+}
+
+// TapParseUint16 is a function to parse a single tap uint16.
+func TapParseUint16(r io.Reader) (interface{}, error) {
+ var rv uint16
+ err := binary.Read(r, binary.BigEndian, &rv)
+ return rv, err
+}
+
+// TapParseBool is a function to parse a single tap boolean.
+func TapParseBool(r io.Reader) (interface{}, error) {
+ return true, nil
+}
+
+// TapParseVBList parses a list of vBucket numbers as []uint16.
+func TapParseVBList(r io.Reader) (interface{}, error) {
+ num, err := TapParseUint16(r)
+ if err != nil {
+ return nil, err
+ }
+ n := int(num.(uint16))
+
+ rv := make([]uint16, n)
+ for i := 0; i < n; i++ {
+ x, err := TapParseUint16(r)
+ if err != nil {
+ return nil, err
+ }
+ rv[i] = x.(uint16)
+ }
+
+ return rv, err
+}
+
+// TapFlagParsers parser functions for TAP fields.
+var TapFlagParsers = map[TapConnectFlag]TapItemParser{
+ BACKFILL: TapParseUint64,
+ LIST_VBUCKETS: TapParseVBList,
+}
+
+// SplitFlags will split the ORed flags into the individual bit flags.
+func (f TapConnectFlag) SplitFlags() []TapConnectFlag {
+ rv := []TapConnectFlag{}
+ for i := uint32(1); f != 0; i = i << 1 {
+ if uint32(f)&i == i {
+ rv = append(rv, TapConnectFlag(i))
+ }
+ f = TapConnectFlag(uint32(f) & (^i))
+ }
+ return rv
+}
+
+func (f TapConnectFlag) String() string {
+ parts := []string{}
+ for _, x := range f.SplitFlags() {
+ p := TapConnectFlagNames[x]
+ if p == "" {
+ p = fmt.Sprintf("0x%x", int(x))
+ }
+ parts = append(parts, p)
+ }
+ return strings.Join(parts, "|")
+}
+
+type TapConnect struct {
+ Flags map[TapConnectFlag]interface{}
+ RemainingBody []byte
+ Name string
+}
+
+// ParseTapCommands parse the tap request into the interesting bits we may
+// need to do something with.
+func (req *MCRequest) ParseTapCommands() (TapConnect, error) {
+ rv := TapConnect{
+ Flags: map[TapConnectFlag]interface{}{},
+ Name: string(req.Key),
+ }
+
+ if len(req.Extras) < 4 {
+ return rv, fmt.Errorf("not enough extra bytes: %x", req.Extras)
+ }
+
+ flags := TapConnectFlag(binary.BigEndian.Uint32(req.Extras))
+
+ r := bytes.NewReader(req.Body)
+
+ for _, f := range flags.SplitFlags() {
+ fun := TapFlagParsers[f]
+ if fun == nil {
+ fun = TapParseBool
+ }
+
+ val, err := fun(r)
+ if err != nil {
+ return rv, err
+ }
+
+ rv.Flags[f] = val
+ }
+
+ var err error
+ rv.RemainingBody, err = ioutil.ReadAll(r)
+
+ return rv, err
+}
diff --git a/vendor/github.com/couchbase/goutils/LICENSE.md b/vendor/github.com/couchbase/goutils/LICENSE.md
new file mode 100644
index 00000000..e06d2081
--- /dev/null
+++ b/vendor/github.com/couchbase/goutils/LICENSE.md
@@ -0,0 +1,202 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/vendor/github.com/couchbase/goutils/logging/logger.go b/vendor/github.com/couchbase/goutils/logging/logger.go
new file mode 100644
index 00000000..27ac5542
--- /dev/null
+++ b/vendor/github.com/couchbase/goutils/logging/logger.go
@@ -0,0 +1,361 @@
+// Copyright (c) 2016 Couchbase, Inc.
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+// except in compliance with the License. You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed under the
+// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+// either express or implied. See the License for the specific language governing permissions
+// and limitations under the License.
+
+package logging
+
+import (
+ "os"
+ "runtime"
+ "strings"
+ "sync"
+)
+
// Level is the logging verbosity; lower values are more severe.
type Level int

const (
	NONE    = Level(iota) // Disable all logging
	FATAL                 // System is in severe error state and has to abort
	SEVERE                // System is in severe error state and cannot recover reliably
	ERROR                 // System is in error state but can recover and continue reliably
	WARN                  // System approaching error state, or is in a correct but undesirable state
	INFO                  // System-level events and status, in correct states
	REQUEST               // Request-level events, with request-specific rlevel
	TRACE                 // Trace detailed system execution, e.g. function entry / exit
	DEBUG                 // Debug
)

// LogEntryFormatter selects the on-disk format of a log entry.
type LogEntryFormatter int

const (
	TEXTFORMATTER = LogEntryFormatter(iota)
	JSONFORMATTER
	KVFORMATTER
	UNIFORMFORMATTER
)

// String returns the display name of the level, or "UNKNOWN" for a
// value outside the declared range. (The original indexed the name
// table directly and panicked on out-of-range levels.)
func (level Level) String() string {
	if level < NONE || level > DEBUG {
		return "UNKNOWN"
	}
	return _LEVEL_NAMES[level]
}

// _LEVEL_NAMES maps each Level to its display name.
var _LEVEL_NAMES = []string{
	DEBUG:   "DEBUG",
	TRACE:   "TRACE",
	REQUEST: "REQUEST",
	INFO:    "INFO",
	WARN:    "WARN",
	ERROR:   "ERROR",
	SEVERE:  "SEVERE",
	FATAL:   "FATAL",
	NONE:    "NONE",
}
+
// _LEVEL_MAP maps lower-case level names to Level values; used by
// ParseLevel (which lower-cases its input before the lookup).
var _LEVEL_MAP = map[string]Level{
	"debug":   DEBUG,
	"trace":   TRACE,
	"request": REQUEST,
	"info":    INFO,
	"warn":    WARN,
	"error":   ERROR,
	"severe":  SEVERE,
	"fatal":   FATAL,
	"none":    NONE,
}
+
// cache logging enablement to improve runtime performance (reduces from multiple tests to a single test on each call)
var (
	cachedDebug   bool
	cachedTrace   bool
	cachedRequest bool
	cachedInfo    bool
	cachedWarn    bool
	cachedError   bool
	cachedSevere  bool
	cachedFatal   bool
)

// maintain the cached logging state.
// Must be called whenever logger or curLevel changes (SetLogger /
// SetLevel do so while holding loggerMutex).
func cacheLoggingChange() {
	cachedDebug = !skipLogging(DEBUG)
	cachedTrace = !skipLogging(TRACE)
	cachedRequest = !skipLogging(REQUEST)
	cachedInfo = !skipLogging(INFO)
	cachedWarn = !skipLogging(WARN)
	cachedError = !skipLogging(ERROR)
	cachedSevere = !skipLogging(SEVERE)
	cachedFatal = !skipLogging(FATAL)
}
+
+func ParseLevel(name string) (level Level, ok bool) {
+ level, ok = _LEVEL_MAP[strings.ToLower(name)]
+ return
+}
+
// Logger provides a common interface for logging libraries
type Logger interface {
	// Higher performance: the closure argument is only evaluated when
	// the entry will actually be emitted.
	Loga(level Level, f func() string)
	Debuga(f func() string)
	Tracea(f func() string)
	Requesta(rlevel Level, f func() string)
	Infoa(f func() string)
	Warna(f func() string)
	Errora(f func() string)
	Severea(f func() string)
	Fatala(f func() string)

	// Printf style
	Logf(level Level, fmt string, args ...interface{})
	Debugf(fmt string, args ...interface{})
	Tracef(fmt string, args ...interface{})
	Requestf(rlevel Level, fmt string, args ...interface{})
	Infof(fmt string, args ...interface{})
	Warnf(fmt string, args ...interface{})
	Errorf(fmt string, args ...interface{})
	Severef(fmt string, args ...interface{})
	Fatalf(fmt string, args ...interface{})

	/*
	   These APIs control the logging level
	*/
	SetLevel(Level) // Set the logging level
	Level() Level   // Get the current logging level
}
+
// logger is the package-wide Logger; nil disables all logging.
var logger Logger = nil

// curLevel mirrors logger.Level() so skipLogging can test it without
// calling through the interface.
var curLevel Level = DEBUG // initially set to never skip

// loggerMutex guards logger and curLevel (and serializes log calls).
var loggerMutex sync.RWMutex
+
+// All the methods below first acquire the mutex (mostly in exclusive mode)
+// and only then check if logging at the current level is enabled.
+// This introduces a fair bottleneck for those log entries that should be
+// skipped (the majority, at INFO or below levels)
+// We try to predict here if we should lock the mutex at all by caching
+// the current log level: while dynamically changing logger, there might
+// be the odd entry skipped as the new level is cached.
+// Since we seem to never change the logger, this is not an issue.
+func skipLogging(level Level) bool {
+ if logger == nil {
+ return true
+ }
+ return level > curLevel
+}
+
+func SetLogger(newLogger Logger) {
+ loggerMutex.Lock()
+ defer loggerMutex.Unlock()
+ logger = newLogger
+ if logger == nil {
+ curLevel = NONE
+ } else {
+ curLevel = newLogger.Level()
+ }
+ cacheLoggingChange()
+}
+
+// we are using deferred unlocking here throughout as we have to do this
+// for the anonymous function variants even though it would be more efficient
+// to not do this for the printf style variants
+// anonymous function variants
+
+func Loga(level Level, f func() string) {
+ if skipLogging(level) {
+ return
+ }
+ loggerMutex.Lock()
+ defer loggerMutex.Unlock()
+ logger.Loga(level, f)
+}
+
+func Debuga(f func() string) {
+ if !cachedDebug {
+ return
+ }
+ loggerMutex.Lock()
+ defer loggerMutex.Unlock()
+ logger.Debuga(f)
+}
+
+func Tracea(f func() string) {
+ if !cachedTrace {
+ return
+ }
+ loggerMutex.Lock()
+ defer loggerMutex.Unlock()
+ logger.Tracea(f)
+}
+
+func Requesta(rlevel Level, f func() string) {
+ if !cachedRequest {
+ return
+ }
+ loggerMutex.Lock()
+ defer loggerMutex.Unlock()
+ logger.Requesta(rlevel, f)
+}
+
+func Infoa(f func() string) {
+ if !cachedInfo {
+ return
+ }
+ loggerMutex.Lock()
+ defer loggerMutex.Unlock()
+ logger.Infoa(f)
+}
+
+func Warna(f func() string) {
+ if !cachedWarn {
+ return
+ }
+ loggerMutex.Lock()
+ defer loggerMutex.Unlock()
+ logger.Warna(f)
+}
+
+func Errora(f func() string) {
+ if !cachedError {
+ return
+ }
+ loggerMutex.Lock()
+ defer loggerMutex.Unlock()
+ logger.Errora(f)
+}
+
+func Severea(f func() string) {
+ if !cachedSevere {
+ return
+ }
+ loggerMutex.Lock()
+ defer loggerMutex.Unlock()
+ logger.Severea(f)
+}
+
+func Fatala(f func() string) {
+ if !cachedFatal {
+ return
+ }
+ loggerMutex.Lock()
+ defer loggerMutex.Unlock()
+ logger.Fatala(f)
+}
+
+// printf-style variants
+
+func Logf(level Level, fmt string, args ...interface{}) {
+ if skipLogging(level) {
+ return
+ }
+ loggerMutex.Lock()
+ defer loggerMutex.Unlock()
+ logger.Logf(level, fmt, args...)
+}
+
+func Debugf(fmt string, args ...interface{}) {
+ if !cachedDebug {
+ return
+ }
+ loggerMutex.Lock()
+ defer loggerMutex.Unlock()
+ logger.Debugf(fmt, args...)
+}
+
+func Tracef(fmt string, args ...interface{}) {
+ if !cachedTrace {
+ return
+ }
+ loggerMutex.Lock()
+ defer loggerMutex.Unlock()
+ logger.Tracef(fmt, args...)
+}
+
+func Requestf(rlevel Level, fmt string, args ...interface{}) {
+ if !cachedRequest {
+ return
+ }
+ loggerMutex.Lock()
+ defer loggerMutex.Unlock()
+ logger.Requestf(rlevel, fmt, args...)
+}
+
+func Infof(fmt string, args ...interface{}) {
+ if !cachedInfo {
+ return
+ }
+ loggerMutex.Lock()
+ defer loggerMutex.Unlock()
+ logger.Infof(fmt, args...)
+}
+
+func Warnf(fmt string, args ...interface{}) {
+ if !cachedWarn {
+ return
+ }
+ loggerMutex.Lock()
+ defer loggerMutex.Unlock()
+ logger.Warnf(fmt, args...)
+}
+
+func Errorf(fmt string, args ...interface{}) {
+ if !cachedError {
+ return
+ }
+ loggerMutex.Lock()
+ defer loggerMutex.Unlock()
+ logger.Errorf(fmt, args...)
+}
+
+func Severef(fmt string, args ...interface{}) {
+ if !cachedSevere {
+ return
+ }
+ loggerMutex.Lock()
+ defer loggerMutex.Unlock()
+ logger.Severef(fmt, args...)
+}
+
+func Fatalf(fmt string, args ...interface{}) {
+ if !cachedFatal {
+ return
+ }
+ loggerMutex.Lock()
+ defer loggerMutex.Unlock()
+ logger.Fatalf(fmt, args...)
+}
+
+func SetLevel(level Level) {
+ loggerMutex.Lock()
+ defer loggerMutex.Unlock()
+ logger.SetLevel(level)
+ curLevel = level
+ cacheLoggingChange()
+}
+
+func LogLevel() Level {
+ loggerMutex.RLock()
+ defer loggerMutex.RUnlock()
+ return logger.Level()
+}
+
+func Stackf(level Level, fmt string, args ...interface{}) {
+ if skipLogging(level) {
+ return
+ }
+ buf := make([]byte, 1<<16)
+ n := runtime.Stack(buf, false)
+ s := string(buf[0:n])
+ loggerMutex.Lock()
+ defer loggerMutex.Unlock()
+ logger.Logf(level, fmt, args...)
+ logger.Logf(level, s)
+}
+
+func init() {
+ logger := NewLogger(os.Stderr, INFO, TEXTFORMATTER)
+ SetLogger(logger)
+}
diff --git a/vendor/github.com/couchbase/goutils/logging/logger_golog.go b/vendor/github.com/couchbase/goutils/logging/logger_golog.go
new file mode 100644
index 00000000..11831f4f
--- /dev/null
+++ b/vendor/github.com/couchbase/goutils/logging/logger_golog.go
@@ -0,0 +1,273 @@
+// Copyright (c) 2016-2019 Couchbase, Inc.
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+// except in compliance with the License. You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed under the
+// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+// either express or implied. See the License for the specific language governing permissions
+// and limitations under the License.
+
+package logging
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "log"
+ "strings"
+ "time"
+)
+
// goLogger is a Logger implementation backed by the standard library
// log package.
type goLogger struct {
	logger         *log.Logger // destination writer (no prefix, no flags)
	level          Level       // current verbosity threshold
	entryFormatter formatter   // renders each entry into its output line
}

// Reserved field keys used by the key-value and JSON formatters.
const (
	_LEVEL  = "_level"
	_MSG    = "_msg"
	_TIME   = "_time"
	_RLEVEL = "_rlevel"
)
+
+func NewLogger(out io.Writer, lvl Level, fmtLogging LogEntryFormatter, fmtArgs ...interface{}) *goLogger {
+ logger := &goLogger{
+ logger: log.New(out, "", 0),
+ level: lvl,
+ }
+ if fmtLogging == JSONFORMATTER {
+ logger.entryFormatter = &jsonFormatter{}
+ } else if fmtLogging == KVFORMATTER {
+ logger.entryFormatter = &keyvalueFormatter{}
+ } else if fmtLogging == UNIFORMFORMATTER {
+ logger.entryFormatter = &uniformFormatter{
+ callback: fmtArgs[0].(ComponentCallback),
+ }
+ } else {
+ logger.entryFormatter = &textFormatter{}
+ }
+ return logger
+}
+
+// anonymous function variants
+
+func (gl *goLogger) Loga(level Level, f func() string) {
+ if gl.logger == nil {
+ return
+ }
+ if level <= gl.level {
+ gl.log(level, NONE, f())
+ }
+}
+func (gl *goLogger) Debuga(f func() string) {
+ gl.Loga(DEBUG, f)
+}
+
+func (gl *goLogger) Tracea(f func() string) {
+ gl.Loga(TRACE, f)
+}
+
+func (gl *goLogger) Requesta(rlevel Level, f func() string) {
+ if gl.logger == nil {
+ return
+ }
+ if REQUEST <= gl.level {
+ gl.log(REQUEST, rlevel, f())
+ }
+}
+
+func (gl *goLogger) Infoa(f func() string) {
+ gl.Loga(INFO, f)
+}
+
+func (gl *goLogger) Warna(f func() string) {
+ gl.Loga(WARN, f)
+}
+
+func (gl *goLogger) Errora(f func() string) {
+ gl.Loga(ERROR, f)
+}
+
+func (gl *goLogger) Severea(f func() string) {
+ gl.Loga(SEVERE, f)
+}
+
+func (gl *goLogger) Fatala(f func() string) {
+ gl.Loga(FATAL, f)
+}
+
+// printf-style variants
+
+func (gl *goLogger) Logf(level Level, format string, args ...interface{}) {
+ if gl.logger == nil {
+ return
+ }
+ if level <= gl.level {
+ gl.log(level, NONE, fmt.Sprintf(format, args...))
+ }
+}
+
+func (gl *goLogger) Debugf(format string, args ...interface{}) {
+ gl.Logf(DEBUG, format, args...)
+}
+
+func (gl *goLogger) Tracef(format string, args ...interface{}) {
+ gl.Logf(TRACE, format, args...)
+}
+
+func (gl *goLogger) Requestf(rlevel Level, format string, args ...interface{}) {
+ if gl.logger == nil {
+ return
+ }
+ if REQUEST <= gl.level {
+ gl.log(REQUEST, rlevel, fmt.Sprintf(format, args...))
+ }
+}
+
+func (gl *goLogger) Infof(format string, args ...interface{}) {
+ gl.Logf(INFO, format, args...)
+}
+
+func (gl *goLogger) Warnf(format string, args ...interface{}) {
+ gl.Logf(WARN, format, args...)
+}
+
+func (gl *goLogger) Errorf(format string, args ...interface{}) {
+ gl.Logf(ERROR, format, args...)
+}
+
+func (gl *goLogger) Severef(format string, args ...interface{}) {
+ gl.Logf(SEVERE, format, args...)
+}
+
+func (gl *goLogger) Fatalf(format string, args ...interface{}) {
+ gl.Logf(FATAL, format, args...)
+}
+
// Level returns the logger's current verbosity threshold.
func (gl *goLogger) Level() Level {
	return gl.level
}

// SetLevel sets the logger's verbosity threshold.
// NOTE(review): not synchronized; concurrent use appears to rely on the
// package-level loggerMutex in the callers — confirm before calling
// this directly from multiple goroutines.
func (gl *goLogger) SetLevel(level Level) {
	gl.level = level
}
+
// log stamps the message with the current local time and emits it
// through the configured entry formatter.
func (gl *goLogger) log(level Level, rlevel Level, msg string) {
	tm := time.Now().Format("2006-01-02T15:04:05.000-07:00") // time.RFC3339 with milliseconds
	gl.logger.Print(gl.entryFormatter.format(tm, level, rlevel, msg))
}

// formatter renders one log entry (time, level, request level, message)
// into its final output line, including the trailing newline.
type formatter interface {
	format(string, Level, Level, string) string
}
+
+type textFormatter struct {
+}
+
+// ex. 2016-02-10T09:15:25.498-08:00 [INFO] This is a message from test in text format
+
+func (*textFormatter) format(tm string, level Level, rlevel Level, msg string) string {
+ b := &strings.Builder{}
+ appendValue(b, tm)
+ if rlevel != NONE {
+ fmt.Fprintf(b, "[%s,%s] ", level.String(), rlevel.String())
+ } else {
+ fmt.Fprintf(b, "[%s] ", level.String())
+ }
+ appendValue(b, msg)
+ b.WriteByte('\n')
+ return b.String()
+}
+
+func appendValue(b *strings.Builder, value interface{}) {
+ if _, ok := value.(string); ok {
+ fmt.Fprintf(b, "%s ", value)
+ } else {
+ fmt.Fprintf(b, "%v ", value)
+ }
+}
+
+type keyvalueFormatter struct {
+}
+
+// ex. _time=2016-02-10T09:15:25.498-08:00 _level=INFO _msg=This is a message from test in key-value format
+
+func (*keyvalueFormatter) format(tm string, level Level, rlevel Level, msg string) string {
+ b := &strings.Builder{}
+ appendKeyValue(b, _TIME, tm)
+ appendKeyValue(b, _LEVEL, level.String())
+ if rlevel != NONE {
+ appendKeyValue(b, _RLEVEL, rlevel.String())
+ }
+ appendKeyValue(b, _MSG, msg)
+ b.WriteByte('\n')
+ return b.String()
+}
+
+func appendKeyValue(b *strings.Builder, key, value interface{}) {
+ if _, ok := value.(string); ok {
+ fmt.Fprintf(b, "%v=%s ", key, value)
+ } else {
+ fmt.Fprintf(b, "%v=%v ", key, value)
+ }
+}
+
+type jsonFormatter struct {
+}
+
+// ex. {"_level":"INFO","_msg":"This is a message from test in json format","_time":"2016-02-10T09:12:59.518-08:00"}
+
+func (*jsonFormatter) format(tm string, level Level, rlevel Level, msg string) string {
+ data := make(map[string]interface{}, 4)
+ data[_TIME] = tm
+ data[_LEVEL] = level.String()
+ if rlevel != NONE {
+ data[_RLEVEL] = rlevel.String()
+ }
+ data[_MSG] = msg
+ serialized, _ := json.Marshal(data)
+ var b strings.Builder
+ b.Write(serialized)
+ b.WriteByte('\n')
+ return b.String()
+}
+
// ComponentCallback supplies the component name stamped on each entry
// rendered in the uniform format.
type ComponentCallback func() string

// uniformFormatter renders entries in the uniform logging format,
// obtaining the component name from callback on every entry.
type uniformFormatter struct {
	callback ComponentCallback
}
+
+// ex. 2019-03-15T11:28:07.652-04:00 DEBU COMPONENT.subcomponent This is a message from test in uniform format
+
+var _LEVEL_UNIFORM = []string{
+ DEBUG: "DEBU",
+ TRACE: "TRAC",
+ REQUEST: "REQU",
+ INFO: "INFO",
+ WARN: "WARN",
+ ERROR: "ERRO",
+ SEVERE: "SEVE",
+ FATAL: "FATA",
+ NONE: "NONE",
+}
+
+func (level Level) UniformString() string {
+ return _LEVEL_UNIFORM[level]
+}
+
+func (uf *uniformFormatter) format(tm string, level Level, rlevel Level, msg string) string {
+ b := &strings.Builder{}
+ appendValue(b, tm)
+ component := uf.callback()
+ if rlevel != NONE {
+ // not really any accommodation for a composite level in the uniform standard; just output as abbr,abbr
+ fmt.Fprintf(b, "%s,%s %s ", level.UniformString(), rlevel.UniformString(), component)
+ } else {
+ fmt.Fprintf(b, "%s %s ", level.UniformString(), component)
+ }
+ appendValue(b, msg)
+ b.WriteByte('\n')
+ return b.String()
+}
diff --git a/vendor/github.com/couchbase/goutils/scramsha/scramsha.go b/vendor/github.com/couchbase/goutils/scramsha/scramsha.go
new file mode 100644
index 00000000..b234bfc8
--- /dev/null
+++ b/vendor/github.com/couchbase/goutils/scramsha/scramsha.go
@@ -0,0 +1,207 @@
+// @author Couchbase
+// @copyright 2018 Couchbase, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package scramsha provides implementation of client side SCRAM-SHA
+// according to https://tools.ietf.org/html/rfc5802
+package scramsha
+
+import (
+ "crypto/hmac"
+ "crypto/rand"
+ "crypto/sha1"
+ "crypto/sha256"
+ "crypto/sha512"
+ "encoding/base64"
+ "fmt"
+ "github.com/pkg/errors"
+ "golang.org/x/crypto/pbkdf2"
+ "hash"
+ "strconv"
+ "strings"
+)
+
+func hmacHash(message []byte, secret []byte, hashFunc func() hash.Hash) []byte {
+ h := hmac.New(hashFunc, secret)
+ h.Write(message)
+ return h.Sum(nil)
+}
+
+func shaHash(message []byte, hashFunc func() hash.Hash) []byte {
+ h := hashFunc()
+ h.Write(message)
+ return h.Sum(nil)
+}
+
+func generateClientNonce(size int) (string, error) {
+ randomBytes := make([]byte, size)
+ _, err := rand.Read(randomBytes)
+ if err != nil {
+ return "", errors.Wrap(err, "Unable to generate nonce")
+ }
+ return base64.StdEncoding.EncodeToString(randomBytes), nil
+}
+
// ScramSha provides context for SCRAM-SHA handling
type ScramSha struct {
	hashSize       int              // digest size in bytes (64/32/20 per method)
	hashFunc       func() hash.Hash // hash constructor for the negotiated method
	clientNonce    string           // nonce we generated in the start request
	serverNonce    string           // combined nonce echoed back by the server
	salt           []byte           // decoded salt from the server-first message
	i              int              // PBKDF2 iteration count from the server
	saltedPassword []byte           // PBKDF2(pass, salt, i); set by GetFinalRequest
	authMessage    string           // accumulated auth message across the exchange
}
+
+var knownMethods = []string{"SCRAM-SHA512", "SCRAM-SHA256", "SCRAM-SHA1"}
+
+// BestMethod returns SCRAM-SHA method we consider the best out of suggested
+// by server
+func BestMethod(methods string) (string, error) {
+ for _, m := range knownMethods {
+ if strings.Index(methods, m) != -1 {
+ return m, nil
+ }
+ }
+ return "", errors.Errorf(
+ "None of the server suggested methods [%s] are supported",
+ methods)
+}
+
+// NewScramSha creates context for SCRAM-SHA handling
+func NewScramSha(method string) (*ScramSha, error) {
+ s := &ScramSha{}
+
+ if method == knownMethods[0] {
+ s.hashFunc = sha512.New
+ s.hashSize = 64
+ } else if method == knownMethods[1] {
+ s.hashFunc = sha256.New
+ s.hashSize = 32
+ } else if method == knownMethods[2] {
+ s.hashFunc = sha1.New
+ s.hashSize = 20
+ } else {
+ return nil, errors.Errorf("Unsupported method %s", method)
+ }
+ return s, nil
+}
+
+// GetStartRequest builds start SCRAM-SHA request to be sent to server
+func (s *ScramSha) GetStartRequest(user string) (string, error) {
+ var err error
+ s.clientNonce, err = generateClientNonce(24)
+ if err != nil {
+ return "", errors.Wrapf(err, "Unable to generate SCRAM-SHA "+
+ "start request for user %s", user)
+ }
+
+ message := fmt.Sprintf("n,,n=%s,r=%s", user, s.clientNonce)
+ s.authMessage = message[3:]
+ return message, nil
+}
+
// HandleStartResponse handles server response on start SCRAM-SHA request
// The server-first message is "r=<nonce>,s=<base64 salt>,i=<iterations>"
// per RFC 5802; it is validated field by field, and the parsed values
// are stored on the context for the final request.
func (s *ScramSha) HandleStartResponse(response string) error {
	parts := strings.Split(response, ",")
	if len(parts) != 3 {
		return errors.Errorf("expected 3 fields in first SCRAM-SHA-1 "+
			"server message %s", response)
	}
	if !strings.HasPrefix(parts[0], "r=") || len(parts[0]) < 3 {
		return errors.Errorf("Server sent an invalid nonce %s",
			parts[0])
	}
	if !strings.HasPrefix(parts[1], "s=") || len(parts[1]) < 3 {
		return errors.Errorf("Server sent an invalid salt %s", parts[1])
	}
	if !strings.HasPrefix(parts[2], "i=") || len(parts[2]) < 3 {
		return errors.Errorf("Server sent an invalid iteration count %s",
			parts[2])
	}

	s.serverNonce = parts[0][2:]
	encodedSalt := parts[1][2:]
	var err error
	s.i, err = strconv.Atoi(parts[2][2:])
	if err != nil {
		return errors.Errorf("Iteration count %s must be integer.",
			parts[2][2:])
	}

	if s.i < 1 {
		return errors.New("Iteration count should be positive")
	}

	// the server nonce must extend the client nonce we sent
	if !strings.HasPrefix(s.serverNonce, s.clientNonce) {
		return errors.Errorf("Server nonce %s doesn't contain client"+
			" nonce %s", s.serverNonce, s.clientNonce)
	}

	s.salt, err = base64.StdEncoding.DecodeString(encodedSalt)
	if err != nil {
		return errors.Wrapf(err, "Unable to decode salt %s",
			encodedSalt)
	}

	// the full server-first message is appended to the auth message
	s.authMessage = s.authMessage + "," + response
	return nil
}
+
+// GetFinalRequest builds final SCRAM-SHA request to be sent to server
+func (s *ScramSha) GetFinalRequest(pass string) string {
+ clientFinalMessageBare := "c=biws,r=" + s.serverNonce
+ s.authMessage = s.authMessage + "," + clientFinalMessageBare
+
+ s.saltedPassword = pbkdf2.Key([]byte(pass), s.salt, s.i,
+ s.hashSize, s.hashFunc)
+
+ clientKey := hmacHash([]byte("Client Key"), s.saltedPassword, s.hashFunc)
+ storedKey := shaHash(clientKey, s.hashFunc)
+ clientSignature := hmacHash([]byte(s.authMessage), storedKey, s.hashFunc)
+
+ clientProof := make([]byte, len(clientSignature))
+ for i := 0; i < len(clientSignature); i++ {
+ clientProof[i] = clientKey[i] ^ clientSignature[i]
+ }
+
+ return clientFinalMessageBare + ",p=" +
+ base64.StdEncoding.EncodeToString(clientProof)
+}
+
+// HandleFinalResponse handles server's response on final SCRAM-SHA request
+func (s *ScramSha) HandleFinalResponse(response string) error {
+ if strings.Contains(response, ",") ||
+ !strings.HasPrefix(response, "v=") {
+ return errors.Errorf("Server sent an invalid final message %s",
+ response)
+ }
+
+ decodedMessage, err := base64.StdEncoding.DecodeString(response[2:])
+ if err != nil {
+ return errors.Wrapf(err, "Unable to decode server message %s",
+ response[2:])
+ }
+ serverKey := hmacHash([]byte("Server Key"), s.saltedPassword,
+ s.hashFunc)
+ serverSignature := hmacHash([]byte(s.authMessage), serverKey,
+ s.hashFunc)
+ if string(decodedMessage) != string(serverSignature) {
+ return errors.Errorf("Server proof %s doesn't match "+
+ "the expected: %s",
+ string(decodedMessage), string(serverSignature))
+ }
+ return nil
+}
diff --git a/vendor/github.com/couchbase/goutils/scramsha/scramsha_http.go b/vendor/github.com/couchbase/goutils/scramsha/scramsha_http.go
new file mode 100644
index 00000000..19f32b31
--- /dev/null
+++ b/vendor/github.com/couchbase/goutils/scramsha/scramsha_http.go
@@ -0,0 +1,252 @@
+// @author Couchbase
+// @copyright 2018 Couchbase, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package scramsha provides implementation of client side SCRAM-SHA
+// via Http according to https://tools.ietf.org/html/rfc7804
+package scramsha
+
+import (
+ "encoding/base64"
+ "github.com/pkg/errors"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "strings"
+)
+
+// consts used to parse scramsha response from target
+// consts used to parse scramsha response from target
+const (
+	// WWWAuthenticate is the response header carrying the server's SCRAM challenge.
+	WWWAuthenticate = "WWW-Authenticate"
+	// AuthenticationInfo is the response header carrying the server's final proof.
+	AuthenticationInfo = "Authentication-Info"
+	// Authorization is the request header used to send client SCRAM messages.
+	Authorization = "Authorization"
+	// DataPrefix marks the base64-encoded SCRAM payload within a header value.
+	DataPrefix = "data="
+	// SidPrefix marks the opaque session id within a header value.
+	SidPrefix = "sid="
+)
+
+// Request provides implementation of http request that can be retried:
+// the original body ReadSeeker is retained so it can be rewound and the
+// request re-sent during the multi-round SCRAM-SHA exchange.
+type Request struct {
+	// body is the seekable source of the request body; nil for bodyless requests.
+	body io.ReadSeeker
+
+	// Embed an HTTP request directly. This makes a *Request act exactly
+	// like an *http.Request so that all meta methods are supported.
+	*http.Request
+}
+
+// lenReader is satisfied by body types that can report their remaining
+// length (e.g. bytes.Reader, strings.Reader); used by NewRequest to set
+// Content-Length automatically.
+type lenReader interface {
+	Len() int
+}
+
+// NewRequest creates an http request that can be retried. body may be nil;
+// when non-nil it is wrapped so the HTTP client cannot close it, allowing
+// DoScramSha to rewind and resend it. If body reports its length,
+// Content-Length is populated automatically.
+func NewRequest(method, url string, body io.ReadSeeker) (*Request, error) {
+	// Wrap the body in a noop ReadCloser if non-nil. This prevents the
+	// reader from being closed by the HTTP client.
+	var rcBody io.ReadCloser
+	if body != nil {
+		rcBody = ioutil.NopCloser(body)
+	}
+
+	// Make the request with the noop-closer for the body.
+	httpReq, err := http.NewRequest(method, url, rcBody)
+	if err != nil {
+		return nil, err
+	}
+
+	// Check if we can set the Content-Length automatically.
+	// (A nil body never satisfies lenReader, so this is nil-safe.)
+	if lr, ok := body.(lenReader); ok {
+		httpReq.ContentLength = int64(lr.Len())
+	}
+
+	return &Request{body, httpReq}, nil
+}
+
+// encode base64-encodes str using standard encoding, as required for SCRAM
+// payloads carried in HTTP headers.
+func encode(str string) string {
+	return base64.StdEncoding.EncodeToString([]byte(str))
+}
+
+// decode base64-decodes str (standard encoding) and returns the result as a
+// string. On failure it returns a fresh error naming the offending input;
+// NOTE(review): the underlying base64 error detail is discarded here.
+func decode(str string) (string, error) {
+	bytes, err := base64.StdEncoding.DecodeString(str)
+	if err != nil {
+		return "", errors.Errorf("Cannot base64 decode %s",
+			str)
+	}
+	// err is nil on this path.
+	return string(bytes), err
+}
+
+// trimPrefix removes prefix from s. If the prefix is absent it returns the
+// input unchanged together with a non-nil error, so callers can distinguish
+// "not present" from an empty remainder.
+func trimPrefix(s, prefix string) (string, error) {
+	l := len(s)
+	trimmed := strings.TrimPrefix(s, prefix)
+	// TrimPrefix returns s unchanged when the prefix is missing, so an
+	// unchanged length means the prefix was not found.
+	if l == len(trimmed) {
+		return trimmed, errors.Errorf("Prefix %s not found in %s",
+			prefix, s)
+	}
+	return trimmed, nil
+}
+
+// drainBody reads the remainder of resp.Body and closes it. Fully consuming
+// the body lets the HTTP transport reuse the underlying connection for the
+// next round of the SCRAM exchange.
+func drainBody(resp *http.Response) {
+	defer resp.Body.Close()
+	io.Copy(ioutil.Discard, resp.Body)
+}
+
+// DoScramSha performs a SCRAM-SHA-512 handshake via HTTP per RFC 7804.
+// It sends the client-first message in the Authorization header, parses the
+// server challenge from WWW-Authenticate, resends the request with the
+// client-final message, and verifies the server proof from
+// Authentication-Info. The returned response is the one the caller should
+// consume; it is returned un-drained on success and on terminal auth
+// failures (401, unknown user, 5xx).
+func DoScramSha(req *Request,
+	username string,
+	password string,
+	client *http.Client) (*http.Response, error) {
+
+	method := "SCRAM-SHA-512"
+	// NOTE(review): the header scheme is "SCRAM-SHA-512" while NewScramSha
+	// receives "SCRAM-SHA512" (no second hyphen) — presumably the parser
+	// expects the hyphen-less form; confirm against scramsha.go.
+	s, err := NewScramSha("SCRAM-SHA512")
+	if err != nil {
+		return nil, errors.Wrap(err,
+			"Unable to initialize SCRAM-SHA handler")
+	}
+
+	message, err := s.GetStartRequest(username)
+	if err != nil {
+		return nil, err
+	}
+
+	// Client-first message: `SCRAM-SHA-512 data=<base64(message)>`.
+	encodedMessage := method + " " + DataPrefix + encode(message)
+
+	req.Header.Set(Authorization, encodedMessage)
+
+	res, err := client.Do(req.Request)
+	if err != nil {
+		// NOTE(review): string concatenation yields "startrequest"
+		// (missing space) — matches upstream; left as-is in vendor.
+		return nil, errors.Wrap(err, "Problem sending SCRAM-SHA start"+
+			"request")
+	}
+
+	// Anything other than 401 means the target did not challenge us;
+	// hand the response back untouched.
+	if res.StatusCode != http.StatusUnauthorized {
+		return res, nil
+	}
+
+	authHeader := res.Header.Get(WWWAuthenticate)
+	if authHeader == "" {
+		drainBody(res)
+		return nil, errors.Errorf("Header %s is not populated in "+
+			"SCRAM-SHA start response", WWWAuthenticate)
+	}
+
+	authHeader, err = trimPrefix(authHeader, method+" ")
+	if err != nil {
+		// A Basic challenge instead of SCRAM means the server does not
+		// know this user; return the 401 for the caller to handle.
+		if strings.HasPrefix(authHeader, "Basic ") {
+			// user not found
+			return res, nil
+		}
+		drainBody(res)
+		return nil, errors.Wrapf(err, "Error while parsing SCRAM-SHA "+
+			"start response %s", authHeader)
+	}
+
+	// The challenge is fully contained in the header; discard the body so
+	// the connection can be reused for the final round.
+	drainBody(res)
+
+	sid, response, err := parseSidAndData(authHeader)
+	if err != nil {
+		return nil, errors.Wrapf(err, "Error while parsing SCRAM-SHA "+
+			"start response %s", authHeader)
+	}
+
+	err = s.HandleStartResponse(response)
+	if err != nil {
+		return nil, errors.Wrapf(err, "Error parsing SCRAM-SHA start "+
+			"response %s", response)
+	}
+
+	// Client-final message, echoing the server-issued sid.
+	message = s.GetFinalRequest(password)
+	encodedMessage = method + " " + SidPrefix + sid + "," + DataPrefix +
+		encode(message)
+
+	req.Header.Set(Authorization, encodedMessage)
+
+	// rewind request body so it can be resent again
+	if req.body != nil {
+		if _, err = req.body.Seek(0, 0); err != nil {
+			return nil, errors.Errorf("Failed to seek body: %v",
+				err)
+		}
+	}
+
+	res, err = client.Do(req.Request)
+	if err != nil {
+		// NOTE(review): same missing space → "finalrequest"; upstream text.
+		return nil, errors.Wrap(err, "Problem sending SCRAM-SHA final"+
+			"request")
+	}
+
+	if res.StatusCode == http.StatusUnauthorized {
+		// TODO retrieve and return error
+		return res, nil
+	}
+
+	if res.StatusCode >= http.StatusInternalServerError {
+		// in this case we cannot expect server to set headers properly
+		return res, nil
+	}
+
+	authHeader = res.Header.Get(AuthenticationInfo)
+	if authHeader == "" {
+		drainBody(res)
+		return nil, errors.Errorf("Header %s is not populated in "+
+			"SCRAM-SHA final response", AuthenticationInfo)
+	}
+
+	finalSid, response, err := parseSidAndData(authHeader)
+	if err != nil {
+		drainBody(res)
+		return nil, errors.Wrapf(err, "Error while parsing SCRAM-SHA "+
+			"final response %s", authHeader)
+	}
+
+	// The sid must be stable across both rounds of the exchange.
+	if finalSid != sid {
+		drainBody(res)
+		return nil, errors.Errorf("Sid %s returned by server "+
+			"doesn't match the original sid %s", finalSid, sid)
+	}
+
+	// Verify the server proof; only then is the response trustworthy.
+	err = s.HandleFinalResponse(response)
+	if err != nil {
+		drainBody(res)
+		return nil, errors.Wrapf(err,
+			"Error handling SCRAM-SHA final server response %s",
+			response)
+	}
+	return res, nil
+}
+
+// parseSidAndData extracts the session id ("sid=...") and the base64-decoded
+// SCRAM payload ("data=...") from a SCRAM authentication header value.
+// NOTE(review): the sid terminator is the FIRST comma in the whole header,
+// and the data value is taken to run to end-of-string — this assumes the
+// attributes appear as "sid=...,data=..." with data last; confirm servers
+// never reorder or append attributes.
+func parseSidAndData(authHeader string) (string, string, error) {
+	sidIndex := strings.Index(authHeader, SidPrefix)
+	if sidIndex < 0 {
+		return "", "", errors.Errorf("Cannot find %s in %s",
+			SidPrefix, authHeader)
+	}
+
+	sidEndIndex := strings.Index(authHeader, ",")
+	if sidEndIndex < 0 {
+		return "", "", errors.Errorf("Cannot find ',' in %s",
+			authHeader)
+	}
+
+	sid := authHeader[sidIndex+len(SidPrefix) : sidEndIndex]
+
+	dataIndex := strings.Index(authHeader, DataPrefix)
+	if dataIndex < 0 {
+		return "", "", errors.Errorf("Cannot find %s in %s",
+			DataPrefix, authHeader)
+	}
+
+	// Everything after "data=" is the base64 payload.
+	data, err := decode(authHeader[dataIndex+len(DataPrefix):])
+	if err != nil {
+		return "", "", err
+	}
+	return sid, data, nil
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index ee447df3..4a9e9794 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -285,6 +285,21 @@ github.com/cenkalti/backoff
# github.com/cespare/xxhash/v2 v2.3.0
## explicit; go 1.11
github.com/cespare/xxhash/v2
+# github.com/couchbase/go-couchbase v0.1.1
+## explicit; go 1.13
+github.com/couchbase/go-couchbase
+# github.com/couchbase/go_n1ql v0.0.0-20220303011133-0ed4bf93e31d
+## explicit
+github.com/couchbase/go_n1ql
+# github.com/couchbase/gomemcached v0.3.3
+## explicit; go 1.13
+github.com/couchbase/gomemcached
+github.com/couchbase/gomemcached/client
+github.com/couchbase/gomemcached/internal/flatbuffers/systemevents
+# github.com/couchbase/goutils v0.1.2
+## explicit; go 1.13
+github.com/couchbase/goutils/logging
+github.com/couchbase/goutils/scramsha
# github.com/danieljoos/wincred v1.2.2
## explicit; go 1.18
github.com/danieljoos/wincred