diff --git a/src/current/_config_cockroachdb.yml b/src/current/_config_cockroachdb.yml
index 2f5b1787337..e19080fb5df 100644
--- a/src/current/_config_cockroachdb.yml
+++ b/src/current/_config_cockroachdb.yml
@@ -4,4 +4,5 @@ destination: _site/docs
 homepage_title: CockroachDB Docs
 versions:
   stable: v25.2
-  dev: v25.2
\ No newline at end of file
+  dev: v25.3
+
diff --git a/src/current/_data/releases.yml b/src/current/_data/releases.yml
index 2cbcfe5fb67..b715f027b97 100644
--- a/src/current/_data/releases.yml
+++ b/src/current/_data/releases.yml
@@ -7,7 +7,6 @@
   docker:
     docker_image: cockroachdb/cockroach-unstable
   is_not_downloadable: true
-
 - release_name: beta-20160414
   major_version: v1.0
   release_date: '2016-04-14'
@@ -18,7 +17,6 @@
     docker_image: cockroachdb/cockroach-unstable
   previous_release: beta-20160407
   is_not_downloadable: true
-
 - release_name: beta-20160421
   major_version: v1.0
   release_date: '2016-04-21'
@@ -29,7 +27,6 @@
     docker_image: cockroachdb/cockroach-unstable
   previous_release: beta-20160414
   is_not_downloadable: true
-
 - release_name: beta-20160428
   major_version: v1.0
   release_date: '2016-04-28'
@@ -40,7 +37,6 @@
     docker_image: cockroachdb/cockroach-unstable
   previous_release: beta-20160421
   is_not_downloadable: true
-
 - release_name: beta-20160505
   major_version: v1.0
   release_date: '2016-05-05'
@@ -51,7 +47,6 @@
     docker_image: cockroachdb/cockroach-unstable
   previous_release: beta-20160428
   is_not_downloadable: true
-
 - release_name: beta-20160512
   major_version: v1.0
   release_date: '2016-05-12'
@@ -62,7 +57,6 @@
     docker_image: cockroachdb/cockroach-unstable
   previous_release: beta-20160505
   is_not_downloadable: true
-
 - release_name: beta-20160519
   major_version: v1.0
   release_date: '2016-05-19'
@@ -73,7 +67,6 @@
     docker_image: cockroachdb/cockroach-unstable
   previous_release: beta-20160512
   is_not_downloadable: true
-
 - release_name: beta-20160526
   major_version: v1.0
   release_date: '2016-05-26'
@@ -84,7 +77,6 @@
     docker_image: cockroachdb/cockroach-unstable
   previous_release: beta-20160519
   is_not_downloadable: true
-
 - release_name: beta-20160602
   major_version: v1.0
   release_date: '2016-06-02'
@@ -95,7 +87,6 @@
     docker_image: cockroachdb/cockroach-unstable
   previous_release: beta-20160526
   is_not_downloadable: true
-
 - release_name: beta-20160609
   major_version: v1.0
   release_date: '2016-06-09'
@@ -106,7 +97,6 @@
     docker_image: cockroachdb/cockroach-unstable
   previous_release: beta-20160602
   is_not_downloadable: true
-
 - release_name: beta-20160616
   major_version: v1.0
   release_date: '2016-06-16'
@@ -117,7 +107,6 @@
     docker_image: cockroachdb/cockroach-unstable
   previous_release: beta-20160609
   is_not_downloadable: true
-
 - release_name: beta-20160629
   major_version: v1.0
   release_date: '2016-06-29'
@@ -128,7 +117,6 @@
     docker_image: cockroachdb/cockroach-unstable
   previous_release: beta-20160616
   is_not_downloadable: true
-
 - release_name: beta-20160714
   major_version: v1.0
   release_date: '2016-07-14'
@@ -139,7 +127,6 @@
     docker_image: cockroachdb/cockroach-unstable
   previous_release: beta-20160629
   is_not_downloadable: true
-
 - release_name: beta-20160721
   major_version: v1.0
   release_date: '2016-07-21'
@@ -150,7 +137,6 @@
     docker_image: cockroachdb/cockroach-unstable
   previous_release: beta-20160714
   is_not_downloadable: true
-
 - release_name: beta-20160728
   major_version: v1.0
   release_date: '2016-07-28'
@@ -161,7 +147,6 @@
     docker_image: cockroachdb/cockroach-unstable
   previous_release: beta-20160721
   is_not_downloadable: true
-
 - release_name: beta-20160829
   major_version: v1.0
   release_date: '2016-08-29'
@@ -172,7 +157,6
@@ docker_image: cockroachdb/cockroach-unstable previous_release: beta-20160728 is_not_downloadable: true - - release_name: beta-20160908 major_version: v1.0 release_date: '2016-09-09' @@ -183,7 +167,6 @@ docker_image: cockroachdb/cockroach-unstable previous_release: beta-20160829 is_not_downloadable: true - - release_name: beta-20160915 major_version: v1.0 release_date: '2016-09-15' @@ -194,7 +177,6 @@ docker_image: cockroachdb/cockroach-unstable previous_release: beta-20160908 is_not_downloadable: true - - release_name: beta-20160929 major_version: v1.0 release_date: '2016-09-29' @@ -205,7 +187,6 @@ docker_image: cockroachdb/cockroach-unstable previous_release: beta-20160915 is_not_downloadable: true - - release_name: beta-20161006 major_version: v1.0 release_date: '2016-10-06' @@ -216,7 +197,6 @@ docker_image: cockroachdb/cockroach-unstable previous_release: beta-20160929 is_not_downloadable: true - - release_name: beta-20161013 major_version: v1.0 release_date: '2016-10-13' @@ -227,7 +207,6 @@ docker_image: cockroachdb/cockroach-unstable previous_release: beta-20161006 is_not_downloadable: true - - release_name: beta-20161027 major_version: v1.0 release_date: '2016-10-27' @@ -238,7 +217,6 @@ docker_image: cockroachdb/cockroach-unstable previous_release: beta-20161013 is_not_downloadable: true - - release_name: beta-20161103 major_version: v1.0 release_date: '2016-11-03' @@ -249,7 +227,6 @@ docker_image: cockroachdb/cockroach-unstable previous_release: beta-20161027 is_not_downloadable: true - - release_name: beta-20161201 major_version: v1.0 release_date: '2016-12-01' @@ -260,7 +237,6 @@ docker_image: cockroachdb/cockroach-unstable previous_release: beta-20161103 is_not_downloadable: true - - release_name: beta-20161208 major_version: v1.0 release_date: '2016-12-08' @@ -271,7 +247,6 @@ docker_image: cockroachdb/cockroach-unstable previous_release: beta-20161201 is_not_downloadable: true - - release_name: beta-20161215 major_version: v1.0 release_date: '2016-12-15' @@ -282,7 +257,6 @@ docker_image: cockroachdb/cockroach-unstable previous_release: beta-20161208 is_not_downloadable: true - - release_name: beta-20170105 major_version: v1.0 release_date: '2017-01-05' @@ -293,7 +267,6 @@ docker_image: cockroachdb/cockroach-unstable previous_release: beta-20161215 is_not_downloadable: true - - release_name: beta-20170112 major_version: v1.0 release_date: '2017-01-12' @@ -304,7 +277,6 @@ docker_image: cockroachdb/cockroach-unstable previous_release: beta-20170105 is_not_downloadable: true - - release_name: beta-20170126 major_version: v1.0 release_date: '2017-01-26' @@ -315,7 +287,6 @@ docker_image: cockroachdb/cockroach-unstable previous_release: beta-20170112 is_not_downloadable: true - - release_name: beta-20170209 major_version: v1.0 release_date: '2017-02-09' @@ -326,7 +297,6 @@ docker_image: cockroachdb/cockroach-unstable previous_release: beta-20170126 is_not_downloadable: true - - release_name: beta-20170216 major_version: v1.0 release_date: '2017-02-16' @@ -337,7 +307,6 @@ docker_image: cockroachdb/cockroach-unstable previous_release: beta-20170209 is_not_downloadable: true - - release_name: beta-20170223 major_version: v1.0 release_date: '2017-02-23' @@ -348,7 +317,6 @@ docker_image: cockroachdb/cockroach-unstable previous_release: beta-20170216 is_not_downloadable: true - - release_name: beta-20170309 major_version: v1.0 release_date: '2017-03-09' @@ -359,7 +327,6 @@ docker_image: cockroachdb/cockroach-unstable previous_release: beta-20170223 is_not_downloadable: true - - 
release_name: beta-20170323 major_version: v1.0 release_date: '2017-03-23' @@ -370,7 +337,6 @@ docker_image: cockroachdb/cockroach-unstable previous_release: beta-20170309 is_not_downloadable: true - - release_name: beta-20170330 major_version: v1.0 release_date: '2017-03-30' @@ -382,7 +348,6 @@ source: true previous_release: beta-20170323 is_not_downloadable: true - - release_name: beta-20170413 major_version: v1.0 release_date: '2017-04-13' @@ -394,7 +359,6 @@ source: true previous_release: beta-20170330 is_not_downloadable: true - - release_name: beta-20170420 major_version: v1.0 release_date: '2017-04-20' @@ -406,7 +370,6 @@ source: true previous_release: beta-20170413 is_not_downloadable: true - - release_name: v1.0-rc.1 major_version: v1.0 release_date: '2017-05-01' @@ -418,7 +381,6 @@ source: true previous_release: beta-20170420 is_not_downloadable: true - - release_name: v1.0-rc.2 major_version: v1.0 release_date: '2017-05-05' @@ -430,7 +392,6 @@ source: true previous_release: v1.0-rc.1 is_not_downloadable: true - - release_name: v1.0 major_version: v1.0 release_date: '2017-05-10' @@ -442,7 +403,6 @@ source: true previous_release: v1.0-rc.2 is_not_downloadable: true - - release_name: v1.0.1 major_version: v1.0 release_date: '2017-05-25' @@ -454,7 +414,6 @@ source: true previous_release: v1.0 is_not_downloadable: true - - release_name: v1.1-alpha.20170601 major_version: v1.1 release_date: '2017-06-01' @@ -465,7 +424,6 @@ docker_image: cockroachdb/cockroach-unstable source: true is_not_downloadable: true - - release_name: v1.1-alpha.20170608 major_version: v1.1 release_date: '2017-06-08' @@ -477,7 +435,6 @@ source: true previous_release: v1.1-alpha.20170601 is_not_downloadable: true - - release_name: v1.0.2 major_version: v1.0 release_date: '2017-06-15' @@ -489,7 +446,6 @@ source: true previous_release: v1.0.1 is_not_downloadable: true - - release_name: v1.1-alpha.20170622 major_version: v1.1 release_date: '2017-06-22' @@ -501,7 +457,6 @@ source: true previous_release: v1.1-alpha.20170608 is_not_downloadable: true - - release_name: v1.1-alpha.20170629 major_version: v1.1 release_date: '2017-06-29' @@ -513,7 +468,6 @@ source: true previous_release: v1.1-alpha.20170622 is_not_downloadable: true - - release_name: v1.0.3 major_version: v1.0 release_date: '2017-07-06' @@ -525,7 +479,6 @@ source: true previous_release: v1.0.2 is_not_downloadable: true - - release_name: v1.1-alpha.20170713 major_version: v1.1 release_date: '2017-07-13' @@ -537,7 +490,6 @@ source: true previous_release: v1.1-alpha.20170629 is_not_downloadable: true - - release_name: v1.1-alpha.20170720 major_version: v1.1 release_date: '2017-07-20' @@ -549,7 +501,6 @@ source: true previous_release: v1.1-alpha.20170713 is_not_downloadable: true - - release_name: v1.0.4 major_version: v1.0 release_date: '2017-07-27' @@ -561,7 +512,6 @@ source: true previous_release: v1.0.3 is_not_downloadable: true - - release_name: v1.1-alpha.20170803 major_version: v1.1 release_date: '2017-08-03' @@ -573,7 +523,6 @@ source: true previous_release: v1.1-alpha.20170720 is_not_downloadable: true - - release_name: v1.1-alpha.20170810 major_version: v1.1 release_date: '2017-08-10' @@ -585,7 +534,6 @@ source: true previous_release: v1.1-alpha.20170803 is_not_downloadable: true - - release_name: v1.1-alpha.20170817 major_version: v1.1 release_date: '2017-08-17' @@ -597,7 +545,6 @@ source: true previous_release: v1.1-alpha.20170810 is_not_downloadable: true - - release_name: v1.0.5 major_version: v1.0 release_date: '2017-08-24' @@ -609,7 +556,6 @@ 
source: true previous_release: v1.0.4 is_not_downloadable: true - - release_name: v1.1-beta.20170907 major_version: v1.1 release_date: '2017-09-07' @@ -621,7 +567,6 @@ source: true previous_release: v1.1-alpha.20170817 is_not_downloadable: true - - release_name: v1.0.6 major_version: v1.0 release_date: '2017-09-14' @@ -633,7 +578,6 @@ source: true previous_release: v1.0.5 is_not_downloadable: true - - release_name: v1.1-beta.20170921 major_version: v1.1 release_date: '2017-09-21' @@ -645,7 +589,6 @@ source: true previous_release: v1.1-beta.20170907 is_not_downloadable: true - - release_name: v1.1-beta.20170928 major_version: v1.1 release_date: '2017-09-28' @@ -657,7 +600,6 @@ source: true previous_release: v1.1-beta.20170921 is_not_downloadable: true - - release_name: v1.1.0-rc.1 major_version: v1.1 release_date: '2017-10-05' @@ -669,7 +611,6 @@ source: true previous_release: v1.1-beta.20170928 is_not_downloadable: true - - release_name: v1.1.0 major_version: v1.1 release_date: '2017-10-12' @@ -681,7 +622,6 @@ source: true previous_release: v1.1.0-rc.1 is_not_downloadable: true - - release_name: v1.1.1 major_version: v1.1 release_date: '2017-10-19' @@ -693,7 +633,6 @@ source: true previous_release: v1.1.0 is_not_downloadable: true - - release_name: v1.2-alpha.20171026 major_version: v2.0 release_date: '2017-10-26' @@ -704,7 +643,6 @@ docker_image: cockroachdb/cockroach-unstable source: true is_not_downloadable: true - - release_name: v1.1.2 major_version: v1.1 release_date: '2017-11-02' @@ -716,7 +654,6 @@ source: true previous_release: v1.1.1 is_not_downloadable: true - - release_name: v1.2-alpha.20171113 major_version: v2.0 release_date: '2017-11-13' @@ -727,7 +664,6 @@ docker_image: cockroachdb/cockroach-unstable previous_release: v1.2-alpha.20171026 is_not_downloadable: true - - release_name: v1.1.3 major_version: v1.1 release_date: '2017-11-27' @@ -739,7 +675,6 @@ source: true previous_release: v1.1.2 is_not_downloadable: true - - release_name: v1.2-alpha.20171204 major_version: v2.0 release_date: '2017-12-04' @@ -751,7 +686,6 @@ source: true previous_release: v1.2-alpha.20171113 is_not_downloadable: true - - release_name: v1.2-alpha.20171211 major_version: v2.0 release_date: '2017-12-11' @@ -763,7 +697,6 @@ source: true previous_release: v1.2-alpha.20171204 is_not_downloadable: true - - release_name: v2.0-alpha.20171218 major_version: v2.0 release_date: '2017-12-18' @@ -775,7 +708,6 @@ source: true previous_release: v1.2-alpha.20171211 is_not_downloadable: true - - release_name: v1.1.4 major_version: v1.1 release_date: '2018-01-08' @@ -787,7 +719,6 @@ source: true previous_release: v1.1.3 is_not_downloadable: true - - release_name: v2.0-alpha.20180129 major_version: v2.0 release_date: '2018-01-29' @@ -799,7 +730,6 @@ source: true previous_release: v2.0-alpha.20171218 is_not_downloadable: true - - release_name: v1.1.5 major_version: v1.1 release_date: '2018-02-05' @@ -811,7 +741,6 @@ source: true previous_release: v1.1.4 is_not_downloadable: true - - release_name: v1.0.7 major_version: v1.0 release_date: '2018-02-13' @@ -823,7 +752,6 @@ source: true previous_release: v1.0.6 is_not_downloadable: true - - release_name: v2.0-beta.20180305 major_version: v2.0 release_date: '2018-03-05' @@ -835,7 +763,6 @@ source: true previous_release: v2.0-alpha.20180129 is_not_downloadable: true - - release_name: v1.1.6 major_version: v1.1 release_date: '2018-03-12' @@ -847,7 +774,6 @@ source: true previous_release: v1.1.5 is_not_downloadable: true - - release_name: v2.0-beta.20180312 major_version: 
v2.0 release_date: '2018-03-12' @@ -859,7 +785,6 @@ source: true previous_release: v2.0-beta.20180305 is_not_downloadable: true - - release_name: v2.0-beta.20180319 major_version: v2.0 release_date: '2018-03-19' @@ -871,7 +796,6 @@ source: true previous_release: v2.0-beta.20180312 is_not_downloadable: true - - release_name: v1.1.7 major_version: v1.1 release_date: '2018-03-26' @@ -883,7 +807,6 @@ source: true previous_release: v1.1.6 is_not_downloadable: true - - release_name: v2.0-beta.20180326 major_version: v2.0 release_date: '2018-03-26' @@ -895,7 +818,6 @@ source: true previous_release: v2.0-beta.20180319 is_not_downloadable: true - - release_name: v2.0-rc.1 major_version: v2.0 release_date: '2018-04-02' @@ -907,7 +829,6 @@ source: true previous_release: v2.0-beta.20180326 is_not_downloadable: true - - release_name: v2.0.0 major_version: v2.0 release_date: '2018-04-04' @@ -919,7 +840,6 @@ source: true previous_release: v2.0-rc.1 is_not_downloadable: true - - release_name: v2.1.0-alpha.20180416 major_version: v2.1 release_date: '2018-04-16' @@ -930,7 +850,6 @@ docker_image: cockroachdb/cockroach-unstable source: true is_not_downloadable: true - - release_name: v1.1.8 major_version: v1.1 release_date: '2018-04-23' @@ -942,7 +861,6 @@ source: true previous_release: v1.1.7 is_not_downloadable: true - - release_name: v2.0.1 major_version: v2.0 release_date: '2018-04-23' @@ -954,7 +872,6 @@ source: true previous_release: v2.0.0 is_not_downloadable: true - - release_name: v2.1.0-alpha.20180507 major_version: v2.1 release_date: '2018-05-07' @@ -966,7 +883,6 @@ source: true previous_release: v2.1.0-alpha.20180416 is_not_downloadable: true - - release_name: v2.0.2 major_version: v2.0 release_date: '2018-05-21' @@ -978,7 +894,6 @@ source: true previous_release: v2.0.1 is_not_downloadable: true - - release_name: v2.1.0-alpha.20180604 major_version: v2.1 release_date: '2018-06-04' @@ -990,7 +905,6 @@ source: true previous_release: v2.1.0-alpha.20180507 is_not_downloadable: true - - release_name: v2.0.3 major_version: v2.0 release_date: '2018-06-18' @@ -1002,7 +916,6 @@ source: true previous_release: v2.0.2 is_not_downloadable: true - - release_name: v2.1.0-alpha.20180702 major_version: v2.1 release_date: '2018-07-02' @@ -1014,7 +927,6 @@ source: true previous_release: v2.1.0-alpha.20180604 is_not_downloadable: true - - release_name: v2.0.4 major_version: v2.0 release_date: '2018-07-16' @@ -1026,7 +938,6 @@ source: true previous_release: v2.0.3 is_not_downloadable: true - - release_name: v2.1.0-alpha.20180730 major_version: v2.1 release_date: '2018-07-30' @@ -1038,7 +949,6 @@ source: true previous_release: v2.1.0-alpha.20180702 is_not_downloadable: true - - release_name: v2.0.5 major_version: v2.0 release_date: '2018-08-13' @@ -1050,7 +960,6 @@ source: true previous_release: v2.0.4 is_not_downloadable: true - - release_name: v2.1.0-beta.20180827 major_version: v2.1 release_date: '2018-08-27' @@ -1062,7 +971,6 @@ source: true previous_release: v2.1.0-alpha.20180730 is_not_downloadable: true - - release_name: v2.1.0-beta.20180904 major_version: v2.1 release_date: '2018-09-04' @@ -1074,7 +982,6 @@ source: true previous_release: v2.1.0-beta.20180827 is_not_downloadable: true - - release_name: v2.1.0-beta.20180910 major_version: v2.1 release_date: '2018-09-10' @@ -1086,7 +993,6 @@ source: true previous_release: v2.1.0-beta.20180904 is_not_downloadable: true - - release_name: v2.1.0-beta.20180917 major_version: v2.1 release_date: '2018-09-17' @@ -1098,7 +1004,6 @@ source: true previous_release: 
v2.1.0-beta.20180910 is_not_downloadable: true - - release_name: v2.1.0-beta.20180924 major_version: v2.1 release_date: '2018-09-24' @@ -1110,7 +1015,6 @@ source: true previous_release: v2.1.0-beta.20180917 is_not_downloadable: true - - release_name: v1.1.9 major_version: v1.1 release_date: '2018-10-01' @@ -1122,7 +1026,6 @@ source: true previous_release: v1.1.8 is_not_downloadable: true - - release_name: v2.0.6 major_version: v2.0 release_date: '2018-10-01' @@ -1134,7 +1037,6 @@ source: true previous_release: v2.0.5 is_not_downloadable: true - - release_name: v2.1.0-beta.20181001 major_version: v2.1 release_date: '2018-10-01' @@ -1146,7 +1048,6 @@ source: true previous_release: v2.1.0-beta.20180924 is_not_downloadable: true - - release_name: v2.1.0-beta.20181008 major_version: v2.1 release_date: '2018-10-08' @@ -1158,7 +1059,6 @@ source: true previous_release: v2.1.0-beta.20181001 is_not_downloadable: true - - release_name: v2.1.0-beta.20181015 major_version: v2.1 release_date: '2018-10-15' @@ -1170,7 +1070,6 @@ source: true previous_release: v2.1.0-beta.20181008 is_not_downloadable: true - - release_name: v2.1.0-rc.1 major_version: v2.1 release_date: '2018-10-22' @@ -1182,7 +1081,6 @@ source: true previous_release: v2.1.0-beta.20181015 is_not_downloadable: true - - release_name: v2.1.0-rc.2 major_version: v2.1 release_date: '2018-10-25' @@ -1194,7 +1092,6 @@ source: true previous_release: v2.1.0-rc.1 is_not_downloadable: true - - release_name: v2.1.0 major_version: v2.1 release_date: '2018-10-30' @@ -1206,7 +1103,6 @@ source: true previous_release: v2.1.0-rc.2 is_not_downloadable: true - - release_name: v2.1.1 major_version: v2.1 release_date: '2018-11-19' @@ -1218,7 +1114,6 @@ source: true previous_release: v2.1.0 is_not_downloadable: true - - release_name: v2.2.0-alpha.20181119 major_version: v19.1 release_date: '2018-11-19' @@ -1229,7 +1124,6 @@ docker_image: cockroachdb/cockroach-unstable source: true is_not_downloadable: true - - release_name: v2.0.7 major_version: v2.0 release_date: '2018-12-10' @@ -1241,7 +1135,6 @@ source: true previous_release: v2.0.6 is_not_downloadable: true - - release_name: v2.1.2 major_version: v2.1 release_date: '2018-12-10' @@ -1253,7 +1146,6 @@ source: true previous_release: v2.1.1 is_not_downloadable: true - - release_name: v2.1.3 major_version: v2.1 release_date: '2018-12-17' @@ -1265,7 +1157,6 @@ source: true previous_release: v2.1.2 is_not_downloadable: true - - release_name: v2.2.0-alpha.20181217 major_version: v19.1 release_date: '2018-12-17' @@ -1277,7 +1168,6 @@ source: true previous_release: v2.2.0-alpha.20181119 is_not_downloadable: true - - release_name: v2.2.0-alpha.20190114 major_version: v19.1 release_date: '2019-01-14' @@ -1289,7 +1179,6 @@ source: true previous_release: v2.2.0-alpha.20181217 is_not_downloadable: true - - release_name: v2.1.4 major_version: v2.1 release_date: '2019-01-22' @@ -1301,7 +1190,6 @@ source: true previous_release: v2.1.3 is_not_downloadable: true - - release_name: v2.2.0-alpha.20190211 major_version: v19.1 release_date: '2019-02-11' @@ -1313,7 +1201,6 @@ source: true previous_release: v2.2.0-alpha.20190114 is_not_downloadable: true - - release_name: v2.1.5 major_version: v2.1 release_date: '2019-02-19' @@ -1325,7 +1212,6 @@ source: true previous_release: v2.1.4 is_not_downloadable: true - - release_name: v19.1.0-beta.20190225 major_version: v19.1 release_date: '2019-02-25' @@ -1337,7 +1223,6 @@ source: true previous_release: v2.2.0-alpha.20190211 is_not_downloadable: true - - release_name: v19.1.0-beta.20190304 
major_version: v19.1 release_date: '2019-03-04' @@ -1349,7 +1234,6 @@ source: true previous_release: v19.1.0-beta.20190225 is_not_downloadable: true - - release_name: v2.1.6 major_version: v2.1 release_date: '2019-03-11' @@ -1361,7 +1245,6 @@ source: true previous_release: v2.1.5 is_not_downloadable: true - - release_name: v19.1.0-beta.20190318 major_version: v19.1 release_date: '2019-03-18' @@ -1373,7 +1256,6 @@ source: true previous_release: v19.1.0-beta.20190304 is_not_downloadable: true - - release_name: v19.1.0-rc.1 major_version: v19.1 release_date: '2019-04-02' @@ -1385,7 +1267,6 @@ source: true previous_release: v19.1.0-beta.20190318 is_not_downloadable: true - - release_name: v19.1.0-rc.2 major_version: v19.1 release_date: '2019-04-08' @@ -1397,7 +1278,6 @@ source: true previous_release: v19.1.0-rc.1 is_not_downloadable: true - - release_name: v19.1.0-rc.3 major_version: v19.1 release_date: '2019-04-15' @@ -1409,7 +1289,6 @@ source: true previous_release: v19.1.0-rc.2 is_not_downloadable: true - - release_name: v19.1.0-rc.4 major_version: v19.1 release_date: '2019-04-25' @@ -1421,7 +1300,6 @@ source: true previous_release: v19.1.0-rc.3 is_not_downloadable: true - - release_name: v19.1.0 major_version: v19.1 release_date: '2019-04-30' @@ -1433,7 +1311,6 @@ source: true previous_release: v19.1.0-rc.4 is_not_downloadable: true - - release_name: v2.1.7 major_version: v2.1 release_date: '2019-05-14' @@ -1445,7 +1322,6 @@ source: true previous_release: v2.1.6 is_not_downloadable: true - - release_name: v19.1.1 major_version: v19.1 release_date: '2019-05-20' @@ -1457,7 +1333,6 @@ source: true previous_release: v19.1.0 is_not_downloadable: true - - release_name: v19.2.0-alpha.20190606 major_version: v19.2 release_date: '2019-06-06' @@ -1468,7 +1343,6 @@ docker_image: cockroachdb/cockroach-unstable source: true is_not_downloadable: true - - release_name: v19.1.2 major_version: v19.1 release_date: '2019-06-17' @@ -1480,7 +1354,6 @@ source: true previous_release: v19.1.1 is_not_downloadable: true - - release_name: v19.2.0-alpha.20190701 major_version: v19.2 release_date: '2019-07-01' @@ -1492,7 +1365,6 @@ source: true previous_release: v19.2.0-alpha.20190606 is_not_downloadable: true - - release_name: v2.1.8 major_version: v2.1 release_date: '2019-07-15' @@ -1504,7 +1376,6 @@ source: true previous_release: v2.1.7 is_not_downloadable: true - - release_name: v19.1.3 major_version: v19.1 release_date: '2019-07-15' @@ -1516,7 +1387,6 @@ source: true previous_release: v19.1.2 is_not_downloadable: true - - release_name: v19.2.0-alpha.20190805 major_version: v19.2 release_date: '2019-08-05' @@ -1528,7 +1398,6 @@ source: true previous_release: v19.2.0-alpha.20190701 is_not_downloadable: true - - release_name: v19.1.4 major_version: v19.1 release_date: '2019-08-13' @@ -1540,7 +1409,6 @@ source: true previous_release: v19.1.3 is_not_downloadable: true - - release_name: v2.1.9 major_version: v2.1 release_date: '2019-09-23' @@ -1552,7 +1420,6 @@ source: true previous_release: v2.1.8 is_not_downloadable: true - - release_name: v19.1.5 major_version: v19.1 release_date: '2019-09-30' @@ -1564,7 +1431,6 @@ source: true previous_release: v19.1.4 is_not_downloadable: true - - release_name: v19.2.0-beta.20190930 major_version: v19.2 release_date: '2019-09-30' @@ -1576,7 +1442,6 @@ source: true previous_release: v19.2.0-alpha.20190805 is_not_downloadable: true - - release_name: v19.2.0-beta.20191014 major_version: v19.2 release_date: '2019-10-14' @@ -1588,7 +1453,6 @@ source: true previous_release: 
v19.2.0-beta.20190930 is_not_downloadable: true - - release_name: v19.2.0-rc.1 major_version: v19.2 release_date: '2019-10-21' @@ -1600,7 +1464,6 @@ source: true previous_release: v19.2.0-beta.20191014 is_not_downloadable: true - - release_name: v19.2.0-rc.2 major_version: v19.2 release_date: '2019-10-28' @@ -1612,7 +1475,6 @@ source: true previous_release: v19.2.0-rc.1 is_not_downloadable: true - - release_name: v19.2.0-rc.3 major_version: v19.2 release_date: '2019-11-04' @@ -1624,7 +1486,6 @@ source: true previous_release: v19.2.0-rc.2 is_not_downloadable: true - - release_name: v19.2.0-rc.4 major_version: v19.2 release_date: '2019-11-07' @@ -1636,7 +1497,6 @@ source: true previous_release: v19.2.0-rc.3 is_not_downloadable: true - - release_name: v19.2.0 major_version: v19.2 release_date: '2019-11-12' @@ -1649,7 +1509,6 @@ source: true previous_release: v19.2.0-rc.4 is_not_downloadable: true - - release_name: v20.1.0-alpha.20191118 major_version: v20.1 release_date: '2019-11-18' @@ -1660,7 +1519,6 @@ docker: docker_image: cockroachdb/cockroach-unstable source: true - - release_name: v19.2.1 major_version: v19.2 release_date: '2019-11-25' @@ -1673,7 +1531,6 @@ source: true previous_release: v19.2.0 is_not_downloadable: true - - release_name: v2.1.10 major_version: v2.1 release_date: '2019-12-16' @@ -1685,7 +1542,6 @@ source: true previous_release: v2.1.9 is_not_downloadable: true - - release_name: v19.1.6 major_version: v19.1 release_date: '2019-12-16' @@ -1697,7 +1553,6 @@ source: true previous_release: v19.1.5 is_not_downloadable: true - - release_name: v19.2.2 major_version: v19.2 release_date: '2019-12-16' @@ -1710,7 +1565,6 @@ source: true previous_release: v19.2.1 is_not_downloadable: true - - release_name: v20.1.0-alpha20191216 major_version: v20.1 release_date: '2019-12-16' @@ -1722,7 +1576,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v20.1.0-alpha.20191118 - - release_name: v19.1.7 major_version: v19.1 release_date: '2020-01-27' @@ -1734,7 +1587,6 @@ source: true previous_release: v19.1.6 is_not_downloadable: true - - release_name: v2.1.11 major_version: v2.1 release_date: '2020-01-29' @@ -1746,7 +1598,6 @@ source: true previous_release: v2.1.10 is_not_downloadable: true - - release_name: v20.1.0-alpha.20200123 major_version: v20.1 release_date: '2020-01-30' @@ -1758,7 +1609,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v20.1.0-alpha20191216 - - release_name: v19.2.3 major_version: v19.2 release_date: '2020-02-03' @@ -1771,7 +1621,6 @@ source: true previous_release: v19.2.2 is_not_downloadable: true - - release_name: v19.1.8 major_version: v19.1 release_date: '2020-02-11' @@ -1783,7 +1632,6 @@ source: true previous_release: v19.1.7 is_not_downloadable: true - - release_name: v19.2.4 major_version: v19.2 release_date: '2020-02-11' @@ -1796,7 +1644,6 @@ source: true previous_release: v19.2.3 is_not_downloadable: true - - release_name: v20.1.0-beta.1 major_version: v20.1 release_date: '2020-02-17' @@ -1808,7 +1655,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v20.1.0-alpha.20200123 - - release_name: v20.1.0-beta.2 major_version: v20.1 release_date: '2020-03-02' @@ -1820,7 +1666,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v20.1.0-beta.1 - - release_name: v19.2.5 major_version: v19.2 release_date: '2020-03-23' @@ -1833,7 +1678,6 @@ source: true previous_release: v19.2.4 is_not_downloadable: true - - release_name: v20.1.0-beta.3 major_version: 
v20.1 release_date: '2020-03-25' @@ -1845,7 +1689,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v20.1.0-beta.2 - - release_name: v20.1.0-beta.4 major_version: v20.1 release_date: '2020-03-30' @@ -1857,7 +1700,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v20.1.0-beta.3 - - release_name: v19.2.6 major_version: v19.2 release_date: '2020-04-13' @@ -1870,7 +1712,6 @@ source: true previous_release: v19.2.5 is_not_downloadable: true - - release_name: v20.1.0-rc.1 major_version: v20.1 release_date: '2020-04-14' @@ -1882,7 +1723,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v20.1.0-beta.4 - - release_name: v20.1.0-rc.2 major_version: v20.1 release_date: '2020-04-21' @@ -1894,7 +1734,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v20.1.0-rc.1 - - release_name: v19.1.9 major_version: v19.1 release_date: '2020-05-12' @@ -1906,7 +1745,6 @@ source: true previous_release: v19.1.8 is_not_downloadable: true - - release_name: v20.1.0 major_version: v20.1 release_date: '2020-05-12' @@ -1918,7 +1756,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v20.1.0-rc.2 - - release_name: v19.2.7 major_version: v19.2 release_date: '2020-05-20' @@ -1931,7 +1768,6 @@ source: true previous_release: v19.2.6 is_not_downloadable: true - - release_name: v20.1.1 major_version: v20.1 release_date: '2020-05-26' @@ -1943,7 +1779,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v20.1.0 - - release_name: v20.1.2 major_version: v20.1 release_date: '2020-06-17' @@ -1955,7 +1790,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v20.1.1 - - release_name: v20.2.0-alpha.1 major_version: v20.2 release_date: '2020-06-17' @@ -1966,7 +1800,6 @@ docker: docker_image: cockroachdb/cockroach-unstable source: true - - release_name: v19.1.10 major_version: v19.1 release_date: '2020-06-29' @@ -1979,7 +1812,6 @@ source: true previous_release: v19.1.9 is_not_downloadable: true - - release_name: v19.2.8 major_version: v19.2 release_date: '2020-06-29' @@ -1992,7 +1824,6 @@ source: true previous_release: v19.2.7 is_not_downloadable: true - - release_name: v20.1.3 major_version: v20.1 release_date: '2020-06-29' @@ -2004,7 +1835,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v20.1.2 - - release_name: v19.2.9 major_version: v19.2 release_date: '2020-07-06' @@ -2017,7 +1847,6 @@ source: true previous_release: v19.2.8 is_not_downloadable: true - - release_name: v19.1.11 major_version: v19.1 release_date: '2020-07-07' @@ -2030,7 +1859,6 @@ source: true previous_release: v19.1.10 is_not_downloadable: true - - release_name: v20.2.0-alpha.2 major_version: v20.2 release_date: '2020-07-27' @@ -2042,7 +1870,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v20.2.0-alpha.1 - - release_name: v20.1.4 major_version: v20.1 release_date: '2020-08-03' @@ -2055,7 +1882,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v20.1.3 - - release_name: v19.2.10 major_version: v19.2 release_date: '2020-08-24' @@ -2068,7 +1894,6 @@ source: true previous_release: v19.2.9 is_not_downloadable: true - - release_name: v20.2.0-alpha.3 major_version: v20.2 release_date: '2020-08-25' @@ -2080,7 +1905,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v20.2.0-alpha.2 - - release_name: v20.1.5 major_version: v20.1 release_date: '2020-08-31' @@ -2093,7 +1917,6 @@ docker_image: 
cockroachdb/cockroach source: true previous_release: v20.1.4 - - release_name: v20.2.0-beta.1 major_version: v20.2 release_date: '2020-09-14' @@ -2105,7 +1928,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v20.2.0-alpha.3 - - release_name: v20.1.6 major_version: v20.1 release_date: '2020-09-24' @@ -2117,7 +1939,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v20.1.5 - - release_name: v20.2.0-beta.2 major_version: v20.2 release_date: '2020-09-25' @@ -2129,7 +1950,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v20.2.0-beta.1 - - release_name: v20.2.0-beta.3 major_version: v20.2 release_date: '2020-09-30' @@ -2141,7 +1961,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v20.2.0-beta.2 - - release_name: v20.2.0-beta.4 major_version: v20.2 release_date: '2020-10-06' @@ -2153,7 +1972,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v20.2.0-beta.3 - - release_name: v19.2.11 major_version: v19.2 release_date: '2020-10-12' @@ -2166,7 +1984,6 @@ source: true previous_release: v19.2.10 is_not_downloadable: true - - release_name: v20.1.7 major_version: v20.1 release_date: '2020-10-12' @@ -2178,7 +1995,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v20.1.6 - - release_name: v20.2.0-rc.1 major_version: v20.2 release_date: '2020-10-15' @@ -2190,7 +2006,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v20.2.0-beta.4 - - release_name: v20.2.0-rc.2 major_version: v20.2 release_date: '2020-10-20' @@ -2202,7 +2017,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v20.2.0-rc.1 - - release_name: v20.1.8 major_version: v20.1 release_date: '2020-10-21' @@ -2214,7 +2028,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v20.1.7 - - release_name: v20.2.0-rc.3 major_version: v20.2 release_date: '2020-10-26' @@ -2226,7 +2039,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v20.2.0-rc.2 - - release_name: v20.2.0-rc.4 major_version: v20.2 release_date: '2020-11-03' @@ -2238,7 +2050,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v20.2.0-rc.3 - - release_name: v20.2.0 major_version: v20.2 release_date: '2020-11-10' @@ -2250,7 +2061,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v20.2.0-rc.4 - - release_name: v20.2.1 major_version: v20.2 release_date: '2020-11-20' @@ -2262,7 +2072,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v20.2.0 - - release_name: v20.2.2 major_version: v20.2 release_date: '2020-11-25' @@ -2274,7 +2083,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v20.2.1 - - release_name: v20.1.9 major_version: v20.1 release_date: '2020-12-01' @@ -2286,7 +2094,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v20.1.8 - - release_name: v21.1.0-alpha.1 major_version: v21.1 release_date: '2020-12-08' @@ -2297,7 +2104,6 @@ docker: docker_image: cockroachdb/cockroach-unstable source: true - - release_name: v20.2.3 major_version: v20.2 release_date: '2020-12-14' @@ -2309,7 +2115,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v20.2.2 - - release_name: v20.1.10 major_version: v20.1 release_date: '2020-12-21' @@ -2321,7 +2126,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v20.1.9 - - release_name: v19.2.12 major_version: v19.2 release_date: '2021-01-19' 
@@ -2334,7 +2138,6 @@ source: true previous_release: v19.2.11 is_not_downloadable: true - - release_name: v20.2.4 major_version: v20.2 release_date: '2021-01-21' @@ -2346,7 +2149,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v20.2.3 - - release_name: v20.1.11 major_version: v20.1 release_date: '2021-01-25' @@ -2358,7 +2160,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v20.1.10 - - release_name: v21.1.0-alpha.2 major_version: v21.1 release_date: '2021-02-01' @@ -2370,7 +2171,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v21.1.0-alpha.1 - - release_name: v21.1.0-alpha.3 major_version: v21.1 release_date: '2021-02-08' @@ -2382,7 +2182,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v21.1.0-alpha.2 - - release_name: v20.1.12 major_version: v20.1 release_date: '2021-02-16' @@ -2394,7 +2193,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v20.1.11 - - release_name: v20.2.5 major_version: v20.2 release_date: '2021-02-16' @@ -2406,7 +2204,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v20.2.4 - - release_name: v20.1.13 major_version: v20.1 release_date: '2021-03-15' @@ -2418,7 +2215,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v20.1.12 - - release_name: v20.2.6 major_version: v20.2 release_date: '2021-03-15' @@ -2430,7 +2226,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v20.2.5 - - release_name: v21.1.0-beta.1 major_version: v21.1 release_date: '2021-03-22' @@ -2442,7 +2237,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v21.1.0-alpha.3 - - release_name: v20.2.7 major_version: v20.2 release_date: '2021-03-29' @@ -2454,7 +2248,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v20.2.6 - - release_name: v21.1.0-beta.2 major_version: v21.1 release_date: '2021-03-30' @@ -2466,7 +2259,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v21.1.0-beta.1 - - release_name: v21.1.0-beta.3 major_version: v21.1 release_date: '2021-04-12' @@ -2478,7 +2270,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v21.1.0-beta.2 - - release_name: v20.1.14 major_version: v20.1 release_date: '2021-04-19' @@ -2490,7 +2281,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v20.1.13 - - release_name: v21.1.0-beta.4 major_version: v21.1 release_date: '2021-04-19' @@ -2502,7 +2292,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v21.1.0-beta.3 - - release_name: v20.2.8 major_version: v20.2 release_date: '2021-04-23' @@ -2514,7 +2303,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v20.2.7 - - release_name: v20.1.15 major_version: v20.1 release_date: '2021-04-26' @@ -2526,7 +2314,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v20.1.14 - - release_name: v21.1.0-beta.5 major_version: v21.1 release_date: '2021-04-29' @@ -2538,7 +2325,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v21.1.0-beta.4 - - release_name: v21.1.0-rc.1 major_version: v21.1 release_date: '2021-05-05' @@ -2550,7 +2336,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v21.1.0-beta.5 - - release_name: v20.1.16 major_version: v20.1 release_date: '2021-05-10' @@ -2562,7 +2347,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v20.1.15 - - release_name: 
v20.2.9 major_version: v20.2 release_date: '2021-05-10' @@ -2574,7 +2358,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v20.2.8 - - release_name: v21.1.0-rc.2 major_version: v21.1 release_date: '2021-05-10' @@ -2586,7 +2369,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v21.1.0-rc.1 - - release_name: v20.1.17 major_version: v20.1 release_date: '2021-05-17' @@ -2598,7 +2380,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v20.1.16 - - release_name: v20.2.10 major_version: v20.2 release_date: '2021-05-17' @@ -2610,7 +2391,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v20.2.9 - - release_name: v21.1.0 major_version: v21.1 release_date: '2021-05-18' @@ -2623,7 +2403,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.1.0-rc.2 - - release_name: v21.1.1 major_version: v21.1 release_date: '2021-05-24' @@ -2636,7 +2415,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.1.0 - - release_name: v21.1.2 major_version: v21.1 release_date: '2021-06-07' @@ -2649,7 +2427,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.1.1 - - release_name: v20.2.11 major_version: v20.2 release_date: '2021-06-14' @@ -2661,7 +2438,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v20.2.10 - - release_name: v21.1.3 major_version: v21.1 release_date: '2021-06-21' @@ -2674,7 +2450,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.1.2 - - release_name: v20.2.12 major_version: v20.2 release_date: '2021-06-28' @@ -2686,7 +2461,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v20.2.11 - - release_name: v21.1.4 major_version: v21.1 release_date: '2021-06-29' @@ -2699,7 +2473,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.1.3 - - release_name: v21.1.5 major_version: v21.1 release_date: '2021-07-02' @@ -2712,7 +2485,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.1.4 - - release_name: v20.2.13 major_version: v20.2 release_date: '2021-07-12' @@ -2724,7 +2496,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v20.2.12 - - release_name: v21.1.6 major_version: v21.1 release_date: '2021-07-20' @@ -2737,7 +2508,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.1.5 - - release_name: v21.1.7 major_version: v21.1 release_date: '2021-08-09' @@ -2750,7 +2520,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.1.6 - - release_name: v20.2.14 major_version: v20.2 release_date: '2021-08-16' @@ -2762,7 +2531,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v20.2.13 - - release_name: v20.2.15 major_version: v20.2 release_date: '2021-08-23' @@ -2774,7 +2542,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v20.2.14 - - release_name: v21.1.8 major_version: v21.1 release_date: '2021-08-30' @@ -2788,7 +2555,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.1.7 - - release_name: v20.2.16 major_version: v20.2 release_date: '2021-09-13' @@ -2800,7 +2566,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v20.2.15 - - release_name: v21.1.9 major_version: v21.1 release_date: '2021-09-20' @@ -2813,7 +2578,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.1.8 - - release_name: v21.2.0-beta.1 major_version: v21.2 release_date: '2021-09-24' @@ -2824,7 +2588,6 @@ docker: 
docker_image: cockroachdb/cockroach-unstable source: true - - release_name: v21.2.0-beta.2 major_version: v21.2 release_date: '2021-09-27' @@ -2836,7 +2599,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v21.2.0-beta.1 - - release_name: v21.2.0-beta.3 major_version: v21.2 release_date: '2021-10-04' @@ -2848,7 +2610,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v21.2.0-beta.2 - - release_name: v21.1.10 major_version: v21.1 release_date: '2021-10-07' @@ -2861,7 +2622,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.1.9 - - release_name: v20.2.17 major_version: v20.2 release_date: '2021-10-11' @@ -2873,7 +2633,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v20.2.16 - - release_name: v21.2.0-beta.4 major_version: v21.2 release_date: '2021-10-11' @@ -2885,7 +2644,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v21.2.0-beta.3 - - release_name: v21.1.11 major_version: v21.1 release_date: '2021-10-18' @@ -2898,7 +2656,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.1.10 - - release_name: v21.2.0-rc.1 major_version: v21.2 release_date: '2021-10-18' @@ -2910,7 +2667,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v21.2.0-beta.4 - - release_name: v21.2.0-rc.2 major_version: v21.2 release_date: '2021-10-25' @@ -2922,7 +2678,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v21.2.0-rc.1 - - release_name: v21.2.0-rc.3 major_version: v21.2 release_date: '2021-11-01' @@ -2934,7 +2689,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v21.2.0-rc.2 - - release_name: v20.2.18 major_version: v20.2 release_date: '2021-11-08' @@ -2946,7 +2700,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v20.2.17 - - release_name: v21.1.12 major_version: v21.1 release_date: '2021-11-15' @@ -2959,7 +2712,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.1.11 - - release_name: v21.2.0 major_version: v21.2 release_date: '2021-11-16' @@ -2972,7 +2724,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.2.0-rc.3 - - release_name: v21.2.1 major_version: v21.2 release_date: '2021-11-29' @@ -2985,7 +2736,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.2.0 - - release_name: v21.2.2 major_version: v21.2 release_date: '2021-12-01' @@ -2998,7 +2748,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.2.1 - - release_name: v21.2.3 major_version: v21.2 release_date: '2021-12-13' @@ -3011,7 +2760,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.2.2 - - release_name: v21.1.13 major_version: v21.1 release_date: '2022-01-10' @@ -3024,7 +2772,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.1.12 - - release_name: v21.2.4 major_version: v21.2 release_date: '2022-01-10' @@ -3037,7 +2784,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.2.3 - - release_name: v22.1.0-alpha.1 major_version: v22.1 release_date: '2022-01-24' @@ -3048,7 +2794,6 @@ docker: docker_image: cockroachdb/cockroach-unstable source: true - - release_name: v21.2.5 major_version: v21.2 release_date: '2022-02-07' @@ -3061,7 +2806,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.2.4 - - release_name: v21.1.14 major_version: v21.1 release_date: '2022-02-09' @@ -3074,7 +2818,6 @@ 
docker_image: cockroachdb/cockroach source: true previous_release: v21.1.13 - - release_name: v20.2.19 major_version: v20.2 release_date: '2022-02-09' @@ -3086,7 +2829,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v20.2.18 - - release_name: v21.1.15 major_version: v21.1 release_date: '2022-02-14' @@ -3099,7 +2841,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.1.14 - - release_name: v21.2.6 major_version: v21.2 release_date: '2022-02-22' @@ -3112,7 +2853,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.2.5 - - release_name: v21.1.16 major_version: v21.1 release_date: '2022-03-07' @@ -3125,7 +2865,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.1.15 - - release_name: v22.1.0-alpha.2 major_version: v22.1 release_date: '2022-03-07' @@ -3137,7 +2876,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v22.1.0-alpha.1 - - release_name: v22.1.0-alpha.3 major_version: v22.1 release_date: '2022-03-14' @@ -3149,7 +2887,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v22.1.0-alpha.2 - - release_name: v21.2.7 major_version: v21.2 release_date: '2022-03-14' @@ -3162,7 +2899,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.2.6 - - release_name: v22.1.0-alpha.4 major_version: v22.1 release_date: '2022-03-21' @@ -3174,7 +2910,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v22.1.0-alpha.3 - - release_name: v22.1.0-alpha.5 major_version: v22.1 release_date: '2022-03-28' @@ -3186,7 +2921,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v22.1.0-alpha.4 - - release_name: v21.1.17 major_version: v21.1 release_date: '2022-04-04' @@ -3199,7 +2933,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.1.16 - - release_name: v22.1.0-beta.1 major_version: v22.1 release_date: '2022-04-04' @@ -3211,7 +2944,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v22.1.0-alpha.5 - - release_name: v21.2.8 major_version: v21.2 release_date: '2022-04-04' @@ -3224,7 +2956,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.2.7 - - release_name: v22.1.0-beta.2 major_version: v22.1 release_date: '2022-04-12' @@ -3237,7 +2968,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v22.1.0-beta.1 - - release_name: v21.1.18 major_version: v21.1 release_date: '2022-04-12' @@ -3250,7 +2980,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.1.17 - - release_name: v21.2.9 major_version: v21.2 release_date: '2022-04-13' @@ -3263,7 +2992,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.2.8 - - release_name: v22.1.0-beta.3 major_version: v22.1 release_date: '2022-04-18' @@ -3276,7 +3004,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v22.1.0-beta.2 - - release_name: v22.1.0-beta.4 major_version: v22.1 release_date: '2022-04-26' @@ -3289,7 +3016,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v22.1.0-beta.3 - - release_name: v21.2.10 major_version: v21.2 release_date: '2022-05-02' @@ -3302,7 +3028,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.2.9 - - release_name: v22.1.0-beta.5 major_version: v22.1 release_date: '2022-05-03' @@ -3315,7 +3040,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v22.1.0-beta.4 - 
- release_name: v22.1.0-rc.1 major_version: v22.1 release_date: '2022-05-09' @@ -3328,7 +3052,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v22.1.0-beta.5 - - release_name: v21.1.19 major_version: v21.1 release_date: '2022-05-09' @@ -3341,7 +3064,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.1.18 - - release_name: v21.2.11 major_version: v21.2 release_date: '2022-05-23' @@ -3354,7 +3076,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.2.10 - - release_name: v22.1.0 major_version: v22.1 release_date: '2022-05-24' @@ -3368,7 +3089,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v22.1.0-rc.1 - - release_name: v22.1.1 major_version: v22.1 release_date: '2022-06-06' @@ -3382,7 +3102,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v22.1.0 - - release_name: v21.2.12 major_version: v21.2 release_date: '2022-06-06' @@ -3395,7 +3114,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.2.11 - - release_name: v22.1.2 major_version: v22.1 release_date: '2022-06-22' @@ -3409,7 +3127,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v22.1.1 - - release_name: v21.2.13 major_version: v21.2 release_date: '2022-07-05' @@ -3422,7 +3139,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.2.12 - - release_name: v22.1.3 major_version: v22.1 release_date: '2022-07-11' @@ -3436,7 +3152,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v22.1.2 - - release_name: v22.1.4 major_version: v22.1 release_date: '2022-07-19' @@ -3450,7 +3165,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v22.1.3 - - release_name: v22.1.5 major_version: v22.1 release_date: '2022-07-28' @@ -3464,7 +3178,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v22.1.4 - - release_name: v21.2.14 major_version: v21.2 release_date: '2022-08-01' @@ -3477,7 +3190,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.2.13 - - release_name: v22.1.6 major_version: v22.1 release_date: '2022-08-23' @@ -3490,7 +3202,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v22.1.5 - - release_name: v21.2.15 major_version: v21.2 release_date: '2022-08-29' @@ -3503,7 +3214,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.2.14 - - release_name: v22.2.0-alpha.1 major_version: v22.2 release_date: '2022-08-30' @@ -3521,7 +3231,6 @@ docker: docker_image: cockroachdb/cockroach-unstable source: true - - release_name: v22.2.0-alpha.2 major_version: v22.2 release_date: '2022-09-06' @@ -3540,7 +3249,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v22.2.0-alpha.1 - - release_name: v22.2.0-alpha.3 major_version: v22.2 release_date: '2022-09-12' @@ -3559,7 +3267,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v22.2.0-alpha.2 - - release_name: v21.1.21 major_version: v21.1 release_date: '2022-09-15' @@ -3572,7 +3279,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.1.19 - - release_name: v22.1.7 major_version: v22.1 release_date: '2022-09-15' @@ -3585,7 +3291,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v22.1.6 - - release_name: v22.2.0-alpha.4 major_version: v22.2 release_date: '2022-09-22' @@ -3604,7 +3309,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v22.2.0-alpha.3 - - release_name: 
v22.2.0-beta.1 major_version: v22.2 release_date: '2022-09-26' @@ -3623,7 +3327,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v22.2.0-alpha.4 - - release_name: v22.1.8 major_version: v22.1 release_date: '2022-09-29' @@ -3636,7 +3339,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v22.1.7 - - release_name: v21.2.16 major_version: v21.2 release_date: '2022-09-29' @@ -3649,7 +3351,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.2.15 - - release_name: v22.2.0-beta.2 major_version: v22.2 release_date: '2022-10-03' @@ -3668,7 +3369,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v22.2.0-beta.1 - - release_name: v22.2.0-beta.3 major_version: v22.2 release_date: '2022-10-10' @@ -3687,7 +3387,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v22.2.0-beta.2 - - release_name: v22.1.9 major_version: v22.1 release_date: '2022-10-17' @@ -3701,7 +3400,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v22.1.8 - - release_name: v22.2.0-beta.4 major_version: v22.2 release_date: '2022-10-17' @@ -3720,7 +3418,6 @@ docker_image: cockroachdb/cockroach-unstable source: true previous_release: v22.2.0-beta.3 - - release_name: v21.2.17 major_version: v21.2 release_date: '2022-10-17' @@ -3733,7 +3430,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v21.2.16 - - release_name: v22.1.10 major_version: v22.1 release_date: '2022-10-28' @@ -3747,7 +3443,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v22.1.9 - - release_name: v22.2.0-beta.5 major_version: v22.2 release_date: '2022-11-01' @@ -3771,7 +3466,6 @@ docker_arm_experimental: true source: true previous_release: v22.2.0-beta.4 - - release_name: v22.2.0-rc.1 major_version: v22.2 release_date: '2022-11-07' @@ -3795,7 +3489,6 @@ docker_arm_experimental: true source: true previous_release: v22.2.0-beta.5 - - release_name: v22.2.0-rc.2 major_version: v22.2 release_date: '2022-11-14' @@ -3819,7 +3512,6 @@ docker_arm_experimental: true source: true previous_release: v22.2.0-rc.1 - - release_name: v22.1.11 major_version: v22.1 release_date: '2022-11-14' @@ -3833,7 +3525,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v22.1.10 - - release_name: v22.2.0-rc.3 major_version: v22.2 release_date: '2022-11-21' @@ -3857,7 +3548,6 @@ docker_arm_experimental: true source: true previous_release: v22.2.0-rc.2 - - release_name: v22.2.0 major_version: v22.2 release_date: '2022-12-06' @@ -3881,7 +3571,6 @@ docker_arm_experimental: true source: true previous_release: v22.2.0-rc.3 - - release_name: v22.1.12 major_version: v22.1 release_date: '2022-12-12' @@ -3895,7 +3584,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v22.1.11 - - release_name: v23.1.0-alpha.1 major_version: v23.1 release_date: '2022-12-19' @@ -3918,7 +3606,6 @@ docker_arm: true docker_arm_experimental: true source: true - - release_name: v22.2.1 major_version: v22.2 release_date: '2022-12-22' @@ -3942,7 +3629,6 @@ docker_arm_experimental: true source: true previous_release: v22.2.0 - - release_name: v22.2.2 major_version: v22.2 release_date: '2023-01-04' @@ -3966,7 +3652,6 @@ docker_arm_experimental: true source: true previous_release: v22.2.1 - - release_name: v22.1.13 major_version: v22.1 release_date: '2023-01-09' @@ -3980,7 +3665,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v22.1.12 - - release_name: v22.2.3 major_version: v22.2 
release_date: '2023-01-23' @@ -4004,7 +3688,6 @@ docker_arm_experimental: true source: true previous_release: v22.2.2 - - release_name: v22.1.14 major_version: v22.1 release_date: '2023-02-06' @@ -4018,7 +3701,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v22.1.13 - - release_name: v22.2.4 major_version: v22.2 release_date: '2023-02-13' @@ -4043,7 +3725,6 @@ docker_arm_experimental: true source: true previous_release: v22.2.3 - - release_name: v23.1.0-alpha.2 major_version: v23.1 release_date: '2023-02-13' @@ -4067,7 +3748,6 @@ docker_arm_experimental: true source: true previous_release: v23.1.0-alpha.1 - - release_name: v22.2.5 major_version: v22.2 release_date: '2023-02-16' @@ -4091,7 +3771,6 @@ docker_arm_experimental: true source: true previous_release: v22.2.4 - - release_name: v22.1.15 major_version: v22.1 release_date: '2023-02-17' @@ -4105,7 +3784,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v22.1.14 - - release_name: v23.1.0-alpha.3 major_version: v23.1 release_date: '2023-02-21' @@ -4129,7 +3807,6 @@ docker_arm_experimental: true source: true previous_release: v23.1.0-alpha.2 - - release_name: v23.1.0-alpha.4 major_version: v23.1 release_date: '2023-02-27' @@ -4153,7 +3830,6 @@ docker_arm_experimental: true source: true previous_release: v23.1.0-alpha.3 - - release_name: v22.2.6 major_version: v22.2 release_date: '2023-03-03' @@ -4177,7 +3853,6 @@ docker_arm_experimental: true source: true previous_release: v22.2.5 - - release_name: v22.1.16 major_version: v22.1 release_date: '2023-03-03' @@ -4191,7 +3866,6 @@ docker_image: cockroachdb/cockroach source: true previous_release: v22.1.15 - - release_name: v23.1.0-alpha.5 major_version: v23.1 release_date: '2023-03-06' @@ -4215,7 +3889,6 @@ docker_arm_experimental: true source: true previous_release: v23.1.0-alpha.4 - - release_name: v23.1.0-alpha.6 major_version: v23.1 release_date: '2023-03-13' @@ -4239,7 +3912,6 @@ docker_arm_experimental: true source: true previous_release: v23.1.0-alpha.5 - - release_name: v23.1.0-alpha.7 major_version: v23.1 release_date: '2023-03-20' @@ -4263,7 +3935,6 @@ docker_arm_experimental: true source: true previous_release: v23.1.0-alpha.6 - - release_name: v22.1.17 major_version: v22.1 release_date: '2023-03-27' @@ -4285,7 +3956,6 @@ docker_arm: false source: true previous_release: v22.1.16 - - release_name: v22.1.18 major_version: v22.1 release_date: '2023-03-28' @@ -4306,7 +3976,6 @@ docker_arm: false source: true previous_release: v22.1.17 - - release_name: v22.2.7 major_version: v22.2 release_date: '2023-03-28' @@ -4330,7 +3999,6 @@ docker_arm_experimental: true source: true previous_release: v22.2.6 - - release_name: v23.1.0-alpha.8 major_version: v23.1 release_date: '2023-03-27' @@ -4354,7 +4022,6 @@ docker_arm_experimental: true source: true previous_release: v23.1.0-alpha.7 - - release_name: v23.1.0-alpha.9 major_version: v23.1 release_date: '2023-04-04' @@ -4378,7 +4045,6 @@ docker_arm_experimental: true source: true previous_release: v23.1.0-alpha.8 - - release_name: v23.1.0-beta.1 major_version: v23.1 release_date: '2023-04-13' @@ -4402,7 +4068,6 @@ docker_arm_experimental: true source: true previous_release: v23.1.0-alpha.9 - - release_name: v22.2.8 major_version: v22.2 release_date: '2023-04-17' @@ -4427,7 +4092,6 @@ source: true previous_release: v22.2.7 withdrawn: true - - release_name: v23.1.0-beta.2 major_version: v23.1 release_date: '2023-04-17' @@ -4451,7 +4115,6 @@ docker_arm_experimental: true source: true previous_release: 
v23.1.0-beta.1 - - release_name: v23.1.0-beta.3 major_version: v23.1 release_date: '2023-04-19' @@ -4475,7 +4138,6 @@ docker_arm_experimental: true source: true previous_release: v23.1.0-beta.2 - - release_name: v22.1.19 major_version: v22.1 release_date: '2023-04-24' @@ -4497,7 +4159,6 @@ source: true previous_release: v22.1.18 withdrawn: true - - release_name: v23.1.0-rc.1 major_version: v23.1 release_date: '2023-05-02' @@ -4521,7 +4182,6 @@ docker_arm_experimental: true source: true previous_release: v23.1.0-beta.3 - - release_name: v23.1.0-rc.2 major_version: v23.1 release_date: '2023-05-04' @@ -4545,7 +4205,6 @@ docker_arm_experimental: true source: true previous_release: v23.1.0-rc.1 - - release_name: v22.2.9 major_version: v22.2 release_date: '2023-05-08' @@ -4569,7 +4228,6 @@ docker_arm_experimental: true source: true previous_release: v22.2.8 - - release_name: v22.1.20 major_version: v22.1 release_date: '2023-05-12' @@ -4590,7 +4248,6 @@ docker_arm: false source: true previous_release: v22.1.19 - - release_name: v23.1.0 major_version: v23.1 release_date: '2023-05-15' @@ -4615,7 +4272,6 @@ source: true previous_release: v23.1.0-rc.2 withdrawn: true - - release_name: v23.1.1 major_version: v23.1 release_date: '2023-05-16' @@ -4639,7 +4295,6 @@ docker_arm_experimental: true source: true previous_release: v23.1.0 - - release_name: v23.1.2 major_version: v23.1 release_date: '2023-05-25' @@ -4663,7 +4318,6 @@ docker_arm_experimental: true source: true previous_release: v23.1.1 - - release_name: v22.2.10 major_version: v22.2 release_date: '2023-05-30' @@ -4687,7 +4341,6 @@ docker_arm_experimental: true source: true previous_release: v22.2.9 - - release_name: v22.1.21 major_version: v22.1 release_date: '2023-06-05' @@ -4708,7 +4361,6 @@ docker_arm: false source: true previous_release: v22.1.20 - - release_name: v23.1.3 major_version: v23.1 release_date: '2023-06-12' @@ -4732,7 +4384,6 @@ docker_arm_experimental: true source: true previous_release: v23.1.2 - - release_name: v23.1.4 major_version: v23.1 release_date: '2023-06-20' @@ -4756,7 +4407,6 @@ docker_arm_experimental: true source: true previous_release: v23.1.3 - - release_name: v22.2.11 major_version: v22.2 release_date: '2023-06-27' @@ -4780,7 +4430,6 @@ docker_arm_experimental: true source: true previous_release: v22.2.10 - - release_name: v23.1.5 major_version: v23.1 release_date: '2023-07-05' @@ -4804,7 +4453,6 @@ docker_arm_experimental: true source: true previous_release: v23.1.4 - - release_name: v22.2.12 major_version: v22.2 release_date: '2023-07-24' @@ -4828,7 +4476,6 @@ docker_arm_experimental: true source: true previous_release: v22.2.11 - - release_name: v23.1.6 major_version: v23.1 release_date: '2023-07-24' @@ -4852,7 +4499,6 @@ docker_arm_experimental: true source: true previous_release: v23.1.5 - - release_name: v23.1.7 major_version: v23.1 release_date: '2023-07-31' @@ -4876,7 +4522,6 @@ docker_arm_experimental: true source: true previous_release: v23.1.6 - - release_name: v23.1.8 major_version: v23.1 release_date: '2023-08-07' @@ -4900,7 +4545,6 @@ docker_arm_experimental: true source: true previous_release: v23.1.7 - - release_name: v22.2.13 major_version: v22.2 release_date: '2023-08-08' @@ -4926,7 +4570,6 @@ docker_arm_limited_access: true source: true previous_release: v22.2.12 - - release_name: v22.1.22 major_version: v22.1 release_date: '2023-08-14' @@ -4947,7 +4590,6 @@ docker_arm: false source: true previous_release: v22.1.21 - - release_name: v23.1.9 major_version: v23.1 release_date: '2023-09-07' @@ 
-4974,7 +4616,6 @@ docker_arm_limited_access: false source: true previous_release: v23.1.8 - - release_name: v22.2.14 major_version: v22.2 release_date: '2023-09-14' @@ -5001,7 +4642,6 @@ docker_arm_limited_access: false source: true previous_release: v22.2.13 - - release_name: v23.1.10 major_version: v23.1 release_date: '2023-09-18' @@ -5028,7 +4668,6 @@ docker_arm_limited_access: false source: true previous_release: v23.1.9 - - release_name: v23.2.0-alpha.1 major_version: v23.2 release_date: '2023-09-26' @@ -5054,7 +4693,6 @@ docker_arm_experimental: true docker_arm_limited_access: false source: true - - release_name: v23.1.11 major_version: v23.1 release_date: '2023-10-02' @@ -5081,7 +4719,6 @@ docker_arm_limited_access: false source: true previous_release: v23.1.10 - - release_name: v23.2.0-alpha.2 major_version: v23.2 release_date: '2023-10-02' @@ -5108,7 +4745,6 @@ docker_arm_limited_access: false source: true previous_release: v23.2.0-alpha.1 - - release_name: v23.2.0-alpha.3 major_version: v23.2 release_date: '2023-10-10' @@ -5135,7 +4771,6 @@ docker_arm_limited_access: false source: true previous_release: v23.2.0-alpha.2 - - release_name: v23.2.0-alpha.4 major_version: v23.2 release_date: '2023-10-23' @@ -5162,7 +4797,6 @@ docker_arm_limited_access: false source: true previous_release: v23.2.0-alpha.3 - - release_name: v22.2.15 major_version: v22.2 release_date: '2023-10-23' @@ -5189,7 +4823,6 @@ docker_arm_limited_access: false source: true previous_release: v22.2.14 - - release_name: v23.2.0-alpha.5 major_version: v23.2 release_date: '2023-10-30' @@ -5216,7 +4849,6 @@ docker_arm_limited_access: false source: true previous_release: v23.2.0-alpha.4 - - release_name: v23.1.12 major_version: v23.1 release_date: '2023-11-13' @@ -5243,7 +4875,6 @@ docker_arm_limited_access: false source: true previous_release: v23.1.11 - - release_name: v22.2.16 major_version: v22.2 release_date: '2023-11-06' @@ -5270,7 +4901,6 @@ docker_arm_limited_access: false source: true previous_release: v22.2.15 - - release_name: v23.2.0-alpha.6 major_version: v23.2 release_date: '2023-11-07' @@ -5297,7 +4927,6 @@ docker_arm_limited_access: false source: true previous_release: v23.2.0-alpha.5 - - release_name: v22.2.17 major_version: v22.2 release_date: '2023-11-20' @@ -5324,7 +4953,6 @@ docker_arm_limited_access: false source: true previous_release: v22.2.16 - - release_name: v23.2.0-alpha.7 major_version: v23.2 release_date: '2023-11-20' @@ -5351,7 +4979,6 @@ docker_arm_limited_access: false source: true previous_release: v23.2.0-alpha.6 - - release_name: v23.2.0-beta.1 major_version: v23.2 release_date: '2023-11-27' @@ -5378,7 +5005,6 @@ docker_arm_limited_access: false source: true previous_release: v23.2.0-alpha.7 - - release_name: v23.2.0-beta.2 major_version: v23.2 release_date: '2023-12-08' @@ -5405,7 +5031,6 @@ docker_arm_limited_access: false source: true previous_release: v23.2.0-beta.1 - - release_name: v23.1.13 major_version: v23.1 release_date: '2023-12-11' @@ -5432,7 +5057,6 @@ docker_arm_limited_access: false source: true previous_release: v23.1.12 - - release_name: v23.2.0-beta.3 major_version: v23.2 release_date: '2023-12-14' @@ -5459,7 +5083,6 @@ docker_arm_limited_access: false source: true previous_release: v23.2.0-beta.2 - - release_name: v23.2.0-rc.1 major_version: v23.2 release_date: '2023-12-21' @@ -5486,7 +5109,6 @@ docker_arm_limited_access: true source: true previous_release: v23.2.0-beta.3 - - release_name: v23.2.0-rc.2 major_version: v23.2 release_date: '2024-01-09' @@ -5513,7 
+5135,6 @@ docker_arm_limited_access: true source: true previous_release: v23.2.0-rc.1 - - release_name: v23.1.14 major_version: v23.1 release_date: '2024-01-17' @@ -5540,7 +5161,6 @@ docker_arm_limited_access: false source: true previous_release: v23.1.13 - - release_name: v23.2.0 major_version: v23.2 release_date: '2024-02-05' @@ -5567,7 +5187,6 @@ docker_arm_limited_access: false source: true previous_release: v23.2.0-rc.2 - - release_name: v22.2.18 major_version: v22.2 release_date: '2024-02-08' @@ -5594,7 +5213,6 @@ docker_arm_limited_access: false source: true previous_release: v22.2.17 - - release_name: v23.2.1 major_version: v23.2 release_date: '2024-02-20' @@ -5621,7 +5239,6 @@ docker_arm_limited_access: false source: true previous_release: v23.2.0 - - release_name: v23.1.15 major_version: v23.1 release_date: '2024-02-20' @@ -5648,7 +5265,6 @@ docker_arm_limited_access: false source: true previous_release: v23.1.14 - - release_name: v22.2.19 major_version: v22.2 release_date: '2024-02-26' @@ -5675,7 +5291,6 @@ docker_arm_limited_access: false source: true previous_release: v22.2.18 - - release_name: v23.1.16 major_version: v23.1 release_date: '2024-02-27' @@ -5702,7 +5317,6 @@ docker_arm_limited_access: false source: true previous_release: v23.1.15 - - release_name: v23.2.2 major_version: v23.2 release_date: '2024-02-27' @@ -5729,7 +5343,6 @@ docker_arm_limited_access: false source: true previous_release: v23.2.1 - - release_name: v24.1.0-alpha.1 major_version: v24.1 release_date: '2024-03-07' @@ -5755,7 +5368,6 @@ docker_arm_experimental: false docker_arm_limited_access: false source: true - - release_name: v24.1.0-alpha.2 major_version: v24.1 release_date: '2024-03-11' @@ -5781,7 +5393,6 @@ docker_arm_experimental: false docker_arm_limited_access: false source: true - - release_name: v24.1.0-alpha.3 major_version: v24.1 release_date: '2024-03-18' @@ -5808,7 +5419,6 @@ docker_arm_limited_access: false source: true previous_release: v24.1.0-alpha.2 - - release_name: v23.1.17 major_version: v23.1 release_date: '2024-03-19' @@ -5835,7 +5445,6 @@ docker_arm_limited_access: false source: true previous_release: v23.1.16 - - release_name: v23.2.3 major_version: v23.2 release_date: '2024-03-20' @@ -5862,7 +5471,6 @@ docker_arm_limited_access: false source: true previous_release: v23.2.2 - - release_name: v24.1.0-alpha.4 major_version: v24.1 release_date: '2024-03-25' @@ -5889,7 +5497,6 @@ docker_arm_limited_access: false source: true previous_release: v24.1.0-alpha.3 - - release_name: v24.1.0-alpha.5 major_version: v24.1 release_date: '2024-04-01' @@ -5916,7 +5523,6 @@ docker_arm_limited_access: false source: true previous_release: v24.1.0-alpha.4 - - release_name: v23.1.18 major_version: v23.1 release_date: '2024-04-09' @@ -5943,7 +5549,6 @@ docker_arm_limited_access: false source: true previous_release: v23.1.17 - - release_name: v23.2.4 major_version: v23.2 release_date: '2024-04-11' @@ -5970,7 +5575,6 @@ docker_arm_limited_access: false source: true previous_release: v23.2.3 - - release_name: v24.1.0-beta.1 major_version: v24.1 release_date: '2024-04-17' @@ -5997,7 +5601,6 @@ docker_arm_limited_access: false source: true previous_release: v24.1.0-alpha.5 - - release_name: v23.1.19 major_version: v23.1 release_date: '2024-04-18' @@ -6024,7 +5627,6 @@ docker_arm_limited_access: false source: true previous_release: v23.1.18 - - release_name: v24.1.0-beta.2 major_version: v24.1 release_date: '2024-04-24' @@ -6051,7 +5653,6 @@ docker_arm_limited_access: false source: true 
previous_release: v24.1.0-beta.1 - - release_name: v24.1.0-beta.3 major_version: v24.1 release_date: '2024-04-30' @@ -6078,7 +5679,6 @@ docker_arm_limited_access: false source: true previous_release: v24.1.0-beta.2 - - release_name: v23.1.20 major_version: v23.1 release_date: '2024-05-01' @@ -6105,7 +5705,6 @@ docker_arm_limited_access: false source: true previous_release: v23.1.19 - - release_name: v23.2.5 major_version: v23.2 release_date: '2024-05-07' @@ -6132,7 +5731,6 @@ docker_arm_limited_access: false source: true previous_release: v23.2.4 - - release_name: v23.1.21 major_version: v23.1 release_date: '2024-05-07' @@ -6159,7 +5757,6 @@ docker_arm_limited_access: false source: true previous_release: v23.1.20 - - release_name: v24.1.0-rc.1 major_version: v24.1 release_date: '2024-05-08' @@ -6186,7 +5783,6 @@ docker_arm_limited_access: false source: true previous_release: v24.1.0-beta.3 - - release_name: v24.1.0-rc.2 major_version: v24.1 release_date: '2024-05-16' @@ -6213,7 +5809,6 @@ docker_arm_limited_access: false source: true previous_release: v24.1.0-rc.1 - - release_name: v24.1.0 major_version: v24.1 release_date: '2024-05-20' @@ -6240,7 +5835,6 @@ docker_arm_limited_access: false source: true previous_release: v24.1.0-rc.2 - - release_name: v23.1.22 major_version: v23.1 release_date: '2024-05-23' @@ -6267,7 +5861,6 @@ docker_arm_limited_access: false source: true previous_release: v23.1.21 - - release_name: v23.2.6 major_version: v23.2 release_date: '2024-06-11' @@ -6294,7 +5887,6 @@ docker_arm_limited_access: false source: true previous_release: v23.2.5 - - release_name: v24.1.1 major_version: v24.1 release_date: '2024-06-14' @@ -6321,7 +5913,6 @@ docker_arm_limited_access: false source: true previous_release: v24.1.0 - - release_name: v23.1.23 major_version: v23.1 release_date: '2024-06-20' @@ -6348,7 +5939,6 @@ docker_arm_limited_access: false source: true previous_release: v23.1.22 - - release_name: v24.2.0-alpha.1 major_version: v24.2 release_date: '2024-07-01' @@ -6374,7 +5964,6 @@ docker_arm_experimental: false docker_arm_limited_access: false source: true - - release_name: v23.2.7 major_version: v23.2 release_date: '2024-07-02' @@ -6401,7 +5990,6 @@ docker_arm_limited_access: false source: true previous_release: v23.2.6 - - release_name: v24.1.2 major_version: v24.1 release_date: '2024-07-02' @@ -6428,7 +6016,6 @@ docker_arm_limited_access: false source: true previous_release: v24.1.1 - - release_name: v24.2.0-alpha.2 major_version: v24.2 release_date: '2024-07-10' @@ -6455,7 +6042,6 @@ docker_arm_limited_access: false source: true previous_release: v24.2.0-alpha.1 - - release_name: v23.2.8 major_version: v23.2 release_date: '2024-07-15' @@ -6482,7 +6068,6 @@ docker_arm_limited_access: false source: true previous_release: v23.2.7 - - release_name: v24.2.0-beta.1 major_version: v24.2 release_date: '2024-07-18' @@ -6509,7 +6094,6 @@ docker_arm_limited_access: false source: true previous_release: v24.2.0-alpha.2 - - release_name: v23.1.24 major_version: v23.1 release_date: '2024-07-18' @@ -6536,7 +6120,6 @@ docker_arm_limited_access: false source: true previous_release: v23.1.23 - - release_name: v24.2.0-beta.2 major_version: v24.2 release_date: '2024-07-24' @@ -6563,7 +6146,6 @@ docker_arm_limited_access: false source: true previous_release: v24.2.0-beta.1 - - release_name: v24.2.0-beta.3 major_version: v24.2 release_date: '2024-08-01' @@ -6590,7 +6172,6 @@ docker_arm_limited_access: false source: true previous_release: v24.2.0-beta.2 - - release_name: v23.2.9 
major_version: v23.2 release_date: '2024-08-01' @@ -6617,7 +6198,6 @@ docker_arm_limited_access: false source: true previous_release: v23.2.8 - - release_name: v24.1.3 major_version: v24.1 release_date: '2024-08-01' @@ -6644,7 +6224,6 @@ docker_arm_limited_access: false source: true previous_release: v24.1.2 - - release_name: v24.2.0-rc.1 major_version: v24.2 release_date: '2024-08-07' @@ -6671,7 +6250,6 @@ docker_arm_limited_access: false source: true previous_release: v24.2.0-beta.3 - - release_name: v24.2.0 major_version: v24.2 release_date: '2024-08-12' @@ -6724,7 +6302,6 @@ docker_arm_limited_access: false source: true previous_release: v23.1.24 - - release_name: v23.2.10 major_version: v23.2 release_date: '2024-08-29' @@ -6751,7 +6328,6 @@ docker_arm_limited_access: false source: true previous_release: v23.2.9 - - release_name: v24.1.4 major_version: v24.1 release_date: '2024-08-29' @@ -6778,7 +6354,6 @@ docker_arm_limited_access: false source: true previous_release: v24.1.3 - - release_name: v24.2.1 major_version: v24.2 release_date: '2024-09-05' @@ -6806,7 +6381,6 @@ source: true previous_release: v24.2.0 withdrawn: true - - release_name: v23.1.26 major_version: v23.1 release_date: '2024-09-12' @@ -6833,7 +6407,6 @@ docker_arm_limited_access: false source: true previous_release: v23.1.25 - - release_name: v23.2.11 major_version: v23.2 release_date: '2024-09-16' @@ -6860,7 +6433,6 @@ docker_arm_limited_access: false source: true previous_release: v23.2.10 - - release_name: v24.2.2 major_version: v24.2 release_date: '2024-09-23' @@ -6887,7 +6459,6 @@ docker_arm_limited_access: false source: true previous_release: v24.2.1 - - release_name: v24.2.3 major_version: v24.2 release_date: '2024-09-25' @@ -6914,7 +6485,6 @@ docker_arm_limited_access: false source: true previous_release: v24.2.2 - - release_name: v24.1.5 major_version: v24.1 release_date: '2024-09-25' @@ -6941,7 +6511,6 @@ docker_arm_limited_access: false source: true previous_release: v24.1.3-261-gf629e9a3a88 - - release_name: v23.2.12 major_version: v23.2 release_date: '2024-09-25' @@ -6968,7 +6537,6 @@ docker_arm_limited_access: false source: true previous_release: v23.2.11 - - release_name: v23.1.27 major_version: v23.1 release_date: '2024-10-03' @@ -6995,8 +6563,6 @@ docker_arm_limited_access: false source: true previous_release: v23.1.26 - - - release_name: v24.3.0-alpha.1 major_version: v24.3 release_date: '2024-10-09' @@ -7022,7 +6588,6 @@ docker_arm_experimental: false docker_arm_limited_access: false source: true - - release_name: v23.1.28 major_version: v23.1 release_date: '2024-10-10' @@ -7049,7 +6614,6 @@ docker_arm_limited_access: false source: true previous_release: v23.1.27 - - release_name: v24.3.0-alpha.2 major_version: v24.3 release_date: '2024-10-14' @@ -7076,7 +6640,6 @@ docker_arm_limited_access: false source: true previous_release: v24.3.0-alpha.1 - - release_name: v24.1.6 major_version: v24.1 release_date: '2024-10-17' @@ -7103,7 +6666,6 @@ docker_arm_limited_access: false source: true previous_release: v24.1.5 - - release_name: v24.2.4 major_version: v24.2 release_date: '2024-10-17' @@ -7130,7 +6692,6 @@ docker_arm_limited_access: false source: true previous_release: v24.2.3 - - release_name: v23.2.13 major_version: v23.2 release_date: '2024-10-17' @@ -7157,8 +6718,6 @@ docker_arm_limited_access: false source: true previous_release: v23.2.12 - - - release_name: v24.3.0-beta.1 major_version: v24.3 release_date: '2024-10-24' @@ -7185,8 +6744,6 @@ docker_arm_limited_access: false source: true 
previous_release: v24.3.0-alpha.2 - - - release_name: v24.3.0-beta.2 major_version: v24.3 release_date: '2024-10-28' @@ -7213,8 +6770,6 @@ docker_arm_limited_access: false source: true previous_release: v24.3.0-beta.1 - - - release_name: v23.2.14 major_version: v23.2 release_date: '2024-10-31' @@ -7241,8 +6796,6 @@ docker_arm_limited_access: false source: true previous_release: v23.2.13 - - - release_name: v24.3.0-beta.3 major_version: v24.3 release_date: '2024-11-05' @@ -7269,8 +6822,6 @@ docker_arm_limited_access: false source: true previous_release: v24.3.0-beta.2 - - - release_name: v24.3.0 major_version: v24.3 release_date: '2024-11-18' @@ -7297,8 +6848,6 @@ docker_arm_limited_access: false source: true previous_release: v24.3.0-rc.1 - - - release_name: v24.2.5 major_version: v24.2 release_date: '2024-11-18' @@ -7325,8 +6874,6 @@ docker_arm_limited_access: false source: true previous_release: v24.2.4 - - - release_name: v23.1.29 major_version: v23.1 release_date: '2024-11-18' @@ -7353,7 +6900,6 @@ docker_arm_limited_access: false source: true previous_release: v23.1.28 - - release_name: v24.1.7 major_version: v24.1 release_date: '2024-11-18' @@ -7380,7 +6926,6 @@ docker_arm_limited_access: false source: true previous_release: v24.1.6 - - release_name: v23.2.15 major_version: v23.2 release_date: '2024-11-15' @@ -7407,7 +6952,6 @@ docker_arm_limited_access: false source: true previous_release: v23.2.14 - - release_name: v23.2.16 major_version: v23.2 release_date: '2024-11-18' @@ -7434,7 +6978,6 @@ docker_arm_limited_access: false source: true previous_release: v23.2.15 - - release_name: v24.3.0-rc.1 major_version: v24.3 release_date: '2024-11-15' @@ -7461,7 +7004,6 @@ docker_arm_limited_access: false source: true previous_release: v24.3.0-beta.3 - - release_name: v24.2.6 major_version: v24.2 release_date: '2024-12-12' @@ -7488,7 +7030,6 @@ docker_arm_limited_access: false source: true previous_release: v24.2.5 - - release_name: v24.1.8 major_version: v24.1 release_date: '2024-12-12' @@ -7515,7 +7056,6 @@ docker_arm_limited_access: false source: true previous_release: v24.1.7 - - release_name: v23.1.30 major_version: v23.1 release_date: '2024-12-12' @@ -7542,7 +7082,6 @@ docker_arm_limited_access: false source: true previous_release: v23.1.29 - - release_name: v23.2.17 major_version: v23.2 release_date: '2024-12-12' @@ -7569,7 +7108,6 @@ docker_arm_limited_access: false source: true previous_release: v23.2.16 - - release_name: v24.3.1 major_version: v24.3 release_date: '2024-12-12' @@ -7596,7 +7134,6 @@ docker_arm_limited_access: false source: true previous_release: v24.3.0 - - release_name: v25.1.0-alpha.1 major_version: v25.1 release_date: '2024-12-19' @@ -7622,7 +7159,6 @@ docker_arm_experimental: false docker_arm_limited_access: false source: true - - release_name: v24.3.2 major_version: v24.3 release_date: '2024-12-26' @@ -7649,8 +7185,6 @@ docker_arm_limited_access: false source: true previous_release: v24.3.1 - - - release_name: v24.1.9 major_version: v24.1 release_date: '2024-12-26' @@ -7677,8 +7211,6 @@ docker_arm_limited_access: false source: true previous_release: v24.1.8 - - - release_name: v24.2.7 major_version: v24.2 release_date: '2024-12-26' @@ -7705,8 +7237,6 @@ docker_arm_limited_access: false source: true previous_release: v24.2.6 - - - release_name: v23.2.18 major_version: v23.2 release_date: '2024-12-26' @@ -7733,7 +7263,6 @@ docker_arm_limited_access: false source: true previous_release: v23.2.17 - - release_name: v25.1.0-alpha.2 major_version: v25.1 release_date: 
'2025-01-09' @@ -7760,7 +7289,6 @@ docker_arm_limited_access: false source: true previous_release: v25.1.0-alpha.1 - - release_name: v24.1.10 major_version: v24.1 release_date: '2025-01-09' @@ -7787,7 +7315,6 @@ docker_arm_limited_access: false source: true previous_release: v24.1.9 - - release_name: v24.2.8 major_version: v24.2 release_date: '2025-01-09' @@ -7814,7 +7341,6 @@ docker_arm_limited_access: false source: true previous_release: v24.2.7 - - release_name: v24.3.3 major_version: v24.3 release_date: '2025-01-09' @@ -7841,7 +7367,6 @@ docker_arm_limited_access: false source: true previous_release: v24.3.2 - - release_name: v23.2.19 major_version: v23.2 release_date: '2025-01-09' @@ -7868,8 +7393,6 @@ docker_arm_limited_access: false source: true previous_release: v23.2.18 - - - release_name: v25.1.0-alpha.3 major_version: v25.1 release_date: '2025-01-15' @@ -7896,8 +7419,6 @@ docker_arm_limited_access: false source: true previous_release: v25.1.0-alpha.2 - - - release_name: v25.1.0-beta.1 major_version: v25.1 release_date: '2025-01-20' @@ -7924,8 +7445,6 @@ docker_arm_limited_access: false source: true previous_release: v25.1.0-alpha.3 - - - release_name: v25.1.0-beta.2 major_version: v25.1 release_date: '2025-01-27' @@ -7952,8 +7471,6 @@ docker_arm_limited_access: false source: true previous_release: v25.1.0-beta.1 - - - release_name: v24.2.9 major_version: v24.2 release_date: '2025-01-31' @@ -7980,8 +7497,6 @@ docker_arm_limited_access: false source: true previous_release: v24.2.8 - - - release_name: v24.1.11 major_version: v24.1 release_date: '2025-01-31' @@ -8008,8 +7523,6 @@ docker_arm_limited_access: false source: true previous_release: v24.1.10 - - - release_name: v24.3.4 major_version: v24.3 release_date: '2025-01-31' @@ -8036,8 +7549,6 @@ docker_arm_limited_access: false source: true previous_release: v24.3.3 - - - release_name: v25.1.0-beta.3 major_version: v25.1 release_date: '2025-02-03' @@ -8064,7 +7575,6 @@ docker_arm_limited_access: false source: true previous_release: v25.1.0-beta.2 - - release_name: v24.1.12 major_version: v24.1 release_date: '2025-02-06' @@ -8091,7 +7601,6 @@ docker_arm_limited_access: false source: true previous_release: v24.1.11 - - release_name: v23.2.20 major_version: v23.2 release_date: '2025-02-06' @@ -8118,7 +7627,6 @@ docker_arm_limited_access: false source: true previous_release: v23.2.19 - - release_name: v24.3.5 major_version: v24.3 release_date: '2025-02-06' @@ -8145,7 +7653,6 @@ docker_arm_limited_access: false source: true previous_release: v24.3.4 - - release_name: v24.2.10 major_version: v24.2 release_date: '2025-02-06' @@ -8172,7 +7679,6 @@ docker_arm_limited_access: false source: true previous_release: v24.2.9 - - release_name: v25.1.0-rc.1 major_version: v25.1 release_date: '2025-02-10' @@ -8199,7 +7705,6 @@ docker_arm_limited_access: false source: true previous_release: v25.1.0-beta.3 - - release_name: v25.1.0 major_version: v25.1 release_date: '2025-02-18' @@ -8226,8 +7731,6 @@ docker_arm_limited_access: false source: true previous_release: v25.1.0-rc.1 - - - release_name: v24.3.6 major_version: v24.3 release_date: '2025-02-19' @@ -8254,8 +7757,6 @@ docker_arm_limited_access: false source: true previous_release: v24.3.5 - - - release_name: v24.1.13 major_version: v24.1 release_date: '2025-02-19' @@ -8282,8 +7783,6 @@ docker_arm_limited_access: false source: true previous_release: v24.1.12 - - - release_name: v24.3.7 major_version: v24.3 release_date: '2025-03-06' @@ -8310,8 +7809,6 @@ docker_arm_limited_access: false source: 
true previous_release: v24.3.6 - - - release_name: v23.2.21 major_version: v23.2 release_date: '2025-03-06' @@ -8338,8 +7835,6 @@ docker_arm_limited_access: false source: true previous_release: v23.2.20 - - - release_name: v24.1.14 major_version: v24.1 release_date: '2025-03-06' @@ -8366,8 +7861,6 @@ docker_arm_limited_access: false source: true previous_release: v24.1.13 - - - release_name: v24.3.8 major_version: v24.3 release_date: '2025-03-12' @@ -8394,8 +7887,6 @@ docker_arm_limited_access: false source: true previous_release: v24.3.7 - - - release_name: v25.1.1 major_version: v25.1 release_date: '2025-03-11' @@ -8422,8 +7913,6 @@ docker_arm_limited_access: false source: true previous_release: v25.1.0 - - - release_name: v25.1.2 major_version: v25.1 release_date: '2025-03-12' @@ -8450,9 +7939,6 @@ docker_arm_limited_access: false source: true previous_release: v25.1.1 - - - - release_name: v25.2.0-alpha.1 major_version: v25.2 release_date: '2025-03-24' @@ -8478,7 +7964,6 @@ docker_arm_experimental: false docker_arm_limited_access: false source: true - - release_name: v25.2.0-alpha.2 major_version: v25.2 release_date: '2025-03-31' @@ -8505,8 +7990,6 @@ docker_arm_limited_access: false source: true previous_release: v25.2.0-alpha.1 - - - release_name: v23.2.22 major_version: v23.2 release_date: '2025-04-02' @@ -8533,8 +8016,6 @@ docker_arm_limited_access: false source: true previous_release: v23.2.21 - - - release_name: v24.3.9 major_version: v24.3 release_date: '2025-04-02' @@ -8561,8 +8042,6 @@ docker_arm_limited_access: false source: true previous_release: v24.3.8 - - - release_name: v25.1.3 major_version: v25.1 release_date: '2025-04-02' @@ -8589,8 +8068,6 @@ docker_arm_limited_access: false source: true previous_release: v25.1.2 - - - release_name: v24.1.15 major_version: v24.1 release_date: '2025-04-03' @@ -8617,8 +8094,6 @@ docker_arm_limited_access: false source: true previous_release: v24.1.14 - - - release_name: v25.2.0-alpha.3 major_version: v25.2 release_date: '2025-04-07' @@ -8645,8 +8120,6 @@ docker_arm_limited_access: false source: true previous_release: v25.2.0-alpha.2 - - - release_name: v25.1.4 major_version: v25.1 release_date: '2025-04-09' @@ -8673,8 +8146,6 @@ docker_arm_limited_access: false source: true previous_release: v25.1.3 - - - release_name: v23.2.23 major_version: v23.2 release_date: '2025-04-09' @@ -8701,8 +8172,6 @@ docker_arm_limited_access: false source: true previous_release: v23.2.22 - - - release_name: v24.1.16 major_version: v24.1 release_date: '2025-04-09' @@ -8729,8 +8198,6 @@ docker_arm_limited_access: false source: true previous_release: v24.1.15 - - - release_name: v24.3.10 major_version: v24.3 release_date: '2025-04-09' @@ -8757,8 +8224,6 @@ docker_arm_limited_access: false source: true previous_release: v24.3.9 - - - release_name: v25.2.0-beta.1 major_version: v25.2 release_date: '2025-04-14' @@ -8785,8 +8250,6 @@ docker_arm_limited_access: false source: true previous_release: v25.2.0-alpha.3 - - - release_name: v25.2.0-beta.2 major_version: v25.2 release_date: '2025-04-23' @@ -8813,8 +8276,6 @@ docker_arm_limited_access: false source: true previous_release: v25.2.0-beta.1 - - - release_name: v23.2.24 major_version: v23.2 release_date: '2025-04-28' @@ -8841,8 +8302,6 @@ docker_arm_limited_access: false source: true previous_release: v23.2.23 - - - release_name: v24.1.17 major_version: v24.1 release_date: '2025-04-28' @@ -8869,8 +8328,6 @@ docker_arm_limited_access: false source: true previous_release: v24.1.16 - - - release_name: v24.3.11 
major_version: v24.3 release_date: '2025-04-28' @@ -8897,8 +8354,6 @@ docker_arm_limited_access: false source: true previous_release: v24.3.10 - - - release_name: v25.1.5 major_version: v25.1 release_date: '2025-04-28' @@ -8925,8 +8380,6 @@ docker_arm_limited_access: false source: true previous_release: v25.1.4 - - - release_name: v25.2.0-beta.3 major_version: v25.2 release_date: '2025-04-28' @@ -8953,8 +8406,6 @@ docker_arm_limited_access: false source: true previous_release: v25.2.0-beta.2 - - - release_name: v24.1.18 major_version: v24.1 release_date: '2025-04-30' @@ -8981,7 +8432,6 @@ docker_arm_limited_access: false source: true previous_release: v24.1.17 - - release_name: v23.2.25 major_version: v23.2 release_date: '2025-04-30' @@ -9008,7 +8458,6 @@ docker_arm_limited_access: false source: true previous_release: v23.2.24 - - release_name: v24.3.12 major_version: v24.3 release_date: '2025-04-30' @@ -9035,8 +8484,6 @@ docker_arm_limited_access: false source: true previous_release: v24.3.11 - - - release_name: v25.1.6 major_version: v25.1 release_date: '2025-04-30' @@ -9063,8 +8510,6 @@ docker_arm_limited_access: false source: true previous_release: v25.1.5 - - - release_name: v25.2.0-rc.1 major_version: v25.2 release_date: '2025-05-11' @@ -9091,7 +8536,6 @@ docker_arm_limited_access: false source: true previous_release: v25.2.0-beta.3 - - release_name: v25.2.0 major_version: v25.2 release_date: '2025-05-12' @@ -9118,8 +8562,6 @@ docker_arm_limited_access: false source: true previous_release: v25.2.0-rc.1 - - - release_name: v24.3.13 major_version: v24.3 release_date: '2025-05-15' @@ -9146,8 +8588,6 @@ docker_arm_limited_access: false source: true previous_release: v24.3.12 - - - release_name: v23.2.26 major_version: v23.2 release_date: '2025-05-28' @@ -9174,8 +8614,6 @@ docker_arm_limited_access: false source: true previous_release: v23.2.25 - - - release_name: v25.1.7 major_version: v25.1 release_date: '2025-05-28' @@ -9202,8 +8640,6 @@ docker_arm_limited_access: false source: true previous_release: v25.1.6 - - - release_name: v24.3.14 major_version: v24.3 release_date: '2025-05-28' @@ -9230,8 +8666,6 @@ docker_arm_limited_access: false source: true previous_release: v24.3.13 - - - release_name: v24.1.19 major_version: v24.1 release_date: '2025-05-28' @@ -9293,3 +8727,30 @@ CockroachDB Cloud clusters. To request to upgrade a CockroachDB self-hosted cluster to this version, [contact support](https://support.cockroachlabs.com/hc/requests/new). 
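All of the release entries touched above share one schema: a release_name, its major_version and release_date, a release_type such as Testing, per-platform availability flags (mac, windows, linux, and the *_arm, *_experimental, *_limited_access, and *_fips variants), a docker block naming either cockroachdb/cockroach or cockroachdb/cockroach-unstable, and a previous_release pointer chaining each build to the one before it. Because this change also strips the blank separator lines between entries, a structural check is a cheap way to confirm the file still parses as one flat list with intact chains. Below is a minimal sketch, assuming PyYAML and the repo-relative path used in this diff; the script is hypothetical and not part of the change:

import yaml

# releases.yml parses as a flat list of release mappings.
with open("src/current/_data/releases.yml") as f:
    releases = yaml.safe_load(f)

# release_name values are expected to be unique.
names = [r["release_name"] for r in releases]
assert len(names) == len(set(names)), "duplicate release_name"

# Each previous_release pointer should name a release defined in the file.
# (First builds of a major version, like v25.3.0-alpha.1 added below, omit it.)
known = set(names)
for r in releases:
    prev = r.get("previous_release")
    if prev and prev not in known:
        print(f"{r['release_name']}: dangling previous_release {prev}")

The new v25.3 metrics files added further down are similarly regular: available-metrics-in-metrics-list.csv is a single metric_id column, available-metrics-not-in-metrics-list.csv adds description, y-axis label, type, and unit columns, and metrics-list.csv carries eight columns (layer, metric, description, y-axis label, type, unit, aggregation, derivative) with multi-line descriptions held in quoted fields. Since those listings run uninterrupted below, a reader sketch for them sits here instead; again hypothetical, standard library only:

import csv
from collections import Counter

# Tally metric type per layer (e.g. STORAGE,COUNTER) from metrics-list.csv.
# csv.DictReader copes with the quoted, multi-line description fields.
with open("src/current/_data/v25.3/metrics/metrics-list.csv", newline="") as f:
    tally = Counter((row["layer"], row["type"]) for row in csv.DictReader(f))

for (layer, kind), count in sorted(tally.items()):
    print(f"{layer:10s} {kind:10s} {count}")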
+ + +- release_name: v25.3.0-alpha.1 + major_version: v25.3 + release_date: '2025-06-09' + release_type: Testing + go_version: go1.23.7 + sha: 6843e110b2e0519a66262715c6dafe6a82be209f + has_sql_only: true + has_sha256sum: true + mac: + mac_arm: true + mac_arm_experimental: true + mac_arm_limited_access: false + windows: true + linux: + linux_arm: true + linux_arm_experimental: false + linux_arm_limited_access: false + linux_intel_fips: true + linux_arm_fips: false + docker: + docker_image: cockroachdb/cockroach-unstable + docker_arm: true + docker_arm_experimental: false + docker_arm_limited_access: false + source: true \ No newline at end of file diff --git a/src/current/_data/v25.3/metrics/available-metrics-in-metrics-list.csv b/src/current/_data/v25.3/metrics/available-metrics-in-metrics-list.csv new file mode 100644 index 00000000000..86c1a7c7e61 --- /dev/null +++ b/src/current/_data/v25.3/metrics/available-metrics-in-metrics-list.csv @@ -0,0 +1,479 @@ +metric_id +addsstable.applications +addsstable.copies +addsstable.proposals +admission.io.overload +capacity +capacity.available +capacity.reserved +capacity.used +exec.error +exec.latency +exec.success +gcbytesage +gossip.bytes.received +gossip.bytes.sent +gossip.connections.incoming +gossip.connections.outgoing +gossip.connections.refused +gossip.infos.received +gossip.infos.sent +intentage +intentbytes +intentcount +keybytes +keycount +leases.epoch +leases.error +leases.expiration +leases.success +leases.transfers.error +leases.transfers.success +livebytes +livecount +liveness.epochincrements +liveness.heartbeatfailures +liveness.heartbeatlatency +liveness.heartbeatsuccesses +liveness.livenodes +node-id +queue.consistency.pending +queue.consistency.process.failure +queue.consistency.process.success +queue.consistency.processingnanos +queue.gc.info.abortspanconsidered +queue.gc.info.abortspangcnum +queue.gc.info.abortspanscanned +queue.gc.info.clearrangefailed +queue.gc.info.clearrangesuccess +queue.gc.info.intentsconsidered +queue.gc.info.intenttxns +queue.gc.info.numkeysaffected +queue.gc.info.pushtxn +queue.gc.info.resolvesuccess +queue.gc.info.resolvetotal +queue.gc.info.transactionspangcaborted +queue.gc.info.transactionspangccommitted +queue.gc.info.transactionspangcpending +queue.gc.info.transactionspanscanned +queue.gc.pending +queue.gc.process.failure +queue.gc.process.success +queue.gc.processingnanos +queue.raftlog.pending +queue.raftlog.process.failure +queue.raftlog.process.success +queue.raftlog.processingnanos +queue.raftsnapshot.pending +queue.raftsnapshot.process.failure +queue.raftsnapshot.process.success +queue.raftsnapshot.processingnanos +queue.replicagc.pending +queue.replicagc.process.failure +queue.replicagc.process.success +queue.replicagc.processingnanos +queue.replicagc.removereplica +queue.replicate.addreplica +queue.replicate.addreplica.error +queue.replicate.addreplica.success +queue.replicate.pending +queue.replicate.process.failure +queue.replicate.process.success +queue.replicate.processingnanos +queue.replicate.purgatory +queue.replicate.rebalancereplica +queue.replicate.removedeadreplica +queue.replicate.removedeadreplica.error +queue.replicate.removedeadreplica.success +queue.replicate.removedecommissioningreplica.error +queue.replicate.removedecommissioningreplica.success +queue.replicate.removereplica +queue.replicate.removereplica.error +queue.replicate.removereplica.success +queue.replicate.replacedeadreplica.error +queue.replicate.replacedeadreplica.success 
+queue.replicate.replacedecommissioningreplica.error +queue.replicate.replacedecommissioningreplica.success +queue.replicate.transferlease +queue.split.pending +queue.split.process.failure +queue.split.process.success +queue.split.processingnanos +queue.tsmaintenance.pending +queue.tsmaintenance.process.failure +queue.tsmaintenance.process.success +queue.tsmaintenance.processingnanos +raft.commandsapplied +raft.heartbeats.pending +raft.process.commandcommit.latency +raft.process.logcommit.latency +raft.process.tickingnanos +raft.process.workingnanos +raft.rcvd.app +raft.rcvd.appresp +raft.rcvd.dropped +raft.rcvd.heartbeat +raft.rcvd.heartbeatresp +raft.rcvd.prevote +raft.rcvd.prevoteresp +raft.rcvd.prop +raft.rcvd.snap +raft.rcvd.timeoutnow +raft.rcvd.transferleader +raft.rcvd.vote +raft.rcvd.voteresp +raft.ticks +raftlog.behind +raftlog.truncated +range.adds +range.merges +range.raftleadertransfers +range.removes +range.snapshots.generated +range.snapshots.rcvd-bytes +range.snapshots.rebalancing.rcvd-bytes +range.snapshots.rebalancing.sent-bytes +range.snapshots.recovery.rcvd-bytes +range.snapshots.recovery.sent-bytes +range.snapshots.recv-in-progress +range.snapshots.recv-queue +range.snapshots.recv-total-in-progress +range.snapshots.send-in-progress +range.snapshots.send-queue +range.snapshots.send-total-in-progress +range.snapshots.sent-bytes +range.snapshots.unknown.rcvd-bytes +range.snapshots.unknown.sent-bytes +range.splits +rangekeybytes +rangekeycount +ranges +ranges.overreplicated +ranges.unavailable +ranges.underreplicated +rangevalbytes +rangevalcount +rebalancing.queriespersecond +rebalancing.readbytespersecond +rebalancing.readspersecond +rebalancing.requestspersecond +rebalancing.writebytespersecond +rebalancing.writespersecond +replicas +replicas.leaders +replicas.leaders_invalid_lease +replicas.leaders_not_leaseholders +replicas.leaseholders +replicas.quiescent +replicas.reserved +requests.backpressure.split +requests.slow.lease +requests.slow.raft +rocksdb.block.cache.hits +rocksdb.block.cache.misses +rocksdb.block.cache.usage +rocksdb.bloom.filter.prefix.checked +rocksdb.bloom.filter.prefix.useful +rocksdb.compactions +rocksdb.flushes +rocksdb.memtable.total-size +rocksdb.num-sstables +rocksdb.read-amplification +rocksdb.table-readers-mem-estimate +storage.keys.range-key-set.count +storage.l0-level-score +storage.l0-level-size +storage.l0-num-files +storage.l0-sublevels +storage.l1-level-score +storage.l1-level-size +storage.l2-level-score +storage.l2-level-size +storage.l3-level-score +storage.l3-level-size +storage.l4-level-score +storage.l4-level-size +storage.l5-level-score +storage.l5-level-size +storage.l6-level-score +storage.l6-level-size +storage.marked-for-compaction-files +storage.write-stalls +sysbytes +syscount +tenant.consumption.cross_region_network_ru +tenant.consumption.external_io_egress_bytes +tenant.consumption.pgwire_egress_bytes +tenant.consumption.read_batches +tenant.consumption.read_bytes +tenant.consumption.read_requests +tenant.consumption.request_units +tenant.consumption.sql_pods_cpu_seconds +tenant.consumption.write_batches +tenant.consumption.write_bytes +tenant.consumption.write_requests +timeseries.write.bytes +timeseries.write.errors +timeseries.write.samples +totalbytes +txnwaitqueue.deadlocks_total +valbytes +valcount +changefeed.aggregator_progress +changefeed.backfill_count +changefeed.backfill_pending_ranges +changefeed.checkpoint_progress +changefeed.commit_latency +changefeed.emitted_bytes +changefeed.emitted_messages 
+changefeed.error_retries +changefeed.failures +changefeed.lagging_ranges +changefeed.max_behind_nanos +changefeed.message_size_hist +changefeed.running +clock-offset.meannanos +clock-offset.stddevnanos +cluster.preserve-downgrade-option.last-updated +distsender.batches +distsender.batches.partial +distsender.errors.notleaseholder +distsender.rpc.sent +distsender.rpc.sent.local +distsender.rpc.sent.nextreplicaerror +jobs.auto_create_stats.currently_paused +jobs.auto_create_stats.currently_running +jobs.auto_create_stats.resume_failed +jobs.backup.currently_paused +jobs.backup.currently_running +jobs.changefeed.currently_paused +jobs.changefeed.expired_pts_records +jobs.changefeed.protected_age_sec +jobs.changefeed.resume_retry_error +jobs.create_stats.currently_running +jobs.row_level_ttl.currently_paused +jobs.row_level_ttl.currently_running +jobs.row_level_ttl.delete_duration +jobs.row_level_ttl.num_active_spans +jobs.row_level_ttl.resume_completed +jobs.row_level_ttl.resume_failed +jobs.row_level_ttl.rows_deleted +jobs.row_level_ttl.rows_selected +jobs.row_level_ttl.select_duration +jobs.row_level_ttl.span_total_duration +jobs.row_level_ttl.total_expired_rows +jobs.row_level_ttl.total_rows +physical_replication.logical_bytes +physical_replication.replicated_time_seconds +requests.slow.distsender +round-trip-latency +rpc.connection.avg_round_trip_latency +rpc.connection.failures +rpc.connection.healthy +rpc.connection.healthy_nanos +rpc.connection.heartbeats +rpc.connection.unhealthy +rpc.connection.unhealthy_nanos +schedules.BACKUP.failed +schedules.BACKUP.last-completed-time +schedules.BACKUP.protected_age_sec +schedules.BACKUP.protected_record_count +schedules.BACKUP.started +schedules.BACKUP.succeeded +schedules.scheduled-row-level-ttl-executor.failed +sql.bytesin +sql.bytesout +sql.conn.latency +sql.conns +sql.ddl.count +sql.delete.count +sql.distsql.contended_queries.count +sql.distsql.exec.latency +sql.distsql.flows.active +sql.distsql.flows.total +sql.distsql.queries.active +sql.distsql.queries.total +sql.distsql.select.count +sql.distsql.service.latency +sql.exec.latency +sql.failure.count +sql.full.scan.count +sql.guardrails.max_row_size_err.count +sql.guardrails.max_row_size_log.count +sql.insert.count +sql.mem.distsql.current +sql.mem.distsql.max +sql.mem.internal.session.current +sql.mem.internal.session.max +sql.mem.internal.txn.current +sql.mem.internal.txn.max +sql.mem.root.current +sql.mem.root.max +sql.misc.count +sql.new_conns +sql.pgwire_cancel.ignored +sql.pgwire_cancel.successful +sql.pgwire_cancel.total +sql.query.count +sql.select.count +sql.service.latency +sql.statements.active +sql.txn.abort.count +sql.txn.begin.count +sql.txn.commit.count +sql.txn.contended.count +sql.txn.latency +sql.txn.rollback.count +sql.txns.open +sql.update.count +tenant.sql_usage.cross_region_network_ru +tenant.sql_usage.estimated_cpu_seconds +tenant.sql_usage.external_io_egress_bytes +tenant.sql_usage.external_io_ingress_bytes +tenant.sql_usage.kv_request_units +tenant.sql_usage.pgwire_egress_bytes +tenant.sql_usage.provisioned_vcpus +tenant.sql_usage.read_batches +tenant.sql_usage.read_bytes +tenant.sql_usage.read_requests +tenant.sql_usage.request_units +tenant.sql_usage.sql_pods_cpu_seconds +tenant.sql_usage.write_batches +tenant.sql_usage.write_bytes +tenant.sql_usage.write_requests +txn.aborts +txn.commits +txn.commits1PC +txn.durations +txn.restarts +txn.restarts.asyncwritefailure +txn.restarts.readwithinuncertainty +txn.restarts.serializable +txn.restarts.txnaborted 
+txn.restarts.txnpush +txn.restarts.unknown +txn.restarts.writetooold +build.timestamp +sys.cgo.allocbytes +sys.cgo.totalbytes +sys.cgocalls +sys.cpu.combined.percent-normalized +sys.cpu.host.combined.percent-normalized +sys.cpu.sys.ns +sys.cpu.sys.percent +sys.cpu.user.ns +sys.cpu.user.percent +sys.fd.open +sys.fd.softlimit +sys.gc.count +sys.gc.pause.ns +sys.gc.pause.percent +sys.go.allocbytes +sys.go.totalbytes +sys.goroutines +sys.host.disk.iopsinprogress +sys.host.disk.read.bytes +sys.host.disk.read.count +sys.host.disk.write.bytes +sys.host.disk.write.count +sys.host.net.recv.bytes +sys.host.net.send.bytes +sys.rss +sys.runnable.goroutines.per.cpu +sys.totalmem +sys.uptime +jobs.auto_config_env_runner.currently_paused +jobs.auto_config_env_runner.protected_age_sec +jobs.auto_config_env_runner.protected_record_count +jobs.auto_config_runner.currently_paused +jobs.auto_config_runner.protected_age_sec +jobs.auto_config_runner.protected_record_count +jobs.auto_config_task.currently_paused +jobs.auto_config_task.protected_age_sec +jobs.auto_config_task.protected_record_count +jobs.auto_create_partial_stats.currently_paused +jobs.auto_create_partial_stats.protected_age_sec +jobs.auto_create_partial_stats.protected_record_count +jobs.auto_create_stats.protected_age_sec +jobs.auto_create_stats.protected_record_count +jobs.auto_schema_telemetry.currently_paused +jobs.auto_schema_telemetry.protected_age_sec +jobs.auto_schema_telemetry.protected_record_count +jobs.auto_span_config_reconciliation.currently_paused +jobs.auto_span_config_reconciliation.protected_age_sec +jobs.auto_span_config_reconciliation.protected_record_count +jobs.auto_sql_stats_compaction.currently_paused +jobs.auto_sql_stats_compaction.protected_age_sec +jobs.auto_sql_stats_compaction.protected_record_count +jobs.auto_update_sql_activity.currently_paused +jobs.auto_update_sql_activity.protected_age_sec +jobs.auto_update_sql_activity.protected_record_count +jobs.backup.protected_age_sec +jobs.backup.protected_record_count +jobs.changefeed.protected_record_count +jobs.create_stats.currently_paused +jobs.create_stats.protected_age_sec +jobs.create_stats.protected_record_count +jobs.history_retention.currently_paused +jobs.history_retention.protected_age_sec +jobs.history_retention.protected_record_count +jobs.import.currently_paused +jobs.import.protected_age_sec +jobs.import.protected_record_count +jobs.import_rollback.currently_paused +jobs.import_rollback.protected_age_sec +jobs.import_rollback.protected_record_count +jobs.key_visualizer.currently_paused +jobs.key_visualizer.protected_age_sec +jobs.key_visualizer.protected_record_count +jobs.logical_replication.currently_paused +jobs.logical_replication.protected_age_sec +jobs.logical_replication.protected_record_count +jobs.migration.currently_paused +jobs.migration.protected_age_sec +jobs.migration.protected_record_count +jobs.mvcc_statistics_update.currently_paused +jobs.mvcc_statistics_update.protected_age_sec +jobs.mvcc_statistics_update.protected_record_count +jobs.new_schema_change.currently_paused +jobs.new_schema_change.protected_age_sec +jobs.new_schema_change.protected_record_count +jobs.poll_jobs_stats.currently_paused +jobs.poll_jobs_stats.protected_age_sec +jobs.poll_jobs_stats.protected_record_count +jobs.replication_stream_ingestion.currently_paused +jobs.replication_stream_ingestion.protected_age_sec +jobs.replication_stream_ingestion.protected_record_count +jobs.replication_stream_producer.currently_paused 
+jobs.replication_stream_producer.protected_age_sec +jobs.replication_stream_producer.protected_record_count +jobs.restore.currently_paused +jobs.restore.protected_age_sec +jobs.restore.protected_record_count +jobs.row_level_ttl.protected_age_sec +jobs.row_level_ttl.protected_record_count +jobs.schema_change.currently_paused +jobs.schema_change.protected_age_sec +jobs.schema_change.protected_record_count +jobs.schema_change_gc.currently_paused +jobs.schema_change_gc.protected_age_sec +jobs.schema_change_gc.protected_record_count +jobs.standby_read_ts_poller.currently_paused +jobs.standby_read_ts_poller.protected_age_sec +jobs.standby_read_ts_poller.protected_record_count +jobs.typedesc_schema_change.currently_paused +jobs.typedesc_schema_change.protected_age_sec +jobs.typedesc_schema_change.protected_record_count +jobs.update_table_metadata_cache.currently_paused +jobs.update_table_metadata_cache.protected_age_sec +jobs.update_table_metadata_cache.protected_record_count +sql.crud_query.count +sql.crud_query.started.count +auth.cert.conn.latency +auth.gss.conn.latency +auth.jwt.conn.latency +auth.ldap.conn.latency +auth.password.conn.latency +auth.scram.conn.latency +sql.exec.latency.detail +sql.query.unique.count \ No newline at end of file diff --git a/src/current/_data/v25.3/metrics/available-metrics-not-in-metrics-list.csv b/src/current/_data/v25.3/metrics/available-metrics-not-in-metrics-list.csv new file mode 100644 index 00000000000..1cd86aace0a --- /dev/null +++ b/src/current/_data/v25.3/metrics/available-metrics-not-in-metrics-list.csv @@ -0,0 +1,19 @@ +metric_id,description,y-axis label,type,unit +"security.certificate.expiration.ca","Expiration for the CA certificate. 0 means no certificate or error.","Certificate Expiration",GAUGE,TIMESTAMP_SEC +"security.certificate.expiration.client-ca","Expiration for the client CA certificate. 0 means no certificate or error.","Certificate Expiration",GAUGE,TIMESTAMP_SEC +"security.certificate.expiration.client","Minimum expiration for client certificates, labeled by SQL user. 0 means no certificate or error.","Certificate Expiration",GAUGE,TIMESTAMP_SEC +"security.certificate.expiration.ui-ca","Expiration for the UI CA certificate. 0 means no certificate or error.","Certificate Expiration",GAUGE,TIMESTAMP_SEC +"security.certificate.expiration.node","Expiration for the node certificate. 0 means no certificate or error.","Certificate Expiration",GAUGE,TIMESTAMP_SEC +"security.certificate.expiration.node-client","Expiration for the node's client certificate. 0 means no certificate or error.","Certificate Expiration",GAUGE,TIMESTAMP_SEC +"security.certificate.expiration.ui","Expiration for the UI certificate. 0 means no certificate or error.","Certificate Expiration",GAUGE,TIMESTAMP_SEC +"security.certificate.expiration.ca-client-tenant","Expiration for the Tenant Client CA certificate. 0 means no certificate or error.","Certificate Expiration",GAUGE,TIMESTAMP_SEC +"security.certificate.expiration.client-tenant","Expiration for the Tenant Client certificate. 0 means no certificate or error.","Certificate Expiration",GAUGE,TIMESTAMP_SEC +"security.certificate.ttl.ca","Seconds till expiration for the CA certificate. 0 means expired, no certificate or error.","Certificate TTL",GAUGE,TIMESTAMP_SEC +"security.certificate.ttl.client-ca","Seconds till expiration for the client CA certificate. 
0 means expired, no certificate or error.","Certificate TTL",GAUGE,TIMESTAMP_SEC +"security.certificate.ttl.client","Seconds till expiration for the client certificates, labeled by SQL user. 0 means expired, no certificate or error.","Certificate TTL",GAUGE,TIMESTAMP_SEC +"security.certificate.ttl.ui-ca","Seconds till expiration for the UI CA certificate. 0 means expired, no certificate or error.","Certificate TTL",GAUGE,TIMESTAMP_SEC +"security.certificate.ttl.node","Seconds till expiration for the node certificate. 0 means expired, no certificate or error.","Certificate TTL",GAUGE,TIMESTAMP_SEC +"security.certificate.ttl.node-client","Seconds till expiration for the node's client certificate. 0 means expired, no certificate or error.","Certificate TTL",GAUGE,TIMESTAMP_SEC +"security.certificate.ttl.ui","Seconds till expiration for the UI certificate. 0 means expired, no certificate or error.","Certificate TTL",GAUGE,TIMESTAMP_SEC +"security.certificate.ttl.ca-client-tenant","Seconds till expiration for the Tenant Client CA certificate. 0 means expired, no certificate or error.","Certificate TTL",GAUGE,TIMESTAMP_SEC +"security.certificate.ttl.client-tenant","Seconds till expiration for the Tenant Client certificate. 0 means expired, no certificate or error.","Certificate TTL",GAUGE,TIMESTAMP_SEC \ No newline at end of file diff --git a/src/current/_data/v25.3/metrics/metrics-list.csv b/src/current/_data/v25.3/metrics/metrics-list.csv new file mode 100644 index 00000000000..015400f1318 --- /dev/null +++ b/src/current/_data/v25.3/metrics/metrics-list.csv @@ -0,0 +1,2802 @@ +layer,metric,description,y-axis label,type,unit,aggregation,derivative +STORAGE,abortspanbytes,Number of bytes in the abort span,Storage,GAUGE,BYTES,AVG,NONE +STORAGE,addsstable.applications,Number of SSTable ingestions applied (i.e. applied by Replicas),Ingestions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,addsstable.aswrites,"Number of SSTables ingested as normal writes. + +These AddSSTable requests do not count towards the addsstable metrics +'proposals', 'applications', or 'copies', as they are not ingested as AddSSTable +Raft commands, but rather normal write commands. However, if these requests get +throttled they do count towards 'delay.total' and 'delay.enginebackpressure'. +",Ingestions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,addsstable.copies,number of SSTable ingestions that required copying files during application,Ingestions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,addsstable.delay.enginebackpressure,Amount by which evaluation of AddSSTable requests was delayed by storage-engine backpressure,Nanoseconds,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,addsstable.delay.total,Amount by which evaluation of AddSSTable requests was delayed,Nanoseconds,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,addsstable.proposals,Number of SSTable ingestions proposed (i.e. 
sent to Raft by lease holders),Ingestions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,admission.admitted.elastic-cpu,Number of requests admitted,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,admission.admitted.elastic-cpu.bulk-normal-pri,Number of requests admitted,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,admission.admitted.elastic-cpu.normal-pri,Number of requests admitted,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,admission.admitted.elastic-stores,Number of requests admitted,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,admission.admitted.elastic-stores.bulk-low-pri,Number of requests admitted,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,admission.admitted.elastic-stores.bulk-normal-pri,Number of requests admitted,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,admission.admitted.kv,Number of requests admitted,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,admission.admitted.kv-stores,Number of requests admitted,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,admission.admitted.kv-stores.high-pri,Number of requests admitted,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,admission.admitted.kv-stores.locking-normal-pri,Number of requests admitted,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,admission.admitted.kv-stores.normal-pri,Number of requests admitted,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,admission.admitted.kv-stores.user-high-pri,Number of requests admitted,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,admission.admitted.kv.high-pri,Number of requests admitted,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,admission.admitted.kv.locking-normal-pri,Number of requests admitted,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,admission.admitted.kv.normal-pri,Number of requests admitted,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,admission.admitted.sql-kv-response,Number of requests admitted,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,admission.admitted.sql-kv-response.locking-normal-pri,Number of requests admitted,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,admission.admitted.sql-kv-response.normal-pri,Number of requests admitted,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,admission.admitted.sql-leaf-start,Number of requests admitted,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,admission.admitted.sql-leaf-start.locking-normal-pri,Number of requests admitted,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,admission.admitted.sql-leaf-start.normal-pri,Number of requests admitted,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,admission.admitted.sql-root-start,Number of requests admitted,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,admission.admitted.sql-root-start.locking-normal-pri,Number of requests admitted,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,admission.admitted.sql-root-start.normal-pri,Number of requests admitted,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,admission.admitted.sql-sql-response,Number of requests admitted,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,admission.admitted.sql-sql-response.locking-normal-pri,Number of requests admitted,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,admission.admitted.sql-sql-response.normal-pri,Number of requests 
admitted,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.elastic_cpu.acquired_nanos,Total CPU nanoseconds acquired by elastic work,Nanoseconds,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.elastic_cpu.available_nanos,Instantaneous available CPU nanoseconds per second ignoring utilization limit,Nanoseconds,GAUGE,NANOSECONDS,AVG,NONE
+STORAGE,admission.elastic_cpu.max_available_nanos,Maximum available CPU nanoseconds per second ignoring utilization limit,Nanoseconds,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.elastic_cpu.nanos_exhausted_duration,"Total duration when elastic CPU nanoseconds were exhausted, in micros",Microseconds,GAUGE,COUNT,AVG,NONE
+STORAGE,admission.elastic_cpu.over_limit_durations,Measurement of how much over the prescribed limit elastic requests ran (not recorded if requests don't run over),Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,admission.elastic_cpu.pre_work_nanos,"Total CPU nanoseconds spent doing pre-work, before doing elastic work",Nanoseconds,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.elastic_cpu.returned_nanos,Total CPU nanoseconds returned by elastic work,Nanoseconds,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.elastic_cpu.utilization,CPU utilization by elastic work,CPU Time,GAUGE,PERCENT,AVG,NONE
+STORAGE,admission.elastic_cpu.utilization_limit,Utilization limit set for the elastic CPU work,CPU Time,GAUGE,PERCENT,AVG,NONE
+STORAGE,admission.errored.elastic-cpu,Number of requests not admitted due to error,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.errored.elastic-cpu.bulk-normal-pri,Number of requests not admitted due to error,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.errored.elastic-cpu.normal-pri,Number of requests not admitted due to error,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.errored.elastic-stores,Number of requests not admitted due to error,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.errored.elastic-stores.bulk-low-pri,Number of requests not admitted due to error,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.errored.elastic-stores.bulk-normal-pri,Number of requests not admitted due to error,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.errored.kv,Number of requests not admitted due to error,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.errored.kv-stores,Number of requests not admitted due to error,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.errored.kv-stores.high-pri,Number of requests not admitted due to error,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.errored.kv-stores.locking-normal-pri,Number of requests not admitted due to error,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.errored.kv-stores.normal-pri,Number of requests not admitted due to error,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.errored.kv-stores.user-high-pri,Number of requests not admitted due to error,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.errored.kv.high-pri,Number of requests not admitted due to error,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.errored.kv.locking-normal-pri,Number of requests not admitted due to error,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.errored.kv.normal-pri,Number of requests not admitted due to error,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.errored.sql-kv-response,Number of requests not admitted due to error,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.errored.sql-kv-response.locking-normal-pri,Number of requests not admitted due to error,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.errored.sql-kv-response.normal-pri,Number of requests not admitted due to error,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.errored.sql-leaf-start,Number of requests not admitted due to error,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.errored.sql-leaf-start.locking-normal-pri,Number of requests not admitted due to error,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.errored.sql-leaf-start.normal-pri,Number of requests not admitted due to error,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.errored.sql-root-start,Number of requests not admitted due to error,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.errored.sql-root-start.locking-normal-pri,Number of requests not admitted due to error,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.errored.sql-root-start.normal-pri,Number of requests not admitted due to error,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.errored.sql-sql-response,Number of requests not admitted due to error,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.errored.sql-sql-response.locking-normal-pri,Number of requests not admitted due to error,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.errored.sql-sql-response.normal-pri,Number of requests not admitted due to error,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.granter.cpu_load_long_period_duration.kv,"Total duration when CPULoad was being called with a long period, in micros",Microseconds,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.granter.cpu_load_short_period_duration.kv,"Total duration when CPULoad was being called with a short period, in micros",Microseconds,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.granter.elastic_io_tokens_available.kv,Number of tokens available,Tokens,GAUGE,COUNT,AVG,NONE
+STORAGE,admission.granter.elastic_io_tokens_exhausted_duration.kv,"Total duration when Elastic IO tokens were exhausted, in micros",Microseconds,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.granter.io_tokens_available.kv,Number of tokens available,Tokens,GAUGE,COUNT,AVG,NONE
+STORAGE,admission.granter.io_tokens_bypassed.kv,"Total number of tokens taken by work bypassing admission control (for example, follower writes without flow control)",Tokens,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.granter.io_tokens_exhausted_duration.kv,"Total duration when IO tokens were exhausted, in micros",Microseconds,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.granter.io_tokens_returned.kv,Total number of tokens returned,Tokens,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.granter.io_tokens_taken.kv,Total number of tokens taken,Tokens,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.granter.slot_adjuster_decrements.kv,Number of decrements of the total KV slots,Slots,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.granter.slot_adjuster_increments.kv,Number of increments of the total KV slots,Slots,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.granter.slots_exhausted_duration.kv,"Total duration when KV slots were exhausted, in micros",Microseconds,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.granter.total_slots.kv,Total slots for kv work,Slots,GAUGE,COUNT,AVG,NONE
+STORAGE,admission.granter.used_slots.kv,Used slots,Slots,GAUGE,COUNT,AVG,NONE
+STORAGE,admission.granter.used_slots.sql-leaf-start,Used slots,Slots,GAUGE,COUNT,AVG,NONE
+STORAGE,admission.granter.used_slots.sql-root-start,Used slots,Slots,GAUGE,COUNT,AVG,NONE
+STORAGE,admission.io.overload,1-normalized float indicating whether IO admission control considers the store as overloaded with respect to compaction out of L0 (considers sub-level and file counts).,Threshold,GAUGE,PERCENT,AVG,NONE
+STORAGE,admission.l0_compacted_bytes.kv,Total bytes compacted out of L0 (used to generate IO tokens),Tokens,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.l0_tokens_produced.kv,Total bytes produced for L0 writes,Tokens,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.raft.paused_replicas,"Number of followers (i.e. Replicas) to which replication is currently paused to help them recover from I/O overload.
+
+Such Replicas will be ignored for the purposes of proposal quota, and will not
+receive replication traffic. They are essentially treated as offline for the
+purpose of replication. This serves as a crude form of admission control.
+
+The count is emitted by the leaseholder of each range.",Followers,GAUGE,COUNT,AVG,NONE
+STORAGE,admission.raft.paused_replicas_dropped_msgs,"Number of messages dropped instead of being sent to paused replicas.
+
+The messages are dropped to help these replicas to recover from I/O overload.",Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.requested.elastic-cpu,Number of requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.requested.elastic-cpu.bulk-normal-pri,Number of requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.requested.elastic-cpu.normal-pri,Number of requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.requested.elastic-stores,Number of requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.requested.elastic-stores.bulk-low-pri,Number of requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.requested.elastic-stores.bulk-normal-pri,Number of requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.requested.kv,Number of requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.requested.kv-stores,Number of requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.requested.kv-stores.high-pri,Number of requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.requested.kv-stores.locking-normal-pri,Number of requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.requested.kv-stores.normal-pri,Number of requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.requested.kv-stores.user-high-pri,Number of requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.requested.kv.high-pri,Number of requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.requested.kv.locking-normal-pri,Number of requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.requested.kv.normal-pri,Number of requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.requested.sql-kv-response,Number of requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.requested.sql-kv-response.locking-normal-pri,Number of requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.requested.sql-kv-response.normal-pri,Number of requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.requested.sql-leaf-start,Number of requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.requested.sql-leaf-start.locking-normal-pri,Number of requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.requested.sql-leaf-start.normal-pri,Number of requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.requested.sql-root-start,Number of requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.requested.sql-root-start.locking-normal-pri,Number of requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.requested.sql-root-start.normal-pri,Number of requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.requested.sql-sql-response,Number of requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.requested.sql-sql-response.locking-normal-pri,Number of requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.requested.sql-sql-response.normal-pri,Number of requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.scheduler_latency_listener.p99_nanos,The scheduling latency at p99 as observed by the scheduler latency listener,Nanoseconds,GAUGE,NANOSECONDS,AVG,NONE
+STORAGE,admission.wait_durations.elastic-cpu,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,admission.wait_durations.elastic-cpu.bulk-normal-pri,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,admission.wait_durations.elastic-cpu.normal-pri,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,admission.wait_durations.elastic-stores,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,admission.wait_durations.elastic-stores.bulk-low-pri,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,admission.wait_durations.elastic-stores.bulk-normal-pri,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,admission.wait_durations.kv,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,admission.wait_durations.kv-stores,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,admission.wait_durations.kv-stores.high-pri,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,admission.wait_durations.kv-stores.locking-normal-pri,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,admission.wait_durations.kv-stores.normal-pri,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,admission.wait_durations.kv-stores.user-high-pri,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,admission.wait_durations.kv.high-pri,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,admission.wait_durations.kv.locking-normal-pri,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,admission.wait_durations.kv.normal-pri,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,admission.wait_durations.snapshot_ingest,Wait time for snapshot ingest requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,admission.wait_durations.sql-kv-response,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,admission.wait_durations.sql-kv-response.locking-normal-pri,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,admission.wait_durations.sql-kv-response.normal-pri,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,admission.wait_durations.sql-leaf-start,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,admission.wait_durations.sql-leaf-start.locking-normal-pri,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,admission.wait_durations.sql-leaf-start.normal-pri,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,admission.wait_durations.sql-root-start,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,admission.wait_durations.sql-root-start.locking-normal-pri,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,admission.wait_durations.sql-root-start.normal-pri,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,admission.wait_durations.sql-sql-response,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,admission.wait_durations.sql-sql-response.locking-normal-pri,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,admission.wait_durations.sql-sql-response.normal-pri,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,admission.wait_queue_length.elastic-cpu,Length of wait queue,Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,admission.wait_queue_length.elastic-cpu.bulk-normal-pri,Length of wait queue,Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,admission.wait_queue_length.elastic-cpu.normal-pri,Length of wait queue,Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,admission.wait_queue_length.elastic-stores,Length of wait queue,Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,admission.wait_queue_length.elastic-stores.bulk-low-pri,Length of wait queue,Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,admission.wait_queue_length.elastic-stores.bulk-normal-pri,Length of wait queue,Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,admission.wait_queue_length.kv,Length of wait queue,Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,admission.wait_queue_length.kv-stores,Length of wait queue,Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,admission.wait_queue_length.kv-stores.high-pri,Length of wait queue,Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,admission.wait_queue_length.kv-stores.locking-normal-pri,Length of wait queue,Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,admission.wait_queue_length.kv-stores.normal-pri,Length of wait queue,Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,admission.wait_queue_length.kv-stores.user-high-pri,Length of wait queue,Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,admission.wait_queue_length.kv.high-pri,Length of wait queue,Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,admission.wait_queue_length.kv.locking-normal-pri,Length of wait queue,Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,admission.wait_queue_length.kv.normal-pri,Length of wait queue,Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,admission.wait_queue_length.sql-kv-response,Length of wait queue,Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,admission.wait_queue_length.sql-kv-response.locking-normal-pri,Length of wait queue,Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,admission.wait_queue_length.sql-kv-response.normal-pri,Length of wait queue,Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,admission.wait_queue_length.sql-leaf-start,Length of wait queue,Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,admission.wait_queue_length.sql-leaf-start.locking-normal-pri,Length of wait queue,Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,admission.wait_queue_length.sql-leaf-start.normal-pri,Length of wait queue,Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,admission.wait_queue_length.sql-root-start,Length of wait queue,Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,admission.wait_queue_length.sql-root-start.locking-normal-pri,Length of wait queue,Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,admission.wait_queue_length.sql-root-start.normal-pri,Length of wait queue,Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,admission.wait_queue_length.sql-sql-response,Length of wait queue,Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,admission.wait_queue_length.sql-sql-response.locking-normal-pri,Length of wait queue,Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,admission.wait_queue_length.sql-sql-response.normal-pri,Length of wait queue,Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,batch_requests.bytes,Total byte count of batch requests processed,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,batch_requests.cross_region.bytes,"Total byte count of batch requests processed cross region when region
+ tiers are configured",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,batch_requests.cross_zone.bytes,"Total byte count of batch requests processed cross zone within
+ the same region when region and zone tiers are configured. However, if the
+ region tiers are not configured, this count may also include batch data sent
+ between different regions. Ensuring consistent configuration of region and
+ zone tiers across nodes helps to accurately monitor the data transmitted.",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,batch_responses.bytes,Total byte count of batch responses received,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,batch_responses.cross_region.bytes,"Total byte count of batch responses received cross region when region
+ tiers are configured",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,batch_responses.cross_zone.bytes,"Total byte count of batch responses received cross zone within the
+ same region when region and zone tiers are configured. However, if the
+ region tiers are not configured, this count may also include batch data
+ received between different regions. Ensuring consistent configuration of
+ region and zone tiers across nodes helps to accurately monitor the data
+ transmitted.",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,capacity,Total storage capacity,Storage,GAUGE,BYTES,AVG,NONE
+STORAGE,capacity.available,Available storage capacity,Storage,GAUGE,BYTES,AVG,NONE
+STORAGE,capacity.reserved,Capacity reserved for snapshots,Storage,GAUGE,BYTES,AVG,NONE
+STORAGE,capacity.used,Used storage capacity,Storage,GAUGE,BYTES,AVG,NONE
+STORAGE,exec.error,"Number of batch KV requests that failed to execute on this node.
+
+This count excludes transaction restart/abort errors. However, it will include
+other errors expected during normal operation, such as ConditionFailedError.
+This metric is thus not an indicator of KV health.",Batch KV Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,exec.latency,"Latency of batch KV requests (including errors) executed on this node.
+
+This measures requests already addressed to a single replica, from the moment
+at which they arrive at the internal gRPC endpoint to the moment at which the
+response (or an error) is returned.
+
+This latency includes in particular commit waits, conflict resolution and replication,
+and end-users can easily produce high measurements via long-running transactions that
+conflict with foreground traffic. This metric thus does not provide a good signal for
+understanding the health of the KV layer.
+",Latency,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,exec.success,"Number of batch KV requests executed successfully on this node.
+
+A request is considered to have executed 'successfully' if it either returns a result
+or a transaction restart/abort error.
+",Batch KV Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,exportrequest.delay.total,Amount by which evaluation of Export requests was delayed,Nanoseconds,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,follower_reads.success_count,Number of reads successfully processed by any replica,Read Ops,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,gcbytesage,Cumulative age of non-live data,Age,GAUGE,SECONDS,AVG,NONE
+STORAGE,gossip.bytes.received,Number of received gossip bytes,Gossip Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,gossip.bytes.sent,Number of sent gossip bytes,Gossip Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,gossip.callbacks.pending,Number of gossip callbacks waiting to be processed,Callbacks,GAUGE,COUNT,AVG,NONE
+STORAGE,gossip.callbacks.pending_duration,Duration of gossip callback queueing to be processed,Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,gossip.callbacks.processed,Number of gossip callbacks processed,Callbacks,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,gossip.callbacks.processing_duration,Duration of gossip callback processing,Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,gossip.connections.incoming,Number of active incoming gossip connections,Connections,GAUGE,COUNT,AVG,NONE
+STORAGE,gossip.connections.outgoing,Number of active outgoing gossip connections,Connections,GAUGE,COUNT,AVG,NONE
+STORAGE,gossip.connections.refused,Number of refused incoming gossip connections,Connections,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,gossip.infos.received,Number of received gossip Info objects,Infos,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,gossip.infos.sent,Number of sent gossip Info objects,Infos,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,intentage,Cumulative age of locks,Age,GAUGE,SECONDS,AVG,NONE
+STORAGE,intentbytes,Number of bytes in intent KV pairs,Storage,GAUGE,BYTES,AVG,NONE
+STORAGE,intentcount,Count of intent keys,Keys,GAUGE,COUNT,AVG,NONE
+STORAGE,intentresolver.async.throttled,Number of intent resolution attempts not run asynchronously due to throttling,Intent Resolutions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,intentresolver.finalized_txns.failed,Number of finalized transaction cleanup failures. Transaction cleanup refers to the process of resolving all of a transaction's intents and then garbage collecting its transaction record.,Intent Resolutions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,intentresolver.intents.failed,"Number of intent resolution failures. The unit of measurement is a single intent, so if a batch of intent resolution requests fails, the metric will be incremented for each request in the batch.",Intent Resolutions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,intents.abort-attempts,Count of (point or range) non-poisoning intent abort evaluation attempts,Operations,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,intents.poison-attempts,Count of (point or range) poisoning intent abort evaluation attempts,Operations,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,intents.resolve-attempts,Count of (point or range) intent commit evaluation attempts,Operations,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,keybytes,Number of bytes taken up by keys,Storage,GAUGE,BYTES,AVG,NONE
+STORAGE,keycount,Count of all keys,Keys,GAUGE,COUNT,AVG,NONE
+STORAGE,kv.allocator.load_based_lease_transfers.cannot_find_better_candidate,The number of times the allocator determined that the lease was on the best possible replica,Attempts,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kv.allocator.load_based_lease_transfers.delta_not_significant,The number of times the allocator determined that the delta between the existing store and the best candidate was not significant,Attempts,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kv.allocator.load_based_lease_transfers.existing_not_overfull,The number of times the allocator determined that the lease was not on an overfull store,Attempts,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kv.allocator.load_based_lease_transfers.follow_the_workload,The number of times the allocator determined that the lease should be transferred to another replica for locality.,Attempts,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kv.allocator.load_based_lease_transfers.missing_stats_for_existing_stores,The number of times the allocator was missing qps stats for the leaseholder,Attempts,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kv.allocator.load_based_lease_transfers.should_transfer,The number of times the allocator determined that the lease should be transferred to another replica for better load distribution,Attempts,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kv.allocator.load_based_replica_rebalancing.cannot_find_better_candidate,The number of times the allocator determined that the range was on the best possible stores,Attempts,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kv.allocator.load_based_replica_rebalancing.delta_not_significant,The number of times the allocator determined that the delta between an existing store and the best replacement candidate was not high enough,Attempts,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kv.allocator.load_based_replica_rebalancing.existing_not_overfull,The number of times the allocator determined that none of the range's replicas were on overfull stores,Attempts,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kv.allocator.load_based_replica_rebalancing.missing_stats_for_existing_store,The number of times the allocator was missing the qps stats for the existing store,Attempts,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kv.allocator.load_based_replica_rebalancing.should_transfer,The number of times the allocator determined that the replica should be rebalanced to another store for better load distribution,Attempts,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kv.closed_timestamp.max_behind_nanos,Largest latency between realtime and replica max closed timestamp,Nanoseconds,GAUGE,NANOSECONDS,AVG,NONE
+STORAGE,kv.concurrency.avg_lock_hold_duration_nanos,Average lock hold duration across locks currently held in lock tables. Does not include replicated locks (intents) that are not held in memory,Nanoseconds,GAUGE,NANOSECONDS,AVG,NONE
+STORAGE,kv.concurrency.avg_lock_wait_duration_nanos,Average lock wait duration across requests currently waiting in lock wait-queues,Nanoseconds,GAUGE,NANOSECONDS,AVG,NONE
+STORAGE,kv.concurrency.latch_conflict_wait_durations,Durations in nanoseconds spent on latch acquisition waiting for conflicts with other latches,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,kv.concurrency.lock_wait_queue_waiters,Number of requests actively waiting in a lock wait-queue,Lock-Queue Waiters,GAUGE,COUNT,AVG,NONE
+STORAGE,kv.concurrency.locks,Number of active locks held in lock tables. Does not include replicated locks (intents) that are not held in memory,Locks,GAUGE,COUNT,AVG,NONE
+STORAGE,kv.concurrency.locks_with_wait_queues,Number of active locks held in lock tables with active wait-queues,Locks,GAUGE,COUNT,AVG,NONE
+STORAGE,kv.concurrency.max_lock_hold_duration_nanos,Maximum length of time any lock in a lock table is held. Does not include replicated locks (intents) that are not held in memory,Nanoseconds,GAUGE,NANOSECONDS,AVG,NONE
+STORAGE,kv.concurrency.max_lock_wait_duration_nanos,Maximum lock wait duration across requests currently waiting in lock wait-queues,Nanoseconds,GAUGE,NANOSECONDS,AVG,NONE
+STORAGE,kv.concurrency.max_lock_wait_queue_waiters_for_lock,Maximum number of requests actively waiting in any single lock wait-queue,Lock-Queue Waiters,GAUGE,COUNT,AVG,NONE
+STORAGE,kv.loadsplitter.nosplitkey,Load-based splitter could not find a split key.,Occurrences,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kv.loadsplitter.popularkey,Load-based splitter could not find a split key and the most popular sampled split key occurs in >= 25% of the samples.,Occurrences,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kv.prober.planning_attempts,Number of attempts made at planning out probes; in order to probe KV we need to plan out which ranges to probe,Runs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kv.prober.planning_failures,"Number of attempts at planning out probes that failed; in order to probe KV we need to plan out which ranges to probe; if planning fails, then kvprober is not able to send probes to all ranges; consider alerting on this metric as a result",Runs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kv.prober.read.attempts,"Number of attempts made to read probe KV, regardless of outcome",Queries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kv.prober.read.failures,"Number of attempts made to read probe KV that failed, whether due to error or timeout",Queries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kv.prober.read.latency,Latency of successful KV read probes,Latency,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,kv.prober.write.attempts,"Number of attempts made to write probe KV, regardless of outcome",Queries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kv.prober.write.failures,"Number of attempts made to write probe KV that failed, whether due to error or timeout",Queries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kv.prober.write.latency,Latency of successful KV write probes,Latency,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,kv.prober.write.quarantine.oldest_duration,The duration that the oldest range has remained in the write quarantine pool,Seconds,GAUGE,SECONDS,AVG,NONE
+STORAGE,kv.rangefeed.budget_allocation_blocked,Number of times RangeFeed waited for budget availability,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kv.rangefeed.budget_allocation_failed,Number of times RangeFeed failed because memory budget was exceeded,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kv.rangefeed.catchup_scan_nanos,Time spent in RangeFeed catchup scan,Nanoseconds,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kv.rangefeed.closed_timestamp.slow_ranges,Number of ranges that have a closed timestamp lagging by more than 5x target lag. Periodically re-calculated,Ranges,GAUGE,COUNT,AVG,NONE
+STORAGE,kv.rangefeed.closed_timestamp.slow_ranges.cancelled,Number of rangefeeds that were cancelled due to a chronically lagging closed timestamp,Cancellation Count,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kv.rangefeed.closed_timestamp_max_behind_nanos,Largest latency between realtime and replica max closed timestamp for replicas that have active rangefeeds on them,Nanoseconds,GAUGE,NANOSECONDS,AVG,NONE
+STORAGE,kv.rangefeed.mem_shared,Memory usage by rangefeeds,Memory,GAUGE,BYTES,AVG,NONE
+STORAGE,kv.rangefeed.mem_system,Memory usage by rangefeeds on system ranges,Memory,GAUGE,BYTES,AVG,NONE
+STORAGE,kv.rangefeed.processors_goroutine,Number of active RangeFeed processors using goroutines,Processors,GAUGE,COUNT,AVG,NONE
+STORAGE,kv.rangefeed.processors_scheduler,Number of active RangeFeed processors using scheduler,Processors,GAUGE,COUNT,AVG,NONE
+STORAGE,kv.rangefeed.registrations,Number of active RangeFeed registrations,Registrations,GAUGE,COUNT,AVG,NONE
+STORAGE,kv.rangefeed.scheduled_processor.queue_timeout,Number of times the RangeFeed processor shut down because of a queue send timeout,Failure Count,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kv.rangefeed.scheduler.normal.latency,KV RangeFeed normal scheduler latency,Latency,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,kv.rangefeed.scheduler.normal.queue_size,Number of entries in the KV RangeFeed normal scheduler queue,Pending Ranges,GAUGE,COUNT,AVG,NONE
+STORAGE,kv.rangefeed.scheduler.system.latency,KV RangeFeed system scheduler latency,Latency,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,kv.rangefeed.scheduler.system.queue_size,Number of entries in the KV RangeFeed system scheduler queue,Pending Ranges,GAUGE,COUNT,AVG,NONE
+STORAGE,kv.replica_circuit_breaker.num_tripped_events,Number of times the per-Replica circuit breakers tripped since process start.,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kv.replica_circuit_breaker.num_tripped_replicas,"Number of Replicas for which the per-Replica circuit breaker is currently tripped.
+
+A nonzero value indicates range or replica unavailability, and should be investigated.
+Replicas in this state will fail-fast all inbound requests.
+",Replicas,GAUGE,COUNT,AVG,NONE
+STORAGE,kv.replica_read_batch_evaluate.dropped_latches_before_eval,Number of times read-only batches dropped latches before evaluation.,Batches,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kv.replica_read_batch_evaluate.latency,"Execution duration for evaluating a BatchRequest on the read-only path after latches have been acquired.
+
+A measurement is recorded regardless of outcome (i.e. also in case of an error). If internal retries occur, each instance is recorded separately.",Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,kv.replica_read_batch_evaluate.without_interleaving_iter,Number of read-only batches evaluated without an intent interleaving iter.,Batches,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kv.replica_write_batch_evaluate.latency,"Execution duration for evaluating a BatchRequest on the read-write path after latches have been acquired.
+
+A measurement is recorded regardless of outcome (i.e. also in case of an error). If internal retries occur, each instance is recorded separately.
+Note that the measurement does not include the duration for replicating the evaluated command.",Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,kv.split.estimated_stats,Number of splits that computed estimated MVCC stats.,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kv.split.total_bytes_estimates,Total difference in bytes between the pre-split and post-split MVCC stats.,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kv.tenant_rate_limit.current_blocked,Number of requests currently blocked by the rate limiter,Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,kv.tenant_rate_limit.num_tenants,Number of tenants currently being tracked,Tenants,GAUGE,COUNT,AVG,NONE
+STORAGE,kv.tenant_rate_limit.read_batches_admitted,Number of read batches admitted by the rate limiter,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kv.tenant_rate_limit.read_bytes_admitted,Number of read bytes admitted by the rate limiter,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kv.tenant_rate_limit.read_requests_admitted,Number of read requests admitted by the rate limiter,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kv.tenant_rate_limit.write_batches_admitted,Number of write batches admitted by the rate limiter,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kv.tenant_rate_limit.write_bytes_admitted,Number of write bytes admitted by the rate limiter,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kv.tenant_rate_limit.write_requests_admitted,Number of write requests admitted by the rate limiter,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvadmission.flow_controller.elastic_blocked_stream_count,Number of replication streams with no flow tokens available for elastic requests,Count,GAUGE,COUNT,AVG,NONE
+STORAGE,kvadmission.flow_controller.elastic_requests_admitted,Number of elastic requests admitted by the flow controller,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvadmission.flow_controller.elastic_requests_bypassed,Number of elastic waiting requests that bypassed the flow controller due to disconnecting streams,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvadmission.flow_controller.elastic_requests_errored,Number of elastic requests that errored out while waiting for flow tokens,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvadmission.flow_controller.elastic_requests_waiting,Number of elastic requests waiting for flow tokens,Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,kvadmission.flow_controller.elastic_stream_count,Total number of replication streams for elastic requests,Count,GAUGE,COUNT,AVG,NONE
+STORAGE,kvadmission.flow_controller.elastic_tokens_available,"Flow tokens available for elastic requests, across all replication streams",Bytes,GAUGE,BYTES,AVG,NONE
+STORAGE,kvadmission.flow_controller.elastic_tokens_deducted,"Flow tokens deducted by elastic requests, across all replication streams",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvadmission.flow_controller.elastic_tokens_returned,"Flow tokens returned by elastic requests, across all replication streams",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvadmission.flow_controller.elastic_tokens_unaccounted,"Flow tokens returned by elastic requests that were unaccounted for, across all replication streams",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvadmission.flow_controller.elastic_wait_duration,Latency histogram for time elastic requests spent waiting for flow tokens,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,kvadmission.flow_controller.regular_blocked_stream_count,Number of replication streams with no flow tokens available for regular requests,Count,GAUGE,COUNT,AVG,NONE
+STORAGE,kvadmission.flow_controller.regular_requests_admitted,Number of regular requests admitted by the flow controller,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvadmission.flow_controller.regular_requests_bypassed,Number of regular waiting requests that bypassed the flow controller due to disconnecting streams,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvadmission.flow_controller.regular_requests_errored,Number of regular requests that errored out while waiting for flow tokens,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvadmission.flow_controller.regular_requests_waiting,Number of regular requests waiting for flow tokens,Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,kvadmission.flow_controller.regular_stream_count,Total number of replication streams for regular requests,Count,GAUGE,COUNT,AVG,NONE
+STORAGE,kvadmission.flow_controller.regular_tokens_available,"Flow tokens available for regular requests, across all replication streams",Bytes,GAUGE,BYTES,AVG,NONE
+STORAGE,kvadmission.flow_controller.regular_tokens_deducted,"Flow tokens deducted by regular requests, across all replication streams",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvadmission.flow_controller.regular_tokens_returned,"Flow tokens returned by regular requests, across all replication streams",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvadmission.flow_controller.regular_tokens_unaccounted,"Flow tokens returned by regular requests that were unaccounted for, across all replication streams",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvadmission.flow_controller.regular_wait_duration,Latency histogram for time regular requests spent waiting for flow tokens,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,kvadmission.flow_handle.elastic_requests_admitted,Number of elastic requests admitted by the flow handle,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvadmission.flow_handle.elastic_requests_errored,"Number of elastic requests that errored out while waiting for flow tokens, at the handle level",Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvadmission.flow_handle.elastic_requests_waiting,"Number of elastic requests waiting for flow tokens, at the handle level",Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,kvadmission.flow_handle.elastic_wait_duration,"Latency histogram for time elastic requests spent waiting for flow tokens, at the handle level",Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,kvadmission.flow_handle.regular_requests_admitted,Number of regular requests admitted by the flow handle,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvadmission.flow_handle.regular_requests_errored,"Number of regular requests that errored out while waiting for flow tokens, at the handle level",Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvadmission.flow_handle.regular_requests_waiting,"Number of regular requests waiting for flow tokens, at the handle level",Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,kvadmission.flow_handle.regular_wait_duration,"Latency histogram for time regular requests spent waiting for flow tokens, at the handle level",Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,kvadmission.flow_handle.streams_connected,"Number of times we've connected to a stream, at the handle level",Streams,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvadmission.flow_handle.streams_disconnected,"Number of times we've disconnected from a stream, at the handle level",Streams,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvadmission.flow_token_dispatch.coalesced_elastic,Number of coalesced elastic flow token dispatches (where we're informing the sender of a higher log entry being admitted),Dispatches,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvadmission.flow_token_dispatch.coalesced_regular,Number of coalesced regular flow token dispatches (where we're informing the sender of a higher log entry being admitted),Dispatches,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvadmission.flow_token_dispatch.local_elastic,Number of local elastic flow token dispatches,Dispatches,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvadmission.flow_token_dispatch.local_regular,Number of local regular flow token dispatches,Dispatches,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvadmission.flow_token_dispatch.pending_elastic,Number of pending elastic flow token dispatches,Dispatches,GAUGE,COUNT,AVG,NONE
+STORAGE,kvadmission.flow_token_dispatch.pending_nodes,Number of nodes pending flow token dispatches,Nodes,GAUGE,COUNT,AVG,NONE
+STORAGE,kvadmission.flow_token_dispatch.pending_regular,Number of pending regular flow token dispatches,Dispatches,GAUGE,COUNT,AVG,NONE
+STORAGE,kvadmission.flow_token_dispatch.remote_elastic,Number of remote elastic flow token dispatches,Dispatches,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvadmission.flow_token_dispatch.remote_regular,Number of remote regular flow token dispatches,Dispatches,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.eval_wait.elastic.duration,Latency histogram for time elastic requests spent waiting for flow tokens to evaluate,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,kvflowcontrol.eval_wait.elastic.requests.admitted,Number of elastic requests admitted by the flow controller,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.eval_wait.elastic.requests.bypassed,Number of waiting elastic requests that bypassed the flow controller due to the evaluating replica not being the leader,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.eval_wait.elastic.requests.errored,Number of elastic requests that errored out while waiting for flow tokens,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.eval_wait.elastic.requests.waiting,Number of elastic requests waiting for flow tokens,Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,kvflowcontrol.eval_wait.regular.duration,Latency histogram for time regular requests spent waiting for flow tokens to evaluate,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,kvflowcontrol.eval_wait.regular.requests.admitted,Number of regular requests admitted by the flow controller,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.eval_wait.regular.requests.bypassed,Number of waiting regular requests that bypassed the flow controller due to the evaluating replica not being the leader,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.eval_wait.regular.requests.errored,Number of regular requests that errored out while waiting for flow tokens,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.eval_wait.regular.requests.waiting,Number of regular requests waiting for flow tokens,Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,kvflowcontrol.range_controller.count,"Gauge of range flow controllers currently open; this should align with the number of leaders",Count,GAUGE,COUNT,AVG,NONE
+STORAGE,kvflowcontrol.send_queue.bytes,"Byte size of all raft entries queued for sending to followers, waiting on available elastic send tokens",Bytes,GAUGE,BYTES,AVG,NONE
+STORAGE,kvflowcontrol.send_queue.count,"Count of all raft entries queued for sending to followers, waiting on available elastic send tokens",Count,GAUGE,COUNT,AVG,NONE
+STORAGE,kvflowcontrol.send_queue.prevent.count,Counter of replication streams that were prevented from forming a send queue,Preventions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.send_queue.scheduled.deducted_bytes,Gauge of elastic send token bytes already deducted by replication streams waiting on the scheduler,Bytes,GAUGE,BYTES,AVG,NONE
+STORAGE,kvflowcontrol.send_queue.scheduled.force_flush,Gauge of replication streams scheduled to force flush their send queue,Scheduled force flushes,GAUGE,COUNT,AVG,NONE
+STORAGE,kvflowcontrol.streams.eval.elastic.blocked_count,Number of eval replication streams with no flow tokens available for elastic requests,Count,GAUGE,COUNT,AVG,NONE
+STORAGE,kvflowcontrol.streams.eval.elastic.total_count,Total number of eval replication streams for elastic requests,Count,GAUGE,COUNT,AVG,NONE
+STORAGE,kvflowcontrol.streams.eval.regular.blocked_count,Number of eval replication streams with no flow tokens available for regular requests,Count,GAUGE,COUNT,AVG,NONE
+STORAGE,kvflowcontrol.streams.eval.regular.total_count,Total number of eval replication streams for regular requests,Count,GAUGE,COUNT,AVG,NONE
+STORAGE,kvflowcontrol.streams.send.elastic.blocked_count,Number of send replication streams with no flow tokens available for elastic requests,Count,GAUGE,COUNT,AVG,NONE
+STORAGE,kvflowcontrol.streams.send.elastic.total_count,Total number of send replication streams for elastic requests,Count,GAUGE,COUNT,AVG,NONE
+STORAGE,kvflowcontrol.streams.send.regular.blocked_count,Number of send replication streams with no flow tokens available for regular requests,Count,GAUGE,COUNT,AVG,NONE
+STORAGE,kvflowcontrol.streams.send.regular.total_count,Total number of send replication streams for regular requests,Count,GAUGE,COUNT,AVG,NONE
+STORAGE,kvflowcontrol.tokens.eval.elastic.available,"Flow eval tokens available for elastic requests, across all replication streams",Bytes,GAUGE,BYTES,AVG,NONE
+STORAGE,kvflowcontrol.tokens.eval.elastic.deducted,"Flow eval tokens deducted by elastic requests, across all replication streams",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.tokens.eval.elastic.returned,"Flow eval tokens returned by elastic requests, across all replication streams",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.tokens.eval.elastic.returned.disconnect,"Flow eval tokens returned early by elastic requests due to disconnects, across all replication streams; this is a subset of returned tokens",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.tokens.eval.elastic.unaccounted,"Flow eval tokens returned by elastic requests that were unaccounted for, across all replication streams",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.tokens.eval.regular.available,"Flow eval tokens available for regular requests, across all replication streams",Bytes,GAUGE,BYTES,AVG,NONE
+STORAGE,kvflowcontrol.tokens.eval.regular.deducted,"Flow eval tokens deducted by regular requests, across all replication streams",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.tokens.eval.regular.returned,"Flow eval tokens returned by regular requests, across all replication streams",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.tokens.eval.regular.returned.disconnect,"Flow eval tokens returned early by regular requests due to disconnects, across all replication streams; this is a subset of returned tokens",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.tokens.eval.regular.unaccounted,"Flow eval tokens returned by regular requests that were unaccounted for, across all replication streams",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.tokens.send.elastic.available,"Flow send tokens available for elastic requests, across all replication streams",Bytes,GAUGE,BYTES,AVG,NONE
+STORAGE,kvflowcontrol.tokens.send.elastic.deducted,"Flow send tokens deducted by elastic requests, across all replication streams",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.tokens.send.elastic.deducted.force_flush_send_queue,"Flow send tokens deducted by elastic requests, across all replication streams due to force flushing the stream's send queue",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.tokens.send.elastic.deducted.prevent_send_queue,"Flow send tokens deducted by elastic requests, across all replication streams to prevent forming a send queue",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.tokens.send.elastic.returned,"Flow send tokens returned by elastic requests, across all replication streams",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.tokens.send.elastic.returned.disconnect,"Flow send tokens returned early by elastic requests due to disconnects, across all replication streams; this is a subset of returned tokens",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.tokens.send.elastic.unaccounted,"Flow send tokens returned by elastic requests that were unaccounted for, across all replication streams",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.tokens.send.regular.available,"Flow send tokens available for regular requests, across all replication streams",Bytes,GAUGE,BYTES,AVG,NONE
+STORAGE,kvflowcontrol.tokens.send.regular.deducted,"Flow send tokens deducted by regular requests, across all replication streams",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.tokens.send.regular.deducted.prevent_send_queue,"Flow send tokens deducted by regular requests, across all replication streams to prevent forming a send queue",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.tokens.send.regular.returned,"Flow send tokens returned by regular requests, across all replication streams",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.tokens.send.regular.returned.disconnect,"Flow send tokens returned early by regular requests due to disconnects, across all replication streams; this is a subset of returned tokens",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.tokens.send.regular.unaccounted,"Flow send tokens returned by regular requests that were unaccounted for, across all replication streams",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,leases.epoch,Number of replica leaseholders using epoch-based leases,Replicas,GAUGE,COUNT,AVG,NONE
+STORAGE,leases.error,Number of failed lease requests,Lease Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,leases.expiration,Number of replica leaseholders using expiration-based leases,Replicas,GAUGE,COUNT,AVG,NONE
+STORAGE,leases.leader,Number of replica leaseholders using leader leases,Replicas,GAUGE,COUNT,AVG,NONE
+STORAGE,leases.liveness,Number of replica leaseholders for the liveness range(s),Replicas,GAUGE,COUNT,AVG,NONE
+STORAGE,leases.preferences.less-preferred,Number of replica leaseholders that satisfy a lease preference that is not the most preferred,Replicas,GAUGE,COUNT,AVG,NONE
+STORAGE,leases.preferences.violating,Number of replica leaseholders that violate lease preferences,Replicas,GAUGE,COUNT,AVG,NONE
+STORAGE,leases.requests.latency,"Lease request latency (all types and outcomes, coalesced)",Latency,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,leases.success,Number of successful lease requests,Lease Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,leases.transfers.error,Number of failed lease transfers,Lease Transfers,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,leases.transfers.success,Number of successful lease transfers,Lease Transfers,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,livebytes,Number of bytes of live data (keys plus values),Storage,GAUGE,BYTES,AVG,NONE
+STORAGE,livecount,Count of live keys,Keys,GAUGE,COUNT,AVG,NONE
+STORAGE,liveness.epochincrements,Number of times this node has incremented its liveness epoch,Epochs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,liveness.heartbeatfailures,Number of failed node liveness heartbeats from this node,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,liveness.heartbeatlatency,Node liveness heartbeat latency,Latency,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,liveness.heartbeatsinflight,Number of in-flight liveness heartbeats from this node,Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,liveness.heartbeatsuccesses,Number of successful node liveness heartbeats from this node,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,liveness.livenodes,Number of live nodes in the cluster (will be 0 if this node is not itself live),Nodes,GAUGE,COUNT,AVG,NONE
+STORAGE,lockbytes,"Number of bytes taken up by replicated lock key-values (shared and exclusive strength, not intent strength)",Storage,GAUGE,BYTES,AVG,NONE
+STORAGE,lockcount,"Count of replicated locks (shared, exclusive, and intent strength)",Locks,GAUGE,COUNT,AVG,NONE
+STORAGE,node-id,Node ID with labels for advertised RPC and HTTP addresses,Node ID,GAUGE,CONST,AVG,NONE
+STORAGE,queue.consistency.pending,Number of pending replicas in the consistency checker queue,Replicas,GAUGE,COUNT,AVG,NONE
+STORAGE,queue.consistency.process.failure,Number of replicas which failed processing in the consistency checker queue,Replicas,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.consistency.process.success,Number of replicas successfully processed by the consistency checker queue,Replicas,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.consistency.processingnanos,Nanoseconds spent processing replicas in the consistency checker queue,Processing Time,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.gc.info.abortspanconsidered,Number of AbortSpan entries old enough to be considered for removal,Txn Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.gc.info.abortspangcnum,Number of AbortSpan entries fit for removal,Txn Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.gc.info.abortspanscanned,Number of transactions present in the AbortSpan scanned from the engine,Txn Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.gc.info.clearrangefailed,Number of failed ClearRange operations during GC,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.gc.info.clearrangesuccess,Number of successful ClearRange operations during GC,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.gc.info.enqueuehighpriority,Number of replicas enqueued for GC with high priority,Replicas,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.gc.info.intentsconsidered,Number of 'old' intents,Intents,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.gc.info.intenttxns,Number of associated distinct transactions,Txns,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.gc.info.numkeysaffected,Number of keys with GC'able data,Keys,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.gc.info.numrangekeysaffected,Number of GC'able range keys,Range Keys,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.gc.info.pushtxn,Number of attempted pushes,Pushes,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.gc.info.resolvefailed,Number of cleanup intent failures during GC,Intent Resolutions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.gc.info.resolvesuccess,Number of successful intent resolutions,Intent Resolutions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.gc.info.resolvetotal,Number of attempted intent resolutions,Intent Resolutions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.gc.info.transactionresolvefailed,Number of intent cleanup failures for local transactions during GC,Intent Resolutions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.gc.info.transactionspangcaborted,Number of GC'able entries corresponding to aborted txns,Txn Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.gc.info.transactionspangccommitted,Number of GC'able entries corresponding to committed txns,Txn Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.gc.info.transactionspangcpending,Number of GC'able entries corresponding to pending txns,Txn Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.gc.info.transactionspangcprepared,Number of GC'able entries corresponding to prepared txns,Txn Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.gc.info.transactionspangcstaging,Number of GC'able entries corresponding to staging txns,Txn Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.gc.info.transactionspanscanned,Number of entries in transaction spans scanned from the engine,Txn Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.gc.pending,Number of pending replicas in the MVCC GC queue,Replicas,GAUGE,COUNT,AVG,NONE
+STORAGE,queue.gc.process.failure,Number of replicas which failed processing in the MVCC GC queue,Replicas,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.gc.process.success,Number of replicas successfully processed by the MVCC GC queue,Replicas,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.gc.processingnanos,Nanoseconds spent processing replicas in the MVCC GC queue,Processing Time,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.lease.pending,Number of pending replicas in the replica lease queue,Replicas,GAUGE,COUNT,AVG,NONE
+STORAGE,queue.lease.process.failure,Number of replicas which failed processing in the replica lease queue,Replicas,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.lease.process.success,Number of replicas successfully processed by the replica lease queue,Replicas,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.lease.processingnanos,Nanoseconds spent processing replicas in the replica lease queue,Processing Time,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.lease.purgatory,"Number of replicas in the lease queue's purgatory, awaiting lease transfer operations",Replicas,GAUGE,COUNT,AVG,NONE
+STORAGE,queue.merge.pending,Number of pending replicas in the merge queue,Replicas,GAUGE,COUNT,AVG,NONE
+STORAGE,queue.merge.process.failure,Number of replicas which failed processing in the merge queue,Replicas,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.merge.process.success,Number of replicas successfully processed by the merge queue,Replicas,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.merge.processingnanos,Nanoseconds spent processing replicas in the merge queue,Processing Time,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.merge.purgatory,"Number of replicas in the merge queue's purgatory, waiting to become mergeable",Replicas,GAUGE,COUNT,AVG,NONE
+STORAGE,queue.raftlog.pending,Number of pending replicas in the Raft log queue,Replicas,GAUGE,COUNT,AVG,NONE
+STORAGE,queue.raftlog.process.failure,Number of replicas which failed processing in the Raft log queue,Replicas,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.raftlog.process.success,Number of replicas successfully processed by the Raft log queue,Replicas,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.raftlog.processingnanos,Nanoseconds spent processing replicas in the Raft log queue,Processing Time,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.raftsnapshot.pending,Number of pending replicas in the Raft repair queue,Replicas,GAUGE,COUNT,AVG,NONE
+STORAGE,queue.raftsnapshot.process.failure,Number of replicas which failed processing in the Raft repair queue,Replicas,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.raftsnapshot.process.success,Number of replicas successfully processed by the Raft repair queue,Replicas,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.raftsnapshot.processingnanos,Nanoseconds spent processing replicas in the Raft repair queue,Processing Time,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.replicagc.pending,Number of pending replicas in the replica GC queue,Replicas,GAUGE,COUNT,AVG,NONE
+STORAGE,queue.replicagc.process.failure,Number of replicas which failed processing in the replica GC queue,Replicas,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.replicagc.process.success,Number of replicas successfully processed by the replica GC queue,Replicas,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.replicagc.processingnanos,Nanoseconds spent processing replicas in the replica GC queue,Processing Time,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.replicagc.removereplica,Number of replica removals attempted by the replica GC queue,Replica Removals,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.replicate.addnonvoterreplica,Number of non-voter replica additions attempted by the replicate queue,Replica Additions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.replicate.addreplica,Number of replica additions attempted by the replicate queue,Replica Additions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.replicate.addreplica.error,Number of failed replica additions processed by the replicate queue,Replicas,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,queue.replicate.addreplica.success,Number of successful replica additions processed by the replicate queue,Replicas,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.replicate.addvoterreplica,Number of voter replica additions attempted by the replicate queue,Replica Additions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.replicate.nonvoterpromotions,Number of non-voters promoted to voters by the replicate queue,Promotions of Non Voters to Voters,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.replicate.pending,Number of pending replicas in the replicate queue,Replicas,GAUGE,COUNT,AVG,NONE +STORAGE,queue.replicate.process.failure,Number of replicas which failed processing in the replicate queue,Replicas,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.replicate.process.success,Number of replicas successfully processed by the replicate queue,Replicas,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.replicate.processingnanos,Nanoseconds spent processing replicas in the replicate queue,Processing Time,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.replicate.purgatory,"Number of replicas in the replicate queue's purgatory, awaiting allocation options",Replicas,GAUGE,COUNT,AVG,NONE +STORAGE,queue.replicate.rebalancenonvoterreplica,Number of non-voter replica rebalancer-initiated additions attempted by the replicate queue,Replica Additions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.replicate.rebalancereplica,Number of replica rebalancer-initiated additions attempted by the replicate queue,Replica Additions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.replicate.rebalancevoterreplica,Number of voter replica rebalancer-initiated additions attempted by the replicate queue,Replica Additions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.replicate.removedeadnonvoterreplica,Number of dead non-voter replica removals attempted by the replicate queue (typically in response to a node outage),Replica Removals,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.replicate.removedeadreplica,Number of dead replica removals attempted by the replicate queue (typically in response to a node outage),Replica Removals,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.replicate.removedeadreplica.error,Number of failed dead replica removals processed by the replicate queue,Replicas,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.replicate.removedeadreplica.success,Number of successful dead replica removals processed by the replicate queue,Replicas,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.replicate.removedeadvoterreplica,Number of dead voter replica removals attempted by the replicate queue (typically in response to a node outage),Replica Removals,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.replicate.removedecommissioningnonvoterreplica,Number of decommissioning non-voter replica removals attempted by the replicate queue (typically in response to a node outage),Replica Removals,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.replicate.removedecommissioningreplica,Number of decommissioning replica removals attempted by the replicate queue (typically in response to a node outage),Replica Removals,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.replicate.removedecommissioningreplica.error,Number of failed decommissioning replica removals processed by the replicate queue,Replicas,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE 
+STORAGE,queue.replicate.removedecommissioningreplica.success,Number of successful decommissioning replica removals processed by the replicate queue,Replicas,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.replicate.removedecommissioningvoterreplica,Number of decommissioning voter replica removals attempted by the replicate queue (typically in response to a node outage),Replica Removals,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.replicate.removelearnerreplica,Number of learner replica removals attempted by the replicate queue (typically due to internal race conditions),Replica Removals,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.replicate.removenonvoterreplica,Number of non-voter replica removals attempted by the replicate queue (typically in response to a rebalancer-initiated addition),Replica Removals,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.replicate.removereplica,Number of replica removals attempted by the replicate queue (typically in response to a rebalancer-initiated addition),Replica Removals,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.replicate.removereplica.error,Number of failed replica removals processed by the replicate queue,Replicas,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.replicate.removereplica.success,Number of successful replica removals processed by the replicate queue,Replicas,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.replicate.removevoterreplica,Number of voter replica removals attempted by the replicate queue (typically in response to a rebalancer-initiated addition),Replica Removals,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.replicate.replacedeadreplica.error,Number of failed dead replica replacements processed by the replicate queue,Replicas,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.replicate.replacedeadreplica.success,Number of successful dead replica replacements processed by the replicate queue,Replicas,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.replicate.replacedecommissioningreplica.error,Number of failed decommissioning replica replacements processed by the replicate queue,Replicas,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.replicate.replacedecommissioningreplica.success,Number of successful decommissioning replica replacements processed by the replicate queue,Replicas,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.replicate.transferlease,Number of range lease transfers attempted by the replicate queue,Lease Transfers,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.replicate.voterdemotions,Number of voters demoted to non-voters by the replicate queue,Demotions of Voters to Non Voters,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.split.load_based,Number of range splits due to a range being greater than the configured max range load,Range Splits,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.split.pending,Number of pending replicas in the split queue,Replicas,GAUGE,COUNT,AVG,NONE +STORAGE,queue.split.process.failure,Number of replicas which failed processing in the split queue,Replicas,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.split.process.success,Number of replicas successfully processed by the split queue,Replicas,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.split.processingnanos,Nanoseconds spent processing replicas in the split queue,Processing Time,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.split.purgatory,"Number of 
replicas in the split queue's purgatory, waiting to become splittable",Replicas,GAUGE,COUNT,AVG,NONE +STORAGE,queue.split.size_based,Number of range splits due to a range being greater than the configured max range size,Range Splits,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.split.span_config_based,Number of range splits due to span configuration,Range Splits,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.tsmaintenance.pending,Number of pending replicas in the time series maintenance queue,Replicas,GAUGE,COUNT,AVG,NONE +STORAGE,queue.tsmaintenance.process.failure,Number of replicas which failed processing in the time series maintenance queue,Replicas,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.tsmaintenance.process.success,Number of replicas successfully processed by the time series maintenance queue,Replicas,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,queue.tsmaintenance.processingnanos,Nanoseconds spent processing replicas in the time series maintenance queue,Processing Time,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.commands.pending,"Number of Raft commands proposed and pending. + +The number of Raft commands that the leaseholders are tracking as in-flight. +These commands will be periodically reproposed until they are applied or until +they fail, either unequivocally or ambiguously.",Commands,GAUGE,COUNT,AVG,NONE +STORAGE,raft.commands.proposed,"Number of Raft commands proposed. + +The number of proposals and all kinds of reproposals made by leaseholders. This +metric approximates the number of commands submitted through Raft.",Commands,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.commands.reproposed.new-lai,"Number of Raft commands re-proposed with a newer LAI. + +The number of Raft commands that leaseholders re-proposed with a modified LAI. +Such re-proposals happen for commands that are committed to Raft out of intended +order, and hence can not be applied as is.",Commands,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.commands.reproposed.unchanged,"Number of Raft commands re-proposed without modification. + +The number of Raft commands that leaseholders re-proposed without modification. +Such re-proposals happen for commands that are not committed/applied within a +timeout, and have a high chance of being dropped.",Commands,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.commandsapplied,"Number of Raft commands applied. + +This measurement is taken on the Raft apply loops of all Replicas (leaders and +followers alike), meaning that it does not measure the number of Raft commands +*proposed* (in the hypothetical extreme case, all Replicas may apply all commands +through snapshots, thus not increasing this metric at all). 
+Instead, it is a proxy for how much work is being done advancing the Replica +state machines on this node.",Commands,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.dropped,"Number of Raft proposals dropped (this counts individual raftpb.Entry, not raftpb.MsgProp)",Proposals,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.dropped_leader,"Number of Raft proposals dropped by a Replica that believes itself to be the leader; each update also increments `raft.dropped` (this counts individual raftpb.Entry, not raftpb.MsgProp)",Proposals,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.entrycache.accesses,Number of cache lookups in the Raft entry cache,Accesses,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.entrycache.bytes,Aggregate size of all Raft entries in the Raft entry cache,Entry Bytes,GAUGE,BYTES,AVG,NONE +STORAGE,raft.entrycache.hits,Number of successful cache lookups in the Raft entry cache,Hits,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.entrycache.read_bytes,Counter of bytes in entries returned from the Raft entry cache,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.entrycache.size,Number of Raft entries in the Raft entry cache,Entry Count,GAUGE,COUNT,AVG,NONE +STORAGE,raft.flows.entered.state_probe,The number of leader->peer flows transitioned to StateProbe,Flows,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.flows.entered.state_replicate,The number of leader->peer flows transitioned to StateReplicate,Flows,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.flows.entered.state_snapshot,The number of leader->peer flows transitioned to StateSnapshot,Flows,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.flows.state_probe,Number of leader->peer flows in StateProbe,Flows,GAUGE,COUNT,AVG,NONE +STORAGE,raft.flows.state_replicate,Number of leader->peer flows in StateReplicate,Flows,GAUGE,COUNT,AVG,NONE +STORAGE,raft.flows.state_snapshot,Number of leader->peer flows in StateSnapshot,Flows,GAUGE,COUNT,AVG,NONE +STORAGE,raft.fortification.skipped_no_support,The number of fortification requests that were skipped (not sent) due to lack of store liveness support,Skipped Fortifications,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.fortification_resp.accepted,The number of accepted fortification responses. Calculated on the raft leader,Accepted Fortification Responses,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.fortification_resp.rejected,The number of rejected fortification responses. Calculated on the raft leader,Rejected Fortification Responses,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.heartbeats.pending,Number of pending heartbeats and responses waiting to be coalesced,Messages,GAUGE,COUNT,AVG,NONE +STORAGE,raft.loaded_entries.bytes,Bytes allocated by raft Storage.Entries calls that are still kept in memory,Bytes,GAUGE,BYTES,AVG,NONE +STORAGE,raft.loaded_entries.reserved.bytes,Bytes allocated by raft Storage.Entries calls that are still kept in memory,Memory,GAUGE,BYTES,AVG,NONE +STORAGE,raft.process.applycommitted.latency,"Latency histogram for applying all committed Raft commands in a Raft ready. + +This measures the end-to-end latency of applying all commands in a Raft ready.
Note that +this closes over possibly multiple measurements of the 'raft.process.commandcommit.latency' +metric, which receives datapoints for each sub-batch processed in the process.",Latency,HISTOGRAM,NANOSECONDS,AVG,NONE +STORAGE,raft.process.commandcommit.latency,"Latency histogram for applying a batch of Raft commands to the state machine. + +This metric is misnamed: it measures the latency for *applying* a batch of +committed Raft commands to a Replica state machine. This requires only +non-durable I/O (except for replication configuration changes). + +Note that a ""batch"" in this context is really a sub-batch of the batch received +for application during raft ready handling. The +'raft.process.applycommitted.latency' histogram is likely more suitable in most +cases, as it measures the total latency across all sub-batches (i.e. the sum of +commandcommit.latency for a complete batch). +",Latency,HISTOGRAM,NANOSECONDS,AVG,NONE +STORAGE,raft.process.handleready.latency,"Latency histogram for handling a Raft ready. + +This measures the end-to-end latency of the Raft state advancement loop, including: +- snapshot application +- SST ingestion +- durably appending to the Raft log (i.e. includes fsync) +- entry application (incl. replicated side effects, notably log truncation) + +These include work measured in 'raft.process.commandcommit.latency' and +'raft.process.applycommitted.latency'. However, matching percentiles of these +metrics may be *higher* than handleready, since not every handleready cycle +leads to an update of the others. For example, under tpcc-100 on a single node, +the handleready count is approximately twice the logcommit count (and logcommit +count tracks closely with applycommitted count). + +High percentile outliers can be caused by individual large Raft commands or +storage layer blips. Lower percentile (e.g. 50th) increases are often driven by +CPU exhaustion or storage layer slowdowns. +",Latency,HISTOGRAM,NANOSECONDS,AVG,NONE +STORAGE,raft.process.logcommit.latency,"Latency histogram for committing Raft log entries to stable storage + +This measures the latency of durably committing a group of newly received Raft +entries as well as the HardState entry to disk. This excludes any data +processing, i.e. we measure purely the commit latency of the resulting Engine +write. Homogeneous bands of p50-p99 latencies (in the presence of regular Raft +traffic) make it likely that the storage layer is healthy. Spikes in the +latency bands can either hint at the presence of large sets of Raft entries +being received, or at performance issues at the storage layer. +",Latency,HISTOGRAM,NANOSECONDS,AVG,NONE +STORAGE,raft.process.tickingnanos,Nanoseconds spent in store.processRaft() processing replica.Tick(),Processing Time,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.process.workingnanos,"Nanoseconds spent in store.processRaft() working. + +This is the sum of the measurements passed to the raft.process.handleready.latency +histogram.
+",Processing Time,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.quota_pool.percent_used,Histogram of proposal quota pool utilization (0-100) per leaseholder per metrics interval,Percent,HISTOGRAM,COUNT,AVG,NONE +STORAGE,raft.rcvd.app,Number of MsgApp messages received by this store,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.rcvd.appresp,Number of MsgAppResp messages received by this store,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.rcvd.bytes,"Number of bytes in Raft messages received by this store. Note + that this does not include raft snapshot received.",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.rcvd.cross_region.bytes,"Number of bytes received by this store for cross region Raft messages + (when region tiers are configured). Note that this does not include raft + snapshot received.",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.rcvd.cross_zone.bytes,"Number of bytes received by this store for cross zone, same region + Raft messages (when region and zone tiers are configured). If region tiers + are not configured, this count may include data sent between different + regions. To ensure accurate monitoring of transmitted data, it is important + to set up a consistent locality configuration across nodes. Note that this + does not include raft snapshot received.",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.rcvd.defortifyleader,Number of MsgDeFortifyLeader messages received by this store,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.rcvd.dropped,Number of incoming Raft messages dropped (due to queue length or size),Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.rcvd.dropped_bytes,Bytes of dropped incoming Raft messages,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.rcvd.fortifyleader,Number of MsgFortifyLeader messages received by this store,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.rcvd.fortifyleaderresp,Number of MsgFortifyLeaderResp messages received by this store,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.rcvd.heartbeat,"Number of (coalesced, if enabled) MsgHeartbeat messages received by this store",Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.rcvd.heartbeatresp,"Number of (coalesced, if enabled) MsgHeartbeatResp messages received by this store",Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.rcvd.prevote,Number of MsgPreVote messages received by this store,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.rcvd.prevoteresp,Number of MsgPreVoteResp messages received by this store,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.rcvd.prop,Number of MsgProp messages received by this store,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.rcvd.queued_bytes,Number of bytes in messages currently waiting for raft processing,Bytes,GAUGE,BYTES,AVG,NONE +STORAGE,raft.rcvd.snap,Number of MsgSnap messages received by this store,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.rcvd.stepped_bytes,"Number of bytes in messages processed by Raft. + +Messages reflected here have been handed to Raft (via RawNode.Step). This does not imply that the +messages are no longer held in memory or that IO has been performed. Raft delegates IO activity to +Raft ready handling, which occurs asynchronously. 
Since handing messages to Raft serializes with +Raft ready handling and since the size of an entry is dominated by the contained pebble WriteBatch, +on average the rate at which this metric increases is a good proxy for the rate at which Raft ready +handling consumes writes. +",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.rcvd.timeoutnow,Number of MsgTimeoutNow messages received by this store,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.rcvd.transferleader,Number of MsgTransferLeader messages received by this store,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.rcvd.vote,Number of MsgVote messages received by this store,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.rcvd.voteresp,Number of MsgVoteResp messages received by this store,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.replication.latency,"The duration elapsed between having evaluated a BatchRequest and it being +reflected in the proposer's state machine (i.e. having applied fully). + +This encompasses time spent in the quota pool, in replication (including +reproposals), and application, but notably *not* sequencing latency (i.e. +contention and latch acquisition). + +No measurement is recorded for read-only commands or for read-write commands +which end up not writing (such as a DeleteRange on an empty span). Commands that +result in 'above-replication' errors (i.e. txn retries, etc) are similarly +excluded. Errors that arise while waiting for the in-flight replication result +or result from application of the command are included. + +Note also that usually, clients are signalled at the beginning of application, but +the recorded measurement captures the entirety of log application. + +The duration is always measured on the proposer, even if the Raft leader and +leaseholder are not colocated, or the request is proposed from a follower. + +Commands that use async consensus will still cause a measurement that reflects +the actual replication latency, despite returning early to the client.",Latency,HISTOGRAM,COUNT,AVG,NONE +STORAGE,raft.scheduler.latency,"Queueing durations for ranges waiting to be processed by the Raft scheduler. + +This histogram measures the delay from when a range is registered with the scheduler +for processing to when it is actually processed. This does not include the duration +of processing. +",Latency,HISTOGRAM,NANOSECONDS,AVG,NONE +STORAGE,raft.sent.bytes,"Number of bytes in Raft messages sent by this store. Note that + this does not include Raft snapshots sent.",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.sent.cross_region.bytes,"Number of bytes sent by this store for cross region Raft messages + (when region tiers are configured). Note that this does not include Raft + snapshots sent.",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.sent.cross_zone.bytes,"Number of bytes sent by this store for cross zone, same region Raft + messages (when region and zone tiers are configured). If region tiers are + not configured, this count may include data sent between different regions. + To ensure accurate monitoring of transmitted data, it is important to set up + a consistent locality configuration across nodes.
Note that this does not + include Raft snapshots sent.",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.storage.error,Number of Raft storage errors,Error Count,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.storage.read_bytes,"Counter of raftpb.Entry.Size() read from pebble for raft log entries. + +These are the bytes returned from the (raft.Storage).Entries method that were not +returned via the raft entry cache. This metric plus the raft.entrycache.read_bytes +metric represent the total bytes returned from the Entries method. + +Since pebble might serve these entries from the block cache, only a fraction of this +throughput might manifest in disk metrics. + +Entries tracked in this metric incur an unmarshalling-related CPU and memory +overhead that would not be incurred were the entries served from the raft +entry cache. + +The bytes returned here do not correspond 1:1 to bytes read from pebble. This +metric measures the in-memory size of the raftpb.Entry, whereas we read its +encoded representation from pebble. As there is no compression involved, these +will generally be comparable. + +A common reason for elevated measurements on this metric is that a store is +falling behind on raft log application. The raft entry cache generally tracks +entries that were recently appended, so if log application falls behind, the +cache will already have moved on to newer entries. +",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.ticks,Number of Raft ticks queued,Ticks,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.timeoutcampaign,Number of Raft replicas campaigning after missed heartbeats from leader,Elections called after timeout,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.transport.flow-token-dispatches-dropped,Number of flow token dispatches dropped by the Raft Transport,Dispatches,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.transport.rcvd,Number of Raft messages received by the Raft Transport,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.transport.reverse-rcvd,"Messages received from the reverse direction of a stream. + +These messages should be rare. They are mostly informational, and are not actual +responses to Raft messages. Responses are received over another stream.",Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.transport.reverse-sent,"Messages sent in the reverse direction of a stream. + +These messages should be rare. They are mostly informational, and are not actual +responses to Raft messages. Responses are sent over another stream.",Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.transport.send-queue-bytes,"The total byte size of pending outgoing messages in the queue. + +The queue is composed of multiple bounded channels associated with different +peers. A size higher than the average baseline could indicate issues streaming +messages to at least one peer. Use this metric together with send-queue-size to +have a fuller picture.",Bytes,GAUGE,BYTES,AVG,NONE +STORAGE,raft.transport.send-queue-size,"Number of pending outgoing messages in the Raft Transport queue. + +The queue is composed of multiple bounded channels associated with different +peers. An overall size in the tens of thousands could indicate issues streaming +messages to at least one peer.
Use this metric in conjunction with +send-queue-bytes.",Messages,GAUGE,COUNT,AVG,NONE +STORAGE,raft.transport.sends-dropped,Number of Raft message sends dropped by the Raft Transport,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raft.transport.sent,Number of Raft messages sent by the Raft Transport,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,raftlog.behind,"Number of Raft log entries followers on other stores are behind. + +This gauge provides a view of the aggregate number of log entries the Raft leaders +on this node think the followers are behind. Since a raft leader may not always +have a good estimate for this information for all of its followers, and since +followers are expected to be behind (when they are not required as part of a +quorum) *and* the aggregate thus scales like the count of such followers, it is +difficult to meaningfully interpret this metric.",Log Entries,GAUGE,COUNT,AVG,NONE +STORAGE,raftlog.truncated,Number of Raft log entries truncated,Log Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,range.adds,Number of range additions,Range Ops,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,range.merges,Number of range merges,Range Ops,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,range.raftleaderremovals,Number of times the current Raft leader was removed from a range,Raft leader removals,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,range.raftleadertransfers,Number of raft leader transfers,Leader Transfers,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,range.recoveries,"Count of offline loss of quorum recovery operations performed on ranges. + +This count increments for every range recovered in an offline loss of quorum +recovery operation. The metric is updated when the node on which the survivor replica +is located starts following the recovery.",Quorum Recoveries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,range.removes,Number of range removals,Range Ops,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,range.snapshots.applied-initial,Number of snapshots applied for initial upreplication,Snapshots,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,range.snapshots.applied-non-voter,Number of snapshots applied by non-voter replicas,Snapshots,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,range.snapshots.applied-voter,Number of snapshots applied by voter replicas,Snapshots,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,range.snapshots.cross-region.rcvd-bytes,Number of snapshot bytes received cross region,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,range.snapshots.cross-region.sent-bytes,Number of snapshot bytes sent cross region,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,range.snapshots.cross-zone.rcvd-bytes,"Number of snapshot bytes received cross zone within the same region or if + region tiers are not configured. This count increases for each snapshot + received between different zones within the same region. However, if the + region tiers are not configured, this count may also include snapshot data + received between different regions. Ensuring consistent configuration of + region and zone tiers across nodes helps to accurately monitor the data + transmitted.",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,range.snapshots.cross-zone.sent-bytes,"Number of snapshot bytes sent cross zone within the same region or if + region tiers are not configured. This count increases for each snapshot sent + between different zones within the same region.
However, if the region tiers + are not configured, this count may also include snapshot data sent between + different regions. Ensuring consistent configuration of region and zone + tiers across nodes helps to accurately monitor the data transmitted.",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,range.snapshots.delegate.failures,"Number of snapshots that were delegated to a different node and +resulted in failure on that delegate. There are numerous reasons a failure can +occur on a delegate, such as a timeout, the delegate Raft log being too far behind, +or the delegate being too busy to send. +",Snapshots,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,range.snapshots.delegate.in-progress,Number of delegated snapshots that are currently in-flight.,Snapshots,GAUGE,COUNT,AVG,NONE +STORAGE,range.snapshots.delegate.sent-bytes,"Bytes sent using a delegate. + +The number of bytes sent as a result of a delegate snapshot request +that originated from a different node. This metric is useful in +evaluating the network savings of not sending cross region traffic. +",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,range.snapshots.delegate.successes,"Number of snapshots that were delegated to a different node and +resulted in success on that delegate. This does not count self-delegated snapshots. +",Snapshots,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,range.snapshots.generated,Number of generated snapshots,Snapshots,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,range.snapshots.rcvd-bytes,Number of snapshot bytes received,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,range.snapshots.rebalancing.rcvd-bytes,Number of rebalancing snapshot bytes received,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,range.snapshots.rebalancing.sent-bytes,Number of rebalancing snapshot bytes sent,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,range.snapshots.recovery.rcvd-bytes,Number of raft recovery snapshot bytes received,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,range.snapshots.recovery.sent-bytes,Number of raft recovery snapshot bytes sent,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,range.snapshots.recv-failed,"Number of range snapshot initialization messages that errored out on the recipient, typically before any data is transferred",Snapshots,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,range.snapshots.recv-in-progress,Number of non-empty snapshots being received,Snapshots,GAUGE,COUNT,AVG,NONE +STORAGE,range.snapshots.recv-queue,Number of snapshots queued to receive,Snapshots,GAUGE,COUNT,AVG,NONE +STORAGE,range.snapshots.recv-queue-bytes,Total size of all snapshots in the snapshot receive queue,Bytes,GAUGE,BYTES,AVG,NONE +STORAGE,range.snapshots.recv-total-in-progress,Number of total snapshots being received,Snapshots,GAUGE,COUNT,AVG,NONE +STORAGE,range.snapshots.recv-unusable,Number of range snapshots that were fully transmitted but determined to be unnecessary or unusable,Snapshots,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,range.snapshots.send-in-progress,Number of non-empty snapshots being sent,Snapshots,GAUGE,COUNT,AVG,NONE +STORAGE,range.snapshots.send-queue,Number of snapshots queued to send,Snapshots,GAUGE,COUNT,AVG,NONE +STORAGE,range.snapshots.send-queue-bytes,Total size of all snapshots in the snapshot send queue,Bytes,GAUGE,BYTES,AVG,NONE +STORAGE,range.snapshots.send-total-in-progress,Number of total snapshots being sent,Snapshots,GAUGE,COUNT,AVG,NONE +STORAGE,range.snapshots.sent-bytes,Number
of snapshot bytes sent,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,range.snapshots.unknown.rcvd-bytes,Number of unknown snapshot bytes received,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,range.snapshots.unknown.sent-bytes,Number of unknown snapshot bytes sent,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,range.snapshots.upreplication.rcvd-bytes,Number of upreplication snapshot bytes received,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,range.snapshots.upreplication.sent-bytes,Number of upreplication snapshot bytes sent,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,range.splits,Number of range splits,Range Ops,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rangekeybytes,Number of bytes taken up by range keys (e.g. MVCC range tombstones),Storage,GAUGE,BYTES,AVG,NONE +STORAGE,rangekeycount,Count of all range keys (e.g. MVCC range tombstones),Keys,GAUGE,COUNT,AVG,NONE +STORAGE,ranges,Number of ranges,Ranges,GAUGE,COUNT,AVG,NONE +STORAGE,ranges.decommissioning,Number of ranges with at least one replica on a decommissioning node,Ranges,GAUGE,COUNT,AVG,NONE +STORAGE,ranges.overreplicated,Number of ranges with more live replicas than the replication target,Ranges,GAUGE,COUNT,AVG,NONE +STORAGE,ranges.unavailable,Number of ranges with fewer live replicas than needed for quorum,Ranges,GAUGE,COUNT,AVG,NONE +STORAGE,ranges.underreplicated,Number of ranges with fewer live replicas than the replication target,Ranges,GAUGE,COUNT,AVG,NONE +STORAGE,rangevalbytes,Number of bytes taken up by range key values (e.g. MVCC range tombstones),Storage,GAUGE,BYTES,AVG,NONE +STORAGE,rangevalcount,Count of all range key values (e.g. MVCC range tombstones),MVCC Values,GAUGE,COUNT,AVG,NONE +STORAGE,rebalancing.cpunanospersecond,Average CPU nanoseconds spent on processing replica operations in the last 30 minutes.,Nanoseconds/Sec,GAUGE,NANOSECONDS,AVG,NONE +STORAGE,rebalancing.lease.transfers,Number of lease transfers motivated by store-level load imbalances,Lease Transfers,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rebalancing.queriespersecond,"Number of kv-level requests received per second by the store, considering the last 30 minutes, as used in rebalancing decisions.",Queries/Sec,GAUGE,COUNT,AVG,NONE +STORAGE,rebalancing.range.rebalances,Number of range rebalance operations motivated by store-level load imbalances,Range Rebalances,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rebalancing.readbytespersecond,"Number of bytes read recently per second, considering the last 30 minutes.",Bytes/Sec,GAUGE,BYTES,AVG,NONE +STORAGE,rebalancing.readspersecond,"Number of keys read recently per second, considering the last 30 minutes.",Keys/Sec,GAUGE,COUNT,AVG,NONE +STORAGE,rebalancing.replicas.cpunanospersecond,Histogram of average CPU nanoseconds spent on processing replica operations in the last 30 minutes.,Nanoseconds/Sec,HISTOGRAM,NANOSECONDS,AVG,NONE +STORAGE,rebalancing.replicas.queriespersecond,Histogram of average kv-level requests received per second by replicas on the store in the last 30 minutes.,Queries/Sec,HISTOGRAM,COUNT,AVG,NONE +STORAGE,rebalancing.requestspersecond,"Number of requests received recently per second, considering the last 30 minutes.",Requests/Sec,GAUGE,COUNT,AVG,NONE +STORAGE,rebalancing.state.imbalanced_overfull_options_exhausted,Number of occurrences where this store was overfull but failed to shed load after exhausting available rebalance options,Overfull Options Exhausted,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,rebalancing.writebytespersecond,"Number of bytes written recently per second, considering the last 30 minutes.",Bytes/Sec,GAUGE,BYTES,AVG,NONE +STORAGE,rebalancing.writespersecond,"Number of keys written (i.e. applied by raft) per second to the store, considering the last 30 minutes.",Keys/Sec,GAUGE,COUNT,AVG,NONE +STORAGE,replicas,Number of replicas,Replicas,GAUGE,COUNT,AVG,NONE +STORAGE,replicas.leaders,Number of raft leaders,Raft Leaders,GAUGE,COUNT,AVG,NONE +STORAGE,replicas.leaders_invalid_lease,Number of replicas that are Raft leaders whose lease is invalid,Replicas,GAUGE,COUNT,AVG,NONE +STORAGE,replicas.leaders_not_fortified,Number of replicas that are not fortified Raft leaders,Replicas,GAUGE,COUNT,AVG,NONE +STORAGE,replicas.leaders_not_leaseholders,Number of replicas that are Raft leaders whose range lease is held by another store,Replicas,GAUGE,COUNT,AVG,NONE +STORAGE,replicas.leaseholders,Number of lease holders,Replicas,GAUGE,COUNT,AVG,NONE +STORAGE,replicas.quiescent,Number of quiesced replicas,Replicas,GAUGE,COUNT,AVG,NONE +STORAGE,replicas.reserved,Number of replicas reserved for snapshots,Replicas,GAUGE,COUNT,AVG,NONE +STORAGE,replicas.uninitialized,"Number of uninitialized replicas; this does not include uninitialized replicas that can lie dormant in a persistent state.",Replicas,GAUGE,COUNT,AVG,NONE +STORAGE,requests.backpressure.split,"Number of backpressured writes waiting on a Range split. + +A Range will backpressure (roughly) non-system traffic when the range is above +the configured size until the range splits. When the rate of this metric is +nonzero over extended periods of time, the reason splits are +not occurring should be investigated. +",Writes,GAUGE,COUNT,AVG,NONE +STORAGE,requests.slow.latch,"Number of requests that have been stuck for a long time acquiring latches. + +Latches moderate access to the KV keyspace for the purpose of evaluating and +replicating commands. A slow latch acquisition attempt is often caused by +another request holding and not releasing its latches in a timely manner. This +in turn can either be caused by a long delay in evaluation (for example, under +severe system overload) or by delays at the replication layer. + +This gauge registering a nonzero value usually indicates a serious problem and +should be investigated. +",Requests,GAUGE,COUNT,AVG,NONE +STORAGE,requests.slow.lease,"Number of requests that have been stuck for a long time acquiring a lease. + +This gauge registering a nonzero value usually indicates range or replica +unavailability, and should be investigated. In the common case, we also +expect to see 'requests.slow.raft' register a nonzero value, indicating +that the lease requests are not getting a timely response from the replication +layer. +",Requests,GAUGE,COUNT,AVG,NONE +STORAGE,requests.slow.raft,"Number of requests that have been stuck for a long time in the replication layer. + +An (evaluated) request has to pass through the replication layer, notably the +quota pool and raft. If it fails to do so within a highly permissive duration, +the gauge is incremented (and decremented again once the request is either +applied or returns an error). + +A nonzero value indicates range or replica unavailability, and should be investigated.
+",Requests,GAUGE,COUNT,AVG,NONE +STORAGE,rocksdb.block.cache.hits,Count of block cache hits,Cache Ops,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rocksdb.block.cache.misses,Count of block cache misses,Cache Ops,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rocksdb.block.cache.usage,Bytes used by the block cache,Memory,GAUGE,BYTES,AVG,NONE +STORAGE,rocksdb.bloom.filter.prefix.checked,Number of times the bloom filter was checked,Bloom Filter Ops,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rocksdb.bloom.filter.prefix.useful,Number of times the bloom filter helped avoid iterator creation,Bloom Filter Ops,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rocksdb.compacted-bytes-read,Bytes read during compaction,Bytes Read,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rocksdb.compacted-bytes-written,Bytes written during compaction,Bytes Written,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rocksdb.compactions,Number of table compactions,Compactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rocksdb.encryption.algorithm,"Algorithm in use for encryption-at-rest, see ccl/storageccl/engineccl/enginepbccl/key_registry.proto",Encryption At Rest,GAUGE,CONST,AVG,NONE +STORAGE,rocksdb.estimated-pending-compaction,Estimated pending compaction bytes,Storage,GAUGE,BYTES,AVG,NONE +STORAGE,rocksdb.flushed-bytes,Bytes written during flush,Bytes Written,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rocksdb.flushes,Number of table flushes,Flushes,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rocksdb.ingested-bytes,Bytes ingested,Bytes Ingested,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rocksdb.memtable.total-size,Current size of memtable in bytes,Memory,GAUGE,BYTES,AVG,NONE +STORAGE,rocksdb.num-sstables,Number of storage engine SSTables,SSTables,GAUGE,COUNT,AVG,NONE +STORAGE,rocksdb.read-amplification,Number of disk reads per query,Disk Reads per Query,GAUGE,COUNT,AVG,NONE +STORAGE,rocksdb.table-readers-mem-estimate,Memory used by index and filter blocks,Memory,GAUGE,BYTES,AVG,NONE +STORAGE,rpc.batches.recv,Number of batches processed,Batches,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.addsstable.recv,Number of AddSSTable requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.adminchangereplicas.recv,Number of AdminChangeReplicas requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.adminmerge.recv,Number of AdminMerge requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.adminrelocaterange.recv,Number of AdminRelocateRange requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.adminscatter.recv,Number of AdminScatter requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.adminsplit.recv,Number of AdminSplit requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.admintransferlease.recv,Number of AdminTransferLease requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.adminunsplit.recv,Number of AdminUnsplit requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.adminverifyprotectedtimestamp.recv,Number of AdminVerifyProtectedTimestamp requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.barrier.recv,Number of Barrier requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.checkconsistency.recv,Number of CheckConsistency requests 
processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.clearrange.recv,Number of ClearRange requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.computechecksum.recv,Number of ComputeChecksum requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.conditionalput.recv,Number of ConditionalPut requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.delete.recv,Number of Delete requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.deleterange.recv,Number of DeleteRange requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.endtxn.recv,Number of EndTxn requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.export.recv,Number of Export requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.gc.recv,Number of GC requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.get.recv,Number of Get requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.heartbeattxn.recv,Number of HeartbeatTxn requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.increment.recv,Number of Increment requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.initput.recv,Number of InitPut requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.isspanempty.recv,Number of IsSpanEmpty requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.leaseinfo.recv,Number of LeaseInfo requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.linkexternalsstable.recv,Number of LinkExternalSSTable requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.merge.recv,Number of Merge requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.migrate.recv,Number of Migrate requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.probe.recv,Number of Probe requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.pushtxn.recv,Number of PushTxn requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.put.recv,Number of Put requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.queryintent.recv,Number of QueryIntent requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.querylocks.recv,Number of QueryLocks requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.queryresolvedtimestamp.recv,Number of QueryResolvedTimestamp requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.querytxn.recv,Number of QueryTxn requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.rangestats.recv,Number of RangeStats requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.recomputestats.recv,Number of RecomputeStats requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.recovertxn.recv,Number of RecoverTxn requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.refresh.recv,Number of Refresh requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.refreshrange.recv,Number of RefreshRange requests 
processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.requestlease.recv,Number of RequestLease requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.resolveintent.recv,Number of ResolveIntent requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.resolveintentrange.recv,Number of ResolveIntentRange requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.reversescan.recv,Number of ReverseScan requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.revertrange.recv,Number of RevertRange requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.scan.recv,Number of Scan requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.subsume.recv,Number of Subsume requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.transferlease.recv,Number of TransferLease requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.truncatelog.recv,Number of TruncateLog requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.method.writebatch.recv,Number of WriteBatch requests processed,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,rpc.streams.mux_rangefeed.active,Number of currently running MuxRangeFeed streams,Streams,GAUGE,COUNT,AVG,NONE +STORAGE,rpc.streams.mux_rangefeed.recv,Total number of MuxRangeFeed streams,Streams,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,spanconfig.kvsubscriber.oldest_protected_record_nanos,Difference between the current time and the oldest protected timestamp (sudden drops indicate a record being released; an ever increasing number indicates that the oldest record is around and preventing GC if > configured GC TTL),Nanoseconds,GAUGE,NANOSECONDS,AVG,NONE +STORAGE,spanconfig.kvsubscriber.protected_record_count,"Number of protected timestamp records, as seen by KV",Records,GAUGE,COUNT,AVG,NONE +STORAGE,spanconfig.kvsubscriber.update_behind_nanos,Difference between the current time and when the KVSubscriber received its last update (an ever increasing number indicates that we're no longer receiving updates),Nanoseconds,GAUGE,NANOSECONDS,AVG,NONE +STORAGE,storage.batch-commit.commit-wait.duration,"Cumulative time spent waiting for WAL sync, for batch commit. See storage.AggregatedBatchCommitStats for details.",Nanoseconds,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.batch-commit.count,Count of batch commits. See storage.AggregatedBatchCommitStats for details.,Commit Ops,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.batch-commit.duration,Cumulative time spent in batch commit. See storage.AggregatedBatchCommitStats for details.,Nanoseconds,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.batch-commit.l0-stall.duration,"Cumulative time spent in a write stall due to high read amplification in L0, for batch commit. See storage.AggregatedBatchCommitStats for details.",Nanoseconds,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.batch-commit.mem-stall.duration,"Cumulative time spent in a write stall due to too many memtables, for batch commit. See storage.AggregatedBatchCommitStats for details.",Nanoseconds,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.batch-commit.sem-wait.duration,"Cumulative time spent in semaphore wait, for batch commit. 
See storage.AggregatedBatchCommitStats for details.",Nanoseconds,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.batch-commit.wal-queue-wait.duration,"Cumulative time spent waiting for memory blocks in the WAL queue, for batch commit. See storage.AggregatedBatchCommitStats for details.",Nanoseconds,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.batch-commit.wal-rotation.duration,"Cumulative time spent waiting for WAL rotation, for batch commit. See storage.AggregatedBatchCommitStats for details.",Nanoseconds,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.block-load.active,The number of sstable block loads currently in progress,Block loads,GAUGE,COUNT,AVG,NONE +STORAGE,storage.block-load.queued,The cumulative number of SSTable block loads that were delayed because too many loads were active (see also: `storage.block_load.node_max_active`),Block loads,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.checkpoints,"The number of checkpoint directories found in storage. + +This is the number of directories found in the auxiliary/checkpoints directory. +Each represents an immutable point-in-time storage engine checkpoint. They are +cheap (consisting mostly of hard links), but over time they effectively become a +full copy of the old state, which increases their relative cost. Checkpoints +must be deleted once acted upon (e.g. copied elsewhere or investigated). + +A likely cause of having a checkpoint is that one of the ranges in this store +had inconsistent data among its replicas. Such checkpoint directories are +located in auxiliary/checkpoints/rN_at_M, where N is the range ID, and M is the +Raft applied index at which this checkpoint was taken.",Directories,GAUGE,COUNT,AVG,NONE +STORAGE,storage.compactions.cancelled.bytes,Cumulative volume of data written to sstables during compactions that were ultimately cancelled due to a conflicting operation.,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.compactions.cancelled.count,Cumulative count of compactions that were cancelled before they completed due to a conflicting operation.,Compactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.compactions.duration,"Cumulative sum of all compaction durations. + +The rate of this value provides the effective compaction concurrency of a store, +which can be useful to determine whether the maximum compaction concurrency is +fully utilized.",Processing Time,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.compactions.keys.pinned.bytes,"Cumulative size of storage engine KVs written to sstables during flushes and compactions due to open LSM snapshots. + +Various subsystems of CockroachDB take LSM snapshots to maintain a consistent view +of the database over an extended duration. In order to maintain the consistent view, +flushes and compactions within the storage engine must preserve keys that otherwise +would have been dropped. This increases write amplification, and introduces keys +that must be skipped during iteration. This metric records the cumulative number of +bytes preserved during flushes and compactions over the lifetime of the process. +",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.compactions.keys.pinned.count,"Cumulative count of storage engine KVs written to sstables during flushes and compactions due to open LSM snapshots. + +Various subsystems of CockroachDB take LSM snapshots to maintain a consistent view +of the database over an extended duration. 
In order to maintain the consistent view, +flushes and compactions within the storage engine must preserve keys that otherwise +would have been dropped. This increases write amplification, and introduces keys +that must be skipped during iteration. This metric records the cumulative count of +KVs preserved during flushes and compactions over the lifetime of the process. +",Keys,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.disk-slow,Number of instances of disk operations taking longer than 10s,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.disk-stalled,Number of instances of disk operations taking longer than 20s,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.disk.io.time,Time spent reading from or writing to the store's disk since this process started (as reported by the OS),Time,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.disk.iopsinprogress,IO operations currently in progress on the store's disk (as reported by the OS),Operations,GAUGE,COUNT,AVG,NONE +STORAGE,storage.disk.read-max.bytespersecond,Maximum rate at which bytes were read from disk (as reported by the OS),Bytes,GAUGE,BYTES,AVG,NONE +STORAGE,storage.disk.read.bytes,Bytes read from the store's disk since this process started (as reported by the OS),Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.disk.read.count,Disk read operations on the store's disk since this process started (as reported by the OS),Operations,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.disk.read.time,Time spent reading from the store's disk since this process started (as reported by the OS),Time,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.disk.weightedio.time,Weighted time spent reading from or writing to the store's disk since this process started (as reported by the OS),Time,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.disk.write-max.bytespersecond,Maximum rate at which bytes were written to disk (as reported by the OS),Bytes,GAUGE,BYTES,AVG,NONE +STORAGE,storage.disk.write.bytes,Bytes written to the store's disk since this process started (as reported by the OS),Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.disk.write.count,Disk write operations on the store's disk since this process started (as reported by the OS),Operations,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.disk.write.time,Time spent writing to the store's disks since this process started (as reported by the OS),Time,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.flush.ingest.count,Flushes performing an ingest (flushable ingestions),Flushes,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.flush.ingest.table.bytes,Bytes ingested via flushes (flushable ingestions),Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.flush.ingest.table.count,Tables ingested via flushes (flushable ingestions),Tables,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.flush.utilization,The percentage of time the storage engine is actively flushing memtables to disk.,Flush Utilization,GAUGE,PERCENT,AVG,NONE +STORAGE,storage.ingest.count,Number of successful ingestions performed,Events,GAUGE,COUNT,AVG,NONE +STORAGE,storage.iterator.block-load.bytes,Bytes loaded by storage engine iterators (possibly cached). 
See storage.AggregatedIteratorStats for details.,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.block-load.cached-bytes,Bytes loaded by storage engine iterators from the block cache. See storage.AggregatedIteratorStats for details.,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.block-load.read-duration,Cumulative time storage engine iterators spent loading blocks from durable storage. See storage.AggregatedIteratorStats for details.,Nanoseconds,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-backup.block-load.bytes,Bytes loaded by storage sstable iterators (possibly cached).,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-backup.block-load.cached-bytes,Bytes loaded by storage sstable iterators from the block cache,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-backup.block-load.latency-sum,"Cumulative latency for loading bytes not in the block cache, by storage sstable iterators",Latency,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-batch-eval.block-load.bytes,Bytes loaded by storage sstable iterators (possibly cached).,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-batch-eval.block-load.cached-bytes,Bytes loaded by storage sstable iterators from the block cache,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-batch-eval.block-load.latency-sum,"Cumulative latency for loading bytes not in the block cache, by storage sstable iterators",Latency,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-crdb-unknown.block-load.bytes,Bytes loaded by storage sstable iterators (possibly cached).,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-crdb-unknown.block-load.cached-bytes,Bytes loaded by storage sstable iterators from the block cache,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-crdb-unknown.block-load.latency-sum,"Cumulative latency for loading bytes not in the block cache, by storage sstable iterators",Latency,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-intent-resolution.block-load.bytes,Bytes loaded by storage sstable iterators (possibly cached).,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-intent-resolution.block-load.cached-bytes,Bytes loaded by storage sstable iterators from the block cache,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-intent-resolution.block-load.latency-sum,"Cumulative latency for loading bytes not in the block cache, by storage sstable iterators",Latency,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-mvcc-gc.block-load.bytes,Bytes loaded by storage sstable iterators (possibly cached).,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-mvcc-gc.block-load.cached-bytes,Bytes loaded by storage sstable iterators from the block cache,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-mvcc-gc.block-load.latency-sum,"Cumulative latency for loading bytes not in the block cache, by storage sstable iterators",Latency,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-pebble-compaction.block-load.bytes,Bytes loaded by storage sstable iterators (possibly 
cached).,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-pebble-compaction.block-load.cached-bytes,Bytes loaded by storage sstable iterators from the block cache,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-pebble-compaction.block-load.latency-sum,"Cumulative latency for loading bytes not in the block cache, by storage sstable iterators",Latency,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-pebble-get.block-load.bytes,Bytes loaded by storage sstable iterators (possibly cached).,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-pebble-get.block-load.cached-bytes,Bytes loaded by storage sstable iterators from the block cache,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-pebble-get.block-load.latency-sum,"Cumulative latency for loading bytes not in the block cache, by storage sstable iterators",Latency,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-pebble-ingest.block-load.bytes,Bytes loaded by storage sstable iterators (possibly cached).,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-pebble-ingest.block-load.cached-bytes,Bytes loaded by storage sstable iterators from the block cache,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-pebble-ingest.block-load.latency-sum,"Cumulative latency for loading bytes not in the block cache, by storage sstable iterators",Latency,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-range-snap.block-load.bytes,Bytes loaded by storage sstable iterators (possibly cached).,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-range-snap.block-load.cached-bytes,Bytes loaded by storage sstable iterators from the block cache,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-range-snap.block-load.latency-sum,"Cumulative latency for loading bytes not in the block cache, by storage sstable iterators",Latency,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-rangefeed.block-load.bytes,Bytes loaded by storage sstable iterators (possibly cached).,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-rangefeed.block-load.cached-bytes,Bytes loaded by storage sstable iterators from the block cache,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-rangefeed.block-load.latency-sum,"Cumulative latency for loading bytes not in the block cache, by storage sstable iterators",Latency,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-replication.block-load.bytes,Bytes loaded by storage sstable iterators (possibly cached).,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-replication.block-load.cached-bytes,Bytes loaded by storage sstable iterators from the block cache,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-replication.block-load.latency-sum,"Cumulative latency for loading bytes not in the block cache, by storage sstable iterators",Latency,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-scan-background.block-load.bytes,Bytes loaded by storage sstable iterators (possibly cached).,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE 
+STORAGE,storage.iterator.category-scan-background.block-load.cached-bytes,Bytes loaded by storage sstable iterators from the block cache,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-scan-background.block-load.latency-sum,"Cumulative latency for loading bytes not in the block cache, by storage sstable iterators",Latency,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-scan-regular.block-load.bytes,Bytes loaded by storage sstable iterators (possibly cached).,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-scan-regular.block-load.cached-bytes,Bytes loaded by storage sstable iterators from the block cache,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-scan-regular.block-load.latency-sum,"Cumulative latency for loading bytes not in the block cache, by storage sstable iterators",Latency,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-unknown.block-load.bytes,Bytes loaded by storage sstable iterators (possibly cached).,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-unknown.block-load.cached-bytes,Bytes loaded by storage sstable iterators from the block cache,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.category-unknown.block-load.latency-sum,"Cumulative latency for loading bytes not in the block cache, by storage sstable iterators",Latency,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.external.seeks,Cumulative count of seeks performed on storage engine iterators. See storage.AggregatedIteratorStats for details.,Iterator Ops,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.external.steps,Cumulative count of steps performed on storage engine iterators. See storage.AggregatedIteratorStats for details.,Iterator Ops,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.internal.seeks,"Cumulative count of seeks performed internally within storage engine iterators. + +A value high relative to 'storage.iterator.external.seeks' +is a good indication that there's an accumulation of garbage +internally within the storage engine. + +See storage.AggregatedIteratorStats for details.",Iterator Ops,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.iterator.internal.steps,"Cumulative count of steps performed internally within storage engine iterators. + +A value high relative to 'storage.iterator.external.steps' +is a good indication that there's an accumulation of garbage +internally within the storage engine. 
+ +See storage.AggregatedIteratorStats for more details.",Iterator Ops,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.keys.range-key-set.count,Approximate count of RangeKeySet internal keys across the storage engine.,Keys,GAUGE,COUNT,AVG,NONE +STORAGE,storage.keys.tombstone.count,"Approximate count of DEL, SINGLEDEL and RANGEDEL internal keys across the storage engine.",Keys,GAUGE,COUNT,AVG,NONE +STORAGE,storage.l0-bytes-flushed,Number of bytes flushed (from memtables) into Level 0,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.l0-bytes-ingested,Number of bytes ingested directly into Level 0,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.l0-level-score,Compaction score of level 0,Score,GAUGE,COUNT,AVG,NONE +STORAGE,storage.l0-level-size,Size of the SSTables in level 0,Bytes,GAUGE,BYTES,AVG,NONE +STORAGE,storage.l0-num-files,Number of SSTables in Level 0,SSTables,GAUGE,COUNT,AVG,NONE +STORAGE,storage.l0-sublevels,Number of Level 0 sublevels,Sublevels,GAUGE,COUNT,AVG,NONE +STORAGE,storage.l1-bytes-ingested,Number of bytes ingested directly into Level 1,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.l1-level-score,Compaction score of level 1,Score,GAUGE,COUNT,AVG,NONE +STORAGE,storage.l1-level-size,Size of the SSTables in level 1,Bytes,GAUGE,BYTES,AVG,NONE +STORAGE,storage.l2-bytes-ingested,Number of bytes ingested directly into Level 2,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.l2-level-score,Compaction score of level 2,Score,GAUGE,COUNT,AVG,NONE +STORAGE,storage.l2-level-size,Size of the SSTables in level 2,Bytes,GAUGE,BYTES,AVG,NONE +STORAGE,storage.l3-bytes-ingested,Number of bytes ingested directly into Level 3,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.l3-level-score,Compaction score of level 3,Score,GAUGE,COUNT,AVG,NONE +STORAGE,storage.l3-level-size,Size of the SSTables in level 3,Bytes,GAUGE,BYTES,AVG,NONE +STORAGE,storage.l4-bytes-ingested,Number of bytes ingested directly into Level 4,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.l4-level-score,Compaction score of level 4,Score,GAUGE,COUNT,AVG,NONE +STORAGE,storage.l4-level-size,Size of the SSTables in level 4,Bytes,GAUGE,BYTES,AVG,NONE +STORAGE,storage.l5-bytes-ingested,Number of bytes ingested directly into Level 5,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.l5-level-score,Compaction score of level 5,Score,GAUGE,COUNT,AVG,NONE +STORAGE,storage.l5-level-size,Size of the SSTables in level 5,Bytes,GAUGE,BYTES,AVG,NONE +STORAGE,storage.l6-bytes-ingested,Number of bytes ingested directly into Level 6,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.l6-level-score,Compaction score of level 6,Score,GAUGE,COUNT,AVG,NONE +STORAGE,storage.l6-level-size,Size of the SSTables in level 6,Bytes,GAUGE,BYTES,AVG,NONE +STORAGE,storage.marked-for-compaction-files,Count of SSTables marked for compaction,SSTables,GAUGE,COUNT,AVG,NONE +STORAGE,storage.queue.store-failures,Number of replicas which failed processing in replica queues due to retryable store errors,Replicas,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.secondary-cache.count,The count of cache blocks in the secondary cache (not sstable blocks),Cache items,GAUGE,COUNT,AVG,NONE +STORAGE,storage.secondary-cache.evictions,The number of times a cache block was evicted from the secondary cache,Num evictions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.secondary-cache.reads-full-hit,The number of reads 
where all data returned was read from the secondary cache,Num reads,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.secondary-cache.reads-multi-block,The number of secondary cache reads that require reading data from 2+ cache blocks,Num reads,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.secondary-cache.reads-multi-shard,The number of secondary cache reads that require reading data from 2+ shards,Num reads,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.secondary-cache.reads-no-hit,The number of reads where no data returned was read from the secondary cache,Num reads,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.secondary-cache.reads-partial-hit,The number of reads where some data returned was read from the secondary cache,Num reads,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.secondary-cache.reads-total,The number of reads from the secondary cache,Num reads,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.secondary-cache.size,The number of sstable bytes stored in the secondary cache,Bytes,GAUGE,BYTES,AVG,NONE +STORAGE,storage.secondary-cache.write-back-failures,The number of times writing a cache block to the secondary cache failed,Num failures,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.shared-storage.read,Bytes read from shared storage,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.shared-storage.write,Bytes written to external storage,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.single-delete.ineffectual,Number of SingleDeletes that were ineffectual,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.single-delete.invariant-violation,Number of SingleDelete invariant violations,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.sstable.compression.none.count,Count of SSTables that are uncompressed.,SSTables,GAUGE,COUNT,AVG,NONE +STORAGE,storage.sstable.compression.snappy.count,Count of SSTables that have been compressed with the snappy compression algorithm.,SSTables,GAUGE,COUNT,AVG,NONE +STORAGE,storage.sstable.compression.unknown.count,Count of SSTables that have an unknown compression algorithm.,SSTables,GAUGE,COUNT,AVG,NONE +STORAGE,storage.sstable.compression.zstd.count,Count of SSTables that have been compressed with the zstd compression algorithm.,SSTables,GAUGE,COUNT,AVG,NONE +STORAGE,storage.sstable.zombie.bytes,"Bytes in SSTables that have been logically deleted, but can't yet be physically deleted because an open iterator may be reading them.",Bytes,GAUGE,BYTES,AVG,NONE +STORAGE,storage.wal.bytes_in,The number of logical bytes the storage engine has written to the WAL,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.wal.bytes_written,The number of bytes the storage engine has written to the WAL,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.wal.failover.primary.duration,Cumulative time spent writing to the primary WAL directory. Only populated when WAL failover is configured,Nanoseconds,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.wal.failover.secondary.duration,Cumulative time spent writing to the secondary WAL directory. 
Only populated when WAL failover is configured,Nanoseconds,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.wal.failover.switch.count,Count of the number of times WAL writing has switched from primary to secondary and vice versa.,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storage.wal.failover.write_and_sync.latency,The observed latency for writing and syncing to the write ahead log. Only populated when WAL failover is configured,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE +STORAGE,storage.wal.fsync.latency,The write ahead log fsync latency,Fsync Latency,HISTOGRAM,NANOSECONDS,AVG,NONE +STORAGE,storage.write-amplification,"Running measure of write-amplification. + +Write amplification is measured as the ratio of bytes written to disk relative to the logical +bytes present in sstables, over the life of a store. This metric is a running average +of the write amplification as tracked by Pebble.",Ratio of bytes written to logical bytes,GAUGE,COUNT,AVG,NONE +STORAGE,storage.write-stall-nanos,Total write stall duration in nanos,Nanoseconds,GAUGE,NANOSECONDS,AVG,NONE +STORAGE,storage.write-stalls,Number of instances of intentional write stalls to backpressure incoming writes,Events,GAUGE,COUNT,AVG,NONE +STORAGE,storeliveness.heartbeat.failures,Number of Store Liveness heartbeats that failed to be sent out by the Store Liveness Support Manager,Heartbeats,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storeliveness.heartbeat.successes,Number of Store Liveness heartbeats sent out by the Store Liveness Support Manager,Heartbeats,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storeliveness.message_handle.failures,Number of incoming Store Liveness messages that failed to be handled by the Store Liveness Support Manager,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storeliveness.message_handle.successes,Number of incoming Store Liveness messages handled by the Store Liveness Support Manager,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storeliveness.support_for.stores,Number of stores that the Store Liveness Support Manager has ever provided support for,Stores,GAUGE,COUNT,AVG,NONE +STORAGE,storeliveness.support_from.stores,Number of stores that the Store Liveness Support Manager is requesting support from by sending heartbeats,Stores,GAUGE,COUNT,AVG,NONE +STORAGE,storeliveness.support_withdraw.failures,Number of times the Store Liveness Support Manager has encountered an error while withdrawing support for another store,Support Withdrawals,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storeliveness.support_withdraw.successes,Number of times the Store Liveness Support Manager has successfully withdrawn support for another store,Support Withdrawals,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storeliveness.transport.receive-queue-bytes,Total byte size of pending incoming messages from Store Liveness Transport,Bytes,GAUGE,BYTES,AVG,NONE +STORAGE,storeliveness.transport.receive-queue-size,Number of pending incoming messages from the Store Liveness Transport,Messages,GAUGE,COUNT,AVG,NONE +STORAGE,storeliveness.transport.receive_dropped,Number of Store Liveness messages dropped by the Store Liveness Transport on the receiver side,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storeliveness.transport.received,Number of Store Liveness messages received by the Store Liveness Transport,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storeliveness.transport.send-queue-bytes,Total byte size of pending outgoing 
messages in all Store Liveness Transport per-store send queues,Bytes,GAUGE,BYTES,AVG,NONE +STORAGE,storeliveness.transport.send-queue-idle,Number of Store Liveness Transport per-store send queues that have become idle due to no recently-sent messages,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storeliveness.transport.send-queue-size,Number of pending outgoing messages in all Store Liveness Transport per-store send queues,Messages,GAUGE,COUNT,AVG,NONE +STORAGE,storeliveness.transport.send_dropped,Number of Store Liveness messages dropped by the Store Liveness Transport on the sender side,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,storeliveness.transport.sent,Number of Store Liveness messages sent by the Store Liveness Transport,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,sysbytes,Number of bytes in system KV pairs,Storage,GAUGE,BYTES,AVG,NONE +STORAGE,syscount,Count of system KV pairs,Keys,GAUGE,COUNT,AVG,NONE +STORAGE,tenant.consumption.cross_region_network_ru,Total number of RUs charged for cross-region network traffic,Request Units,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,tenant.consumption.external_io_egress_bytes,Total number of bytes written to external services such as cloud storage providers,Bytes,GAUGE,COUNT,AVG,NONE +STORAGE,tenant.consumption.external_io_ingress_bytes,Total number of bytes read from external services such as cloud storage providers,Bytes,GAUGE,COUNT,AVG,NONE +STORAGE,tenant.consumption.kv_request_units,RU consumption attributable to KV,Request Units,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,tenant.consumption.pgwire_egress_bytes,Total number of bytes transferred from a SQL pod to the client,Bytes,GAUGE,COUNT,AVG,NONE +STORAGE,tenant.consumption.read_batches,Total number of KV read batches,Requests,GAUGE,COUNT,AVG,NONE +STORAGE,tenant.consumption.read_bytes,Total number of bytes read from KV,Bytes,GAUGE,COUNT,AVG,NONE +STORAGE,tenant.consumption.read_requests,Total number of KV read requests,Requests,GAUGE,COUNT,AVG,NONE +STORAGE,tenant.consumption.request_units,Total RU consumption,Request Units,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,tenant.consumption.sql_pods_cpu_seconds,Total amount of CPU used by SQL pods,CPU Seconds,GAUGE,SECONDS,AVG,NONE +STORAGE,tenant.consumption.write_batches,Total number of KV write batches,Requests,GAUGE,COUNT,AVG,NONE +STORAGE,tenant.consumption.write_bytes,Total number of bytes written to KV,Bytes,GAUGE,COUNT,AVG,NONE +STORAGE,tenant.consumption.write_requests,Total number of KV write requests,Requests,GAUGE,COUNT,AVG,NONE +STORAGE,timeseries.write.bytes,Total size in bytes of metric samples written to disk,Storage,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,timeseries.write.errors,Total errors encountered while attempting to write metrics to disk,Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,timeseries.write.samples,Total number of metric samples written to disk,Metric Samples,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,totalbytes,Total number of bytes taken up by keys and values including non-live data,Storage,GAUGE,BYTES,AVG,NONE +STORAGE,tscache.skl.pages,Number of pages in the timestamp cache,Pages,GAUGE,COUNT,AVG,NONE +STORAGE,tscache.skl.rotations,Number of page rotations in the timestamp cache,Page Rotations,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,txn.commit_waits.before_commit_trigger,Number of KV transactions that had to commit-wait on the server before committing because they had a commit trigger,KV 
Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,txn.server_side.1PC.failure,Number of batches that attempted to commit using 1PC and failed,KV Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,txn.server_side.1PC.success,Number of batches that attempted to commit using 1PC and succeeded,KV Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,txn.server_side_retry.read_evaluation.failure,Number of read batches that were not successfully refreshed server side,KV Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,txn.server_side_retry.read_evaluation.success,Number of read batches that were successfully refreshed server side,KV Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,txn.server_side_retry.uncertainty_interval_error.failure,Number of batches that ran into uncertainty interval errors that were not successfully refreshed server side,KV Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,txn.server_side_retry.uncertainty_interval_error.success,Number of batches that ran into uncertainty interval errors that were successfully refreshed server side,KV Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,txn.server_side_retry.write_evaluation.failure,Number of write batches that were not successfully refreshed server side,KV Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,txn.server_side_retry.write_evaluation.success,Number of write batches that were successfully refreshed server side,KV Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,txnrecovery.attempts.pending,Number of transaction recovery attempts currently in-flight,Recovery Attempts,GAUGE,COUNT,AVG,NONE +STORAGE,txnrecovery.attempts.total,Number of transaction recovery attempts executed,Recovery Attempts,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,txnrecovery.failures,Number of transaction recovery attempts that failed,Recovery Attempts,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,txnrecovery.successes.aborted,Number of transaction recovery attempts that aborted a transaction,Recovery Attempts,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,txnrecovery.successes.committed,Number of transaction recovery attempts that committed a transaction,Recovery Attempts,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,txnrecovery.successes.pending,Number of transaction recovery attempts that left a transaction pending,Recovery Attempts,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,txnwaitqueue.deadlocks_total,Number of deadlocks detected by the txn wait queue,Deadlocks,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +STORAGE,txnwaitqueue.pushee.waiting,Number of pushees on the txn wait queue,Waiting Pushees,GAUGE,COUNT,AVG,NONE +STORAGE,txnwaitqueue.pusher.slow,The total number of cases where a pusher waited more than the excessive wait threshold,Slow Pushers,GAUGE,COUNT,AVG,NONE +STORAGE,txnwaitqueue.pusher.wait_time,Histogram of durations spent in queue by pushers,Pusher wait time,HISTOGRAM,NANOSECONDS,AVG,NONE +STORAGE,txnwaitqueue.pusher.waiting,Number of pushers on the txn wait queue,Waiting Pushers,GAUGE,COUNT,AVG,NONE +STORAGE,txnwaitqueue.query.wait_time,Histogram of durations spent in queue by queries,Query wait time,HISTOGRAM,NANOSECONDS,AVG,NONE +STORAGE,txnwaitqueue.query.waiting,Number of transaction status queries waiting for an updated transaction record,Waiting Queries,GAUGE,COUNT,AVG,NONE +STORAGE,valbytes,Number of bytes taken up by values,Storage,GAUGE,BYTES,AVG,NONE 
+STORAGE,valcount,Count of all values,MVCC Values,GAUGE,COUNT,AVG,NONE +APPLICATION,auth.cert.conn.latency,Latency to establish and authenticate a SQL connection using certificate,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,auth.gss.conn.latency,Latency to establish and authenticate a SQL connection using GSS,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,auth.jwt.conn.latency,Latency to establish and authenticate a SQL connection using JWT Token,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,auth.ldap.conn.latency,Latency to establish and authenticate a SQL connection using LDAP,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,auth.password.conn.latency,Latency to establish and authenticate a SQL connection using password,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,auth.scram.conn.latency,Latency to establish and authenticate a SQL connection using SCRAM,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,backup.last-failed-time.kms-inaccessible,The unix timestamp of the most recent failure of backup due to errKMSInaccessible by a backup specified as maintaining this metric,Jobs,GAUGE,TIMESTAMP_SEC,AVG,NONE +APPLICATION,changefeed.admit_latency,"Event admission latency: a difference between event MVCC timestamp and the time it was admitted into changefeed pipeline; Note: this metric includes the time spent waiting until event can be processed due to backpressure or time spent resolving schema descriptors. Also note, this metric excludes latency during backfill",Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,changefeed.aggregator_progress,The earliest timestamp up to which any aggregator is guaranteed to have emitted all values for,Unix Timestamp Nanoseconds,GAUGE,TIMESTAMP_NS,AVG,NONE +APPLICATION,changefeed.backfill_count,Number of changefeeds currently executing backfill,Count,GAUGE,COUNT,AVG,NONE +APPLICATION,changefeed.backfill_pending_ranges,Number of ranges in an ongoing backfill that are yet to be fully emitted,Count,GAUGE,COUNT,AVG,NONE +APPLICATION,changefeed.batch_reduction_count,Number of times a changefeed aggregator node attempted to reduce the size of message batches it emitted to the sink,Batch Size Reductions,GAUGE,COUNT,AVG,NONE +APPLICATION,changefeed.buffer_entries.allocated_mem,Current quota pool memory allocation,Bytes,GAUGE,BYTES,AVG,NONE +APPLICATION,changefeed.buffer_entries.allocated_mem.aggregator,Current quota pool memory allocation - between the kvfeed and the sink,Bytes,GAUGE,BYTES,AVG,NONE +APPLICATION,changefeed.buffer_entries.allocated_mem.rangefeed,Current quota pool memory allocation - between the rangefeed and the kvfeed,Bytes,GAUGE,BYTES,AVG,NONE +APPLICATION,changefeed.buffer_entries.flush,Number of flush elements added to the buffer,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.buffer_entries.flush.aggregator,Number of flush elements added to the buffer - between the kvfeed and the sink,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.buffer_entries.flush.rangefeed,Number of flush elements added to the buffer - between the rangefeed and the kvfeed,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.buffer_entries.in,Total entries entering the buffer between raft and changefeed sinks,Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.buffer_entries.in.aggregator,Total entries entering the buffer between raft and changefeed sinks - between the kvfeed and the 
sink,Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.buffer_entries.in.rangefeed,Total entries entering the buffer between raft and changefeed sinks - between the rangefeed and the kvfeed,Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.buffer_entries.kv,Number of kv elements added to the buffer,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.buffer_entries.kv.aggregator,Number of kv elements added to the buffer - between the kvfeed and the sink,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.buffer_entries.kv.rangefeed,Number of kv elements added to the buffer - between the rangefeed and the kvfeed,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.buffer_entries.out,Total entries leaving the buffer between raft and changefeed sinks,Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.buffer_entries.out.aggregator,Total entries leaving the buffer between raft and changefeed sinks - between the kvfeed and the sink,Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.buffer_entries.out.rangefeed,Total entries leaving the buffer between raft and changefeed sinks - between the rangefeed and the kvfeed,Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.buffer_entries.released,"Total entries processed, emitted and acknowledged by the sinks",Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.buffer_entries.released.aggregator,"Total entries processed, emitted and acknowledged by the sinks - between the kvfeed and the sink",Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.buffer_entries.released.rangefeed,"Total entries processed, emitted and acknowledged by the sinks - between the rangefeed and the kvfeed",Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.buffer_entries.resolved,Number of resolved elements added to the buffer,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.buffer_entries.resolved.aggregator,Number of resolved elements added to the buffer - between the kvfeed and the sink,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.buffer_entries.resolved.rangefeed,Number of resolved elements added to the buffer - between the rangefeed and the kvfeed,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.buffer_entries_mem.acquired,Total amount of memory acquired for entries as they enter the system,Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.buffer_entries_mem.acquired.aggregator,Total amount of memory acquired for entries as they enter the system - between the kvfeed and the sink,Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.buffer_entries_mem.acquired.rangefeed,Total amount of memory acquired for entries as they enter the system - between the rangefeed and the kvfeed,Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.buffer_entries_mem.released,Total amount of memory released by the entries after they have been emitted,Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.buffer_entries_mem.released.aggregator,Total amount of memory released by the entries after they have been emitted - between the kvfeed and the sink,Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.buffer_entries_mem.released.rangefeed,Total amount of memory released by the 
entries after they have been emitted - between the rangefeed and the kvfeed,Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.buffer_pushback_nanos,Total time spent waiting while the buffer was full,Nanoseconds,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.buffer_pushback_nanos.aggregator,Total time spent waiting while the buffer was full - between the kvfeed and the sink,Nanoseconds,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.buffer_pushback_nanos.rangefeed,Total time spent waiting while the buffer was full - between the rangefeed and the kvfeed,Nanoseconds,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.bytes.messages_pushback_nanos,Total time spent throttled for bytes quota,Nanoseconds,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.checkpoint_hist_nanos,Time spent checkpointing changefeed progress,Changefeeds,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,changefeed.checkpoint_progress,The earliest timestamp of any changefeed's persisted checkpoint (values prior to this timestamp will never need to be re-emitted),Unix Timestamp Nanoseconds,GAUGE,TIMESTAMP_NS,AVG,NONE +APPLICATION,changefeed.cloudstorage_buffered_bytes,The number of bytes buffered in cloudstorage sink files which have not been emitted yet,Bytes,GAUGE,COUNT,AVG,NONE +APPLICATION,changefeed.commit_latency,"Event commit latency: a difference between event MVCC timestamp and the time it was acknowledged by the downstream sink. If the sink batches events, then the difference between the oldest event in the batch and acknowledgement is recorded; Excludes latency during backfill",Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,changefeed.emitted_batch_sizes,Size of batches emitted by all feeds,Number of Messages in Batch,HISTOGRAM,COUNT,AVG,NONE +APPLICATION,changefeed.emitted_bytes,Bytes emitted by all feeds,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.emitted_messages,Messages emitted by all feeds,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.error_retries,Total retryable errors encountered by all changefeeds,Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.failures,Total number of changefeed jobs which have failed,Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.filtered_messages,Messages filtered out by all feeds. 
This count does not include the number of messages that may be filtered due to the range constraints.,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.flush.messages_pushback_nanos,Total time spent throttled for flush quota,Nanoseconds,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.flush_hist_nanos,Time spent flushing messages across all changefeeds,Changefeeds,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,changefeed.flushed_bytes,Bytes emitted by all feeds; may be different from changefeed.emitted_bytes when compression is enabled,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.flushes,Total flushes across all feeds,Flushes,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.forwarded_resolved_messages,Resolved timestamps forwarded from the change aggregator to the change frontier,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.frontier_updates,Number of change frontier updates across all feeds,Updates,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.internal_retry_message_count,Number of messages for which an attempt to retry them within an aggregator node was made,Messages,GAUGE,COUNT,AVG,NONE +APPLICATION,changefeed.kafka_throttling_hist_nanos,Time spent in throttling due to exceeding kafka quota,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,changefeed.lagging_ranges,The number of ranges considered to be lagging behind,Ranges,GAUGE,COUNT,AVG,NONE +APPLICATION,changefeed.max_behind_nanos,The most any changefeed's persisted checkpoint is behind the present,Nanoseconds,GAUGE,NANOSECONDS,AVG,NONE +APPLICATION,changefeed.message_size_hist,Message size histogram,Bytes,HISTOGRAM,BYTES,AVG,NONE +APPLICATION,changefeed.messages.messages_pushback_nanos,Total time spent throttled for messages quota,Nanoseconds,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.network.bytes_in,The number of bytes received from the network by changefeeds,Bytes,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.network.bytes_out,The number of bytes sent over the network by changefeeds,Bytes,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.nprocs_consume_event_nanos,Total time spent waiting to add an event to the parallel consumer,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,changefeed.nprocs_flush_nanos,Total time spent idle waiting for the parallel consumer to flush,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,changefeed.nprocs_in_flight_count,Number of buffered events in the parallel consumer,Count of Events,GAUGE,COUNT,AVG,NONE +APPLICATION,changefeed.parallel_io_in_flight_keys,The number of keys currently in-flight which may contend with batches pending to be emitted,Keys,GAUGE,COUNT,AVG,NONE +APPLICATION,changefeed.parallel_io_pending_rows,Number of rows which are blocked from being sent due to conflicting in-flight keys,Keys,GAUGE,COUNT,AVG,NONE +APPLICATION,changefeed.parallel_io_queue_nanos,Time that outgoing requests to the sink spend waiting in a queue due to in-flight requests with conflicting keys,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,changefeed.parallel_io_result_queue_nanos,Time that incoming results from the sink spend waiting in parallel io emitter before they are acknowledged by the changefeed,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,changefeed.queue_time_nanos,Time KV event spent waiting to be 
processed,Nanoseconds,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.running,"Number of currently running changefeeds, including sinkless",Changefeeds,GAUGE,COUNT,AVG,NONE +APPLICATION,changefeed.schema_registry.registrations,Number of registration attempts with the schema registry,Registrations,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.schema_registry.retry_count,Number of retries encountered when sending requests to the schema registry,Retries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.schemafeed.table_history_scans,The number of table history scans during polling,Counts,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.schemafeed.table_metadata_nanos,Time blocked while verifying table metadata histories,Nanoseconds,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.sink_batch_hist_nanos,Time spent batched in the sink buffer before being flushed and acknowledged,Changefeeds,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,changefeed.sink_errors,Number of changefeed errors caused by the sink,Count,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.sink_io_inflight,The number of keys currently inflight as IO requests being sent to the sink,Messages,GAUGE,COUNT,AVG,NONE +APPLICATION,changefeed.size_based_flushes,Total size based flushes across all feeds,Flushes,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.stage.checkpoint_job_progress.latency,Latency of the changefeed stage: checkpointing job progress,Latency,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,changefeed.stage.downstream_client_send.latency,Latency of the changefeed stage: flushing messages from the sink's client to its downstream. This includes sends that failed for most but not all sinks.,Latency,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,changefeed.stage.emit_row.latency,Latency of the changefeed stage: emitting row to sink,Latency,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,changefeed.stage.encode.latency,Latency of the changefeed stage: encoding data,Latency,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,changefeed.stage.kv_feed_buffer.latency,Latency of the changefeed stage: waiting to buffer kv events,Latency,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,changefeed.stage.kv_feed_wait_for_table_event.latency,Latency of the changefeed stage: waiting for a table schema event to join to the kv event,Latency,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,changefeed.stage.rangefeed_buffer_checkpoint.latency,Latency of the changefeed stage: buffering rangefeed checkpoint events,Latency,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,changefeed.stage.rangefeed_buffer_value.latency,Latency of the changefeed stage: buffering rangefeed value events,Latency,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,changefeed.total_ranges,The total number of ranges being watched by changefeed aggregators,Ranges,GAUGE,COUNT,AVG,NONE +APPLICATION,changefeed.usage.error_count,Count of errors encountered while generating usage metrics for changefeeds,Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,changefeed.usage.query_duration,Time taken by the queries used to generate usage metrics for changefeeds,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,changefeed.usage.table_bytes,Aggregated number of bytes of data per table watched by changefeeds,Storage,GAUGE,BYTES,AVG,NONE +APPLICATION,clock-offset.meannanos,Mean clock offset with other nodes,Clock Offset,GAUGE,NANOSECONDS,AVG,NONE 
+APPLICATION,clock-offset.stddevnanos,Stddev clock offset with other nodes,Clock Offset,GAUGE,NANOSECONDS,AVG,NONE +APPLICATION,cloud.conns_opened,HTTP connections opened by cloud operations,Connections,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,cloud.conns_reused,HTTP connections reused by cloud operations,Connections,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,cloud.listing_results,Listing results by all cloud operations,Results,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,cloud.listings,Listing operations by all cloud operations,Calls,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,cloud.open_readers,Currently open readers for cloud IO,Readers,GAUGE,COUNT,AVG,NONE +APPLICATION,cloud.open_writers,Currently open writers for cloud IO,Writers,GAUGE,COUNT,AVG,NONE +APPLICATION,cloud.read_bytes,Bytes read from all cloud operations,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,cloud.readers_opened,Readers opened by all cloud operations,Files,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,cloud.tls_handshakes,TLS handshakes done by cloud operations,Handshakes,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,cloud.write_bytes,Bytes written by all cloud operations,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,cloud.writers_opened,Writers opened by all cloud operations,files,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,cluster.preserve-downgrade-option.last-updated,Unix timestamp of last updated time for cluster.preserve_downgrade_option,Timestamp,GAUGE,TIMESTAMP_SEC,AVG,NONE +APPLICATION,distsender.batch_requests.cross_region.bytes,"Total byte count of replica-addressed batch requests processed cross + region when region tiers are configured",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.batch_requests.cross_zone.bytes,"Total byte count of replica-addressed batch requests processed cross + zone within the same region when region and zone tiers are configured. + However, if the region tiers are not configured, this count may also include + batch data sent between different regions. Ensuring consistent configuration + of region and zone tiers across nodes helps to accurately monitor the data + transmitted.",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.batch_requests.replica_addressed.bytes,Total byte count of replica-addressed batch requests processed,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.batch_responses.cross_region.bytes,"Total byte count of replica-addressed batch responses received cross + region when region tiers are configured",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.batch_responses.cross_zone.bytes,"Total byte count of replica-addressed batch responses received cross + zone within the same region when region and zone tiers are configured. + However, if the region tiers are not configured, this count may also include + batch data received between different regions. 
Ensuring consistent + configuration of region and zone tiers across nodes helps to accurately + monitor the data transmitted.",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.batch_responses.replica_addressed.bytes,Total byte count of replica-addressed batch responses received,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.batches,Number of batches processed,Batches,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.batches.async.in_progress,Number of partial batches currently being executed asynchronously,Partial Batches,GAUGE,COUNT,AVG,NONE +APPLICATION,distsender.batches.async.sent,Number of partial batches sent asynchronously,Partial Batches,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.batches.async.throttled,Number of partial batches not sent asynchronously due to throttling,Partial Batches,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.batches.async.throttled_cumulative_duration_nanos,Cumulative duration of partial batches being throttled (in nanoseconds),Throttled Duration,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.batches.partial,Number of partial batches processed after being divided on range boundaries,Partial Batches,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.circuit_breaker.replicas.count,Number of replicas currently tracked by DistSender circuit breakers,Replicas,GAUGE,COUNT,AVG,NONE +APPLICATION,distsender.circuit_breaker.replicas.probes.failure,Cumulative number of failed DistSender replica circuit breaker probes,Probes,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.circuit_breaker.replicas.probes.running,Number of currently running DistSender replica circuit breaker probes,Probes,GAUGE,COUNT,AVG,NONE +APPLICATION,distsender.circuit_breaker.replicas.probes.success,Cumulative number of successful DistSender replica circuit breaker probes,Probes,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.circuit_breaker.replicas.requests.cancelled,Cumulative number of requests cancelled when DistSender replica circuit breakers trip,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.circuit_breaker.replicas.requests.rejected,Cumulative number of requests rejected by tripped DistSender replica circuit breakers,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.circuit_breaker.replicas.tripped,Number of DistSender replica circuit breakers currently tripped,Replicas,GAUGE,COUNT,AVG,NONE +APPLICATION,distsender.circuit_breaker.replicas.tripped_events,Cumulative number of DistSender replica circuit breakers tripped over time,Replicas,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.errors.inleasetransferbackoffs,Number of times backed off due to NotLeaseHolderErrors during lease transfer,Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.errors.notleaseholder,Number of NotLeaseHolderErrors encountered from replica-addressed RPCs,Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rangefeed.catchup_ranges,"Number of ranges in catchup mode + +This counts the number of ranges with an active rangefeed that are performing catchup scan. 
+",Ranges,GAUGE,COUNT,AVG,NONE +APPLICATION,distsender.rangefeed.catchup_ranges_waiting_client_side,Number of ranges waiting on the client-side limiter to perform catchup scans,Ranges,GAUGE,COUNT,AVG,NONE +APPLICATION,distsender.rangefeed.error_catchup_ranges,Number of ranges in catchup mode which experienced an error,Ranges,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rangefeed.local_ranges,Number of ranges connected to local node.,Ranges,GAUGE,COUNT,AVG,NONE +APPLICATION,distsender.rangefeed.restart_ranges,Number of ranges that were restarted due to transient errors,Ranges,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rangefeed.retry.logical_ops_missing,Number of ranges that encountered retryable LOGICAL_OPS_MISSING error,Ranges,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rangefeed.retry.manual_range_split,Number of ranges that encountered retryable MANUAL_RANGE_SPLIT error,Ranges,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rangefeed.retry.no_leaseholder,Number of ranges that encountered retryable NO_LEASEHOLDER error,Ranges,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rangefeed.retry.node_not_found,Number of ranges that encountered retryable node not found error,Ranges,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rangefeed.retry.raft_snapshot,Number of ranges that encountered retryable RAFT_SNAPSHOT error,Ranges,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rangefeed.retry.range_key_mismatch,Number of ranges that encountered retryable range key mismatch error,Ranges,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rangefeed.retry.range_merged,Number of ranges that encountered retryable RANGE_MERGED error,Ranges,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rangefeed.retry.range_not_found,Number of ranges that encountered retryable range not found error,Ranges,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rangefeed.retry.range_split,Number of ranges that encountered retryable RANGE_SPLIT error,Ranges,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rangefeed.retry.rangefeed_closed,Number of ranges that encountered retryable RANGEFEED_CLOSED error,Ranges,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rangefeed.retry.replica_removed,Number of ranges that encountered retryable REPLICA_REMOVED error,Ranges,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rangefeed.retry.send,Number of ranges that encountered retryable send error,Ranges,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rangefeed.retry.slow_consumer,Number of ranges that encountered retryable SLOW_CONSUMER error,Ranges,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rangefeed.retry.store_not_found,Number of ranges that encountered retryable store not found error,Ranges,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rangefeed.retry.unknown,Number of ranges that encountered retryable unknown error,Ranges,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rangefeed.total_ranges,"Number of ranges executing rangefeed + +This counts the number of ranges with an active rangefeed. +",Ranges,GAUGE,COUNT,AVG,NONE +APPLICATION,distsender.rangelookups,Number of range lookups,Range Lookups,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.addsstable.sent,"Number of AddSSTable requests processed. 
+ +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.adminchangereplicas.sent,"Number of AdminChangeReplicas requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.adminmerge.sent,"Number of AdminMerge requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.adminrelocaterange.sent,"Number of AdminRelocateRange requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.adminscatter.sent,"Number of AdminScatter requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.adminsplit.sent,"Number of AdminSplit requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.admintransferlease.sent,"Number of AdminTransferLease requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.adminunsplit.sent,"Number of AdminUnsplit requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.adminverifyprotectedtimestamp.sent,"Number of AdminVerifyProtectedTimestamp requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.barrier.sent,"Number of Barrier requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.checkconsistency.sent,"Number of CheckConsistency requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.clearrange.sent,"Number of ClearRange requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.computechecksum.sent,"Number of ComputeChecksum requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.conditionalput.sent,"Number of ConditionalPut requests processed. 
+ +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.delete.sent,"Number of Delete requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.deleterange.sent,"Number of DeleteRange requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.endtxn.sent,"Number of EndTxn requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.ambiguousresulterrtype,"Number of AmbiguousResultErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.batchtimestampbeforegcerrtype,"Number of BatchTimestampBeforeGCErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.communicationerrtype,"Number of CommunicationErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.conditionfailederrtype,"Number of ConditionFailedErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.errordetailtype(0),"Number of ErrorDetailType(0) errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.errordetailtype(15),"Number of ErrorDetailType(15) errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. 
Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.errordetailtype(19),"Number of ErrorDetailType(19) errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.errordetailtype(20),"Number of ErrorDetailType(20) errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.errordetailtype(21),"Number of ErrorDetailType(21) errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.errordetailtype(23),"Number of ErrorDetailType(23) errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.errordetailtype(24),"Number of ErrorDetailType(24) errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.errordetailtype(29),"Number of ErrorDetailType(29) errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.errordetailtype(30),"Number of ErrorDetailType(30) errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. 
+",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.errordetailtype(33),"Number of ErrorDetailType(33) errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.indeterminatecommiterrtype,"Number of IndeterminateCommitErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.integeroverflowerrtype,"Number of IntegerOverflowErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.intentmissingerrtype,"Number of IntentMissingErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.internalerrtype,"Number of InternalErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.invalidleaseerrtype,"Number of InvalidLeaseErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.leaserejectederrtype,"Number of LeaseRejectedErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.lockconflicterrtype,"Number of LockConflictErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. 
Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.mergeinprogresserrtype,"Number of MergeInProgressErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.mintimestampboundunsatisfiableerrtype,"Number of MinTimestampBoundUnsatisfiableErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.mvcchistorymutationerrtype,"Number of MVCCHistoryMutationErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.nodeunavailableerrtype,"Number of NodeUnavailableErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.notleaseholdererrtype,"Number of NotLeaseHolderErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.oprequirestxnerrtype,"Number of OpRequiresTxnErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.optimisticevalconflictserrtype,"Number of OptimisticEvalConflictsErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. 
+",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.proxyfailederrtype,"Number of ProxyFailedErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.raftgroupdeletederrtype,"Number of RaftGroupDeletedErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.rangefeedretryerrtype,"Number of RangeFeedRetryErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.rangekeymismatcherrtype,"Number of RangeKeyMismatchErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.rangenotfounderrtype,"Number of RangeNotFoundErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.readwithinuncertaintyintervalerrtype,"Number of ReadWithinUncertaintyIntervalErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.refreshfailederrtype,"Number of RefreshFailedErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. 
+",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.replicacorruptionerrtype,"Number of ReplicaCorruptionErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.replicatooolderrtype,"Number of ReplicaTooOldErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.replicaunavailableerrtype,"Number of ReplicaUnavailableErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.storenotfounderrtype,"Number of StoreNotFoundErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.transactionabortederrtype,"Number of TransactionAbortedErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.transactionpusherrtype,"Number of TransactionPushErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.transactionretryerrtype,"Number of TransactionRetryErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. 
+",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.transactionretrywithprotorefresherrtype,"Number of TransactionRetryWithProtoRefreshErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.transactionstatuserrtype,"Number of TransactionStatusErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.txnalreadyencounterederrtype,"Number of TxnAlreadyEncounteredErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.unsupportedrequesterrtype,"Number of UnsupportedRequestErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.writeintenterrtype,"Number of WriteIntentErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.err.writetooolderrtype,"Number of WriteTooOldErrType errors received replica-bound RPCs + +This counts how often error of the specified type was received back from replicas +as part of executing possibly range-spanning requests. Failures to reach the target +replica will be accounted for as 'roachpb.CommunicationErrType' and unclassified +errors as 'roachpb.InternalErrType'. +",Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.export.sent,"Number of Export requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.gc.sent,"Number of GC requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.get.sent,"Number of Get requests processed. 
+ +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.heartbeattxn.sent,"Number of HeartbeatTxn requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.increment.sent,"Number of Increment requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.initput.sent,"Number of InitPut requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.isspanempty.sent,"Number of IsSpanEmpty requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.leaseinfo.sent,"Number of LeaseInfo requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.linkexternalsstable.sent,"Number of LinkExternalSSTable requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.merge.sent,"Number of Merge requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.migrate.sent,"Number of Migrate requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.probe.sent,"Number of Probe requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.proxy.err,Number of attempts by a gateway to proxy a request which resulted in a failure.,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.proxy.forward.err,Number of attempts on a follower replica to proxy a request which resulted in a failure.,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.proxy.forward.sent,Number of attempts on a follower replica to proxy a request to an unreachable leaseholder.,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.proxy.sent,Number of attempts by a gateway to proxy a request to an unreachable leaseholder via a follower replica.,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.pushtxn.sent,"Number of PushTxn requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.put.sent,"Number of Put requests processed. 
+ +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.queryintent.sent,"Number of QueryIntent requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.querylocks.sent,"Number of QueryLocks requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.queryresolvedtimestamp.sent,"Number of QueryResolvedTimestamp requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.querytxn.sent,"Number of QueryTxn requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.rangestats.sent,"Number of RangeStats requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.recomputestats.sent,"Number of RecomputeStats requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.recovertxn.sent,"Number of RecoverTxn requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.refresh.sent,"Number of Refresh requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.refreshrange.sent,"Number of RefreshRange requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.requestlease.sent,"Number of RequestLease requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.resolveintent.sent,"Number of ResolveIntent requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.resolveintentrange.sent,"Number of ResolveIntentRange requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.reversescan.sent,"Number of ReverseScan requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.revertrange.sent,"Number of RevertRange requests processed. 
+ +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.scan.sent,"Number of Scan requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.sent,Number of replica-addressed RPCs sent,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.sent.local,Number of replica-addressed RPCs sent through the local-server optimization,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.sent.nextreplicaerror,Number of replica-addressed RPCs sent due to per-replica errors,RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.subsume.sent,"Number of Subsume requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.transferlease.sent,"Number of TransferLease requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.truncatelog.sent,"Number of TruncateLog requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.rpc.writebatch.sent,"Number of WriteBatch requests processed. + +This counts the requests in batches handed to DistSender, not the RPCs +sent to individual Ranges as a result.",RPCs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,distsender.slow.replicarpcs,"Number of slow replica-bound RPCs. + +Note that this is not a good signal for KV health. 
The remote side of the +RPCs tracked here may experience contention, so an end user can easily +cause values for this metric to be emitted by leaving a transaction open +for a long time and contending with it using a second transaction.",Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.adopt_iterations,number of job-adopt iterations performed by the registry,iterations,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_config_env_runner.currently_idle,Number of auto_config_env_runner jobs currently considered Idle and can be freely shut down,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.auto_config_env_runner.currently_paused,Number of auto_config_env_runner jobs currently considered Paused,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.auto_config_env_runner.currently_running,Number of auto_config_env_runner jobs currently running in Resume or OnFailOrCancel state,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.auto_config_env_runner.expired_pts_records,Number of expired protected timestamp records owned by auto_config_env_runner jobs,records,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_config_env_runner.fail_or_cancel_completed,Number of auto_config_env_runner jobs which successfully completed their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_config_env_runner.fail_or_cancel_failed,Number of auto_config_env_runner jobs which failed with a non-retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_config_env_runner.fail_or_cancel_retry_error,Number of auto_config_env_runner jobs which failed with a retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_config_env_runner.protected_age_sec,The age of the oldest PTS record protected by auto_config_env_runner jobs,seconds,GAUGE,SECONDS,AVG,NONE +APPLICATION,jobs.auto_config_env_runner.protected_record_count,Number of protected timestamp records held by auto_config_env_runner jobs,records,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.auto_config_env_runner.resume_completed,Number of auto_config_env_runner jobs which successfully resumed to completion,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_config_env_runner.resume_failed,Number of auto_config_env_runner jobs which failed with a non-retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_config_env_runner.resume_retry_error,Number of auto_config_env_runner jobs which failed with a retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_config_runner.currently_idle,Number of auto_config_runner jobs currently considered Idle and can be freely shut down,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.auto_config_runner.currently_paused,Number of auto_config_runner jobs currently considered Paused,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.auto_config_runner.currently_running,Number of auto_config_runner jobs currently running in Resume or OnFailOrCancel state,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.auto_config_runner.expired_pts_records,Number of expired protected timestamp records owned by auto_config_runner jobs,records,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_config_runner.fail_or_cancel_completed,Number of auto_config_runner jobs which successfully completed their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE 
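Each row above follows the same eight-column layout: metric layer (`APPLICATION` here), metric name, help text (quoted, possibly multi-line), y-axis label, metric type (`GAUGE` or `COUNTER`), unit, aggregation, and derivative. For the `COUNTER` rows tagged `NON_NEGATIVE_DERIVATIVE`, the chartable signal is a per-interval rate clamped at zero, so a counter reset on node restart does not show up as a negative spike. A minimal Python sketch of both ideas, assuming the exported file is saved as `metrics.csv` (hypothetical filename, no header row):

```python
import csv

# Inferred column layout for each row of the exported metrics CSV:
# layer, name, help, y_axis_label, type, unit, aggregation, derivative
FIELDS = ["layer", "name", "help", "y_axis_label", "type", "unit",
          "aggregation", "derivative"]

def load_metadata(path):
    """Index metric metadata by name. The multi-line help strings parse
    cleanly because they are quoted CSV fields."""
    with open(path, newline="") as f:
        return {row[1]: dict(zip(FIELDS, row))
                for row in csv.reader(f) if len(row) == len(FIELDS)}

def non_negative_rate(prev, curr, interval_s):
    """Rate for a COUNTER with NON_NEGATIVE_DERIVATIVE semantics: a counter
    reset (e.g. a node restart) makes the delta negative, so clamp at zero."""
    return max(curr - prev, 0) / interval_s

meta = load_metadata("metrics.csv")  # hypothetical path
m = meta["distsender.rpc.sent"]
assert m["type"] == "COUNTER" and m["derivative"] == "NON_NEGATIVE_DERIVATIVE"
# Two successive scrapes, 10s apart (made-up sample values):
print(non_negative_rate(12_340, 12_390, 10.0), "RPCs/sec")
```

Note that the quoted help text often carries the semantics you need before charting, such as the repeated DistSender caveat that `.sent` counters track batch-level requests handed to DistSender, not the per-Range RPCs issued as a result.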
+APPLICATION,jobs.auto_config_runner.fail_or_cancel_failed,Number of auto_config_runner jobs which failed with a non-retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_config_runner.fail_or_cancel_retry_error,Number of auto_config_runner jobs which failed with a retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_config_runner.protected_age_sec,The age of the oldest PTS record protected by auto_config_runner jobs,seconds,GAUGE,SECONDS,AVG,NONE +APPLICATION,jobs.auto_config_runner.protected_record_count,Number of protected timestamp records held by auto_config_runner jobs,records,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.auto_config_runner.resume_completed,Number of auto_config_runner jobs which successfully resumed to completion,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_config_runner.resume_failed,Number of auto_config_runner jobs which failed with a non-retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_config_runner.resume_retry_error,Number of auto_config_runner jobs which failed with a retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_config_task.currently_idle,Number of auto_config_task jobs currently considered Idle and can be freely shut down,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.auto_config_task.currently_paused,Number of auto_config_task jobs currently considered Paused,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.auto_config_task.currently_running,Number of auto_config_task jobs currently running in Resume or OnFailOrCancel state,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.auto_config_task.expired_pts_records,Number of expired protected timestamp records owned by auto_config_task jobs,records,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_config_task.fail_or_cancel_completed,Number of auto_config_task jobs which successfully completed their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_config_task.fail_or_cancel_failed,Number of auto_config_task jobs which failed with a non-retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_config_task.fail_or_cancel_retry_error,Number of auto_config_task jobs which failed with a retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_config_task.protected_age_sec,The age of the oldest PTS record protected by auto_config_task jobs,seconds,GAUGE,SECONDS,AVG,NONE +APPLICATION,jobs.auto_config_task.protected_record_count,Number of protected timestamp records held by auto_config_task jobs,records,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.auto_config_task.resume_completed,Number of auto_config_task jobs which successfully resumed to completion,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_config_task.resume_failed,Number of auto_config_task jobs which failed with a non-retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_config_task.resume_retry_error,Number of auto_config_task jobs which failed with a retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_create_partial_stats.currently_idle,Number of auto_create_partial_stats jobs currently considered Idle and can be freely shut 
down,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.auto_create_partial_stats.currently_paused,Number of auto_create_partial_stats jobs currently considered Paused,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.auto_create_partial_stats.currently_running,Number of auto_create_partial_stats jobs currently running in Resume or OnFailOrCancel state,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.auto_create_partial_stats.expired_pts_records,Number of expired protected timestamp records owned by auto_create_partial_stats jobs,records,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_create_partial_stats.fail_or_cancel_completed,Number of auto_create_partial_stats jobs which successfully completed their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_create_partial_stats.fail_or_cancel_failed,Number of auto_create_partial_stats jobs which failed with a non-retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_create_partial_stats.fail_or_cancel_retry_error,Number of auto_create_partial_stats jobs which failed with a retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_create_partial_stats.protected_age_sec,The age of the oldest PTS record protected by auto_create_partial_stats jobs,seconds,GAUGE,SECONDS,AVG,NONE +APPLICATION,jobs.auto_create_partial_stats.protected_record_count,Number of protected timestamp records held by auto_create_partial_stats jobs,records,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.auto_create_partial_stats.resume_completed,Number of auto_create_partial_stats jobs which successfully resumed to completion,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_create_partial_stats.resume_failed,Number of auto_create_partial_stats jobs which failed with a non-retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_create_partial_stats.resume_retry_error,Number of auto_create_partial_stats jobs which failed with a retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_create_stats.currently_idle,Number of auto_create_stats jobs currently considered Idle and can be freely shut down,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.auto_create_stats.currently_paused,Number of auto_create_stats jobs currently considered Paused,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.auto_create_stats.currently_running,Number of auto_create_stats jobs currently running in Resume or OnFailOrCancel state,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.auto_create_stats.expired_pts_records,Number of expired protected timestamp records owned by auto_create_stats jobs,records,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_create_stats.fail_or_cancel_completed,Number of auto_create_stats jobs which successfully completed their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_create_stats.fail_or_cancel_failed,Number of auto_create_stats jobs which failed with a non-retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_create_stats.fail_or_cancel_retry_error,Number of auto_create_stats jobs which failed with a retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_create_stats.protected_age_sec,The age of the oldest PTS record 
protected by auto_create_stats jobs,seconds,GAUGE,SECONDS,AVG,NONE +APPLICATION,jobs.auto_create_stats.protected_record_count,Number of protected timestamp records held by auto_create_stats jobs,records,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.auto_create_stats.resume_completed,Number of auto_create_stats jobs which successfully resumed to completion,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_create_stats.resume_failed,Number of auto_create_stats jobs which failed with a non-retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_create_stats.resume_retry_error,Number of auto_create_stats jobs which failed with a retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_schema_telemetry.currently_idle,Number of auto_schema_telemetry jobs currently considered Idle and can be freely shut down,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.auto_schema_telemetry.currently_paused,Number of auto_schema_telemetry jobs currently considered Paused,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.auto_schema_telemetry.currently_running,Number of auto_schema_telemetry jobs currently running in Resume or OnFailOrCancel state,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.auto_schema_telemetry.expired_pts_records,Number of expired protected timestamp records owned by auto_schema_telemetry jobs,records,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_schema_telemetry.fail_or_cancel_completed,Number of auto_schema_telemetry jobs which successfully completed their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_schema_telemetry.fail_or_cancel_failed,Number of auto_schema_telemetry jobs which failed with a non-retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_schema_telemetry.fail_or_cancel_retry_error,Number of auto_schema_telemetry jobs which failed with a retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_schema_telemetry.protected_age_sec,The age of the oldest PTS record protected by auto_schema_telemetry jobs,seconds,GAUGE,SECONDS,AVG,NONE +APPLICATION,jobs.auto_schema_telemetry.protected_record_count,Number of protected timestamp records held by auto_schema_telemetry jobs,records,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.auto_schema_telemetry.resume_completed,Number of auto_schema_telemetry jobs which successfully resumed to completion,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_schema_telemetry.resume_failed,Number of auto_schema_telemetry jobs which failed with a non-retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_schema_telemetry.resume_retry_error,Number of auto_schema_telemetry jobs which failed with a retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_span_config_reconciliation.currently_idle,Number of auto_span_config_reconciliation jobs currently considered Idle and can be freely shut down,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.auto_span_config_reconciliation.currently_paused,Number of auto_span_config_reconciliation jobs currently considered Paused,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.auto_span_config_reconciliation.currently_running,Number of auto_span_config_reconciliation jobs currently running in Resume or OnFailOrCancel state,jobs,GAUGE,COUNT,AVG,NONE 
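The `jobs.*` rows repeat one fixed template per job type: three gauges for current state (`currently_idle`, `currently_paused`, `currently_running`), two protected-timestamp gauges (`protected_age_sec`, `protected_record_count`), and seven counters for lifecycle outcomes. To get the full set for one job type (for a dashboard, or to validate an export), expanding the template is enough; a sketch, with the suffix list read off the rows above:

```python
# Per-job-type metric template, inferred from the jobs.* rows in this file.
JOB_METRIC_SUFFIXES = [
    "currently_idle", "currently_paused", "currently_running",
    "expired_pts_records",
    "fail_or_cancel_completed", "fail_or_cancel_failed",
    "fail_or_cancel_retry_error",
    "protected_age_sec", "protected_record_count",
    "resume_completed", "resume_failed", "resume_retry_error",
]

def job_metric_names(job_type: str) -> list[str]:
    """Expand the template for one job type, e.g. 'backup' or 'changefeed'."""
    return [f"jobs.{job_type}.{suffix}" for suffix in JOB_METRIC_SUFFIXES]

print("\n".join(job_metric_names("backup")))  # the 12 per-type backup metrics
```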
+APPLICATION,jobs.auto_span_config_reconciliation.expired_pts_records,Number of expired protected timestamp records owned by auto_span_config_reconciliation jobs,records,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_span_config_reconciliation.fail_or_cancel_completed,Number of auto_span_config_reconciliation jobs which successfully completed their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_span_config_reconciliation.fail_or_cancel_failed,Number of auto_span_config_reconciliation jobs which failed with a non-retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_span_config_reconciliation.fail_or_cancel_retry_error,Number of auto_span_config_reconciliation jobs which failed with a retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_span_config_reconciliation.protected_age_sec,The age of the oldest PTS record protected by auto_span_config_reconciliation jobs,seconds,GAUGE,SECONDS,AVG,NONE +APPLICATION,jobs.auto_span_config_reconciliation.protected_record_count,Number of protected timestamp records held by auto_span_config_reconciliation jobs,records,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.auto_span_config_reconciliation.resume_completed,Number of auto_span_config_reconciliation jobs which successfully resumed to completion,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_span_config_reconciliation.resume_failed,Number of auto_span_config_reconciliation jobs which failed with a non-retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_span_config_reconciliation.resume_retry_error,Number of auto_span_config_reconciliation jobs which failed with a retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_sql_stats_compaction.currently_idle,Number of auto_sql_stats_compaction jobs currently considered Idle and can be freely shut down,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.auto_sql_stats_compaction.currently_paused,Number of auto_sql_stats_compaction jobs currently considered Paused,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.auto_sql_stats_compaction.currently_running,Number of auto_sql_stats_compaction jobs currently running in Resume or OnFailOrCancel state,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.auto_sql_stats_compaction.expired_pts_records,Number of expired protected timestamp records owned by auto_sql_stats_compaction jobs,records,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_sql_stats_compaction.fail_or_cancel_completed,Number of auto_sql_stats_compaction jobs which successfully completed their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_sql_stats_compaction.fail_or_cancel_failed,Number of auto_sql_stats_compaction jobs which failed with a non-retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_sql_stats_compaction.fail_or_cancel_retry_error,Number of auto_sql_stats_compaction jobs which failed with a retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_sql_stats_compaction.protected_age_sec,The age of the oldest PTS record protected by auto_sql_stats_compaction jobs,seconds,GAUGE,SECONDS,AVG,NONE 
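Among those per-type metrics, `jobs.<type>.protected_age_sec` (a `GAUGE` in `SECONDS`) is the one most worth watching: it reports the age of the oldest protected timestamp (PTS) record the job type holds, and a steadily climbing value means garbage collection is being held back. A sketch of a simple threshold check; the 4-hour cutoff is illustrative only, not a value taken from this file:

```python
def pts_age_alert(age_sec: float, gc_ttl_sec: float = 4 * 3600) -> bool:
    """Flag when the oldest PTS record held by a job type is older than the
    GC TTL. The 4h default here is an illustrative assumption, not a value
    read from any CockroachDB setting."""
    return age_sec > gc_ttl_sec

# e.g. fed from a scrape of jobs.backup.protected_age_sec (made-up value):
print(pts_age_alert(5.5 * 3600))  # True: GC has been held back past the TTL
```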
+APPLICATION,jobs.auto_sql_stats_compaction.protected_record_count,Number of protected timestamp records held by auto_sql_stats_compaction jobs,records,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.auto_sql_stats_compaction.resume_completed,Number of auto_sql_stats_compaction jobs which successfully resumed to completion,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_sql_stats_compaction.resume_failed,Number of auto_sql_stats_compaction jobs which failed with a non-retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_sql_stats_compaction.resume_retry_error,Number of auto_sql_stats_compaction jobs which failed with a retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_update_sql_activity.currently_idle,Number of auto_update_sql_activity jobs currently considered Idle and can be freely shut down,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.auto_update_sql_activity.currently_paused,Number of auto_update_sql_activity jobs currently considered Paused,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.auto_update_sql_activity.currently_running,Number of auto_update_sql_activity jobs currently running in Resume or OnFailOrCancel state,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.auto_update_sql_activity.expired_pts_records,Number of expired protected timestamp records owned by auto_update_sql_activity jobs,records,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_update_sql_activity.fail_or_cancel_completed,Number of auto_update_sql_activity jobs which successfully completed their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_update_sql_activity.fail_or_cancel_failed,Number of auto_update_sql_activity jobs which failed with a non-retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_update_sql_activity.fail_or_cancel_retry_error,Number of auto_update_sql_activity jobs which failed with a retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_update_sql_activity.protected_age_sec,The age of the oldest PTS record protected by auto_update_sql_activity jobs,seconds,GAUGE,SECONDS,AVG,NONE +APPLICATION,jobs.auto_update_sql_activity.protected_record_count,Number of protected timestamp records held by auto_update_sql_activity jobs,records,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.auto_update_sql_activity.resume_completed,Number of auto_update_sql_activity jobs which successfully resumed to completion,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_update_sql_activity.resume_failed,Number of auto_update_sql_activity jobs which failed with a non-retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.auto_update_sql_activity.resume_retry_error,Number of auto_update_sql_activity jobs which failed with a retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.backup.currently_idle,Number of backup jobs currently considered Idle and can be freely shut down,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.backup.currently_paused,Number of backup jobs currently considered Paused,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.backup.currently_running,Number of backup jobs currently running in Resume or OnFailOrCancel state,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.backup.expired_pts_records,Number of expired protected timestamp records owned by backup 
jobs,records,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.backup.fail_or_cancel_completed,Number of backup jobs which successfully completed their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.backup.fail_or_cancel_failed,Number of backup jobs which failed with a non-retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.backup.fail_or_cancel_retry_error,Number of backup jobs which failed with a retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.backup.protected_age_sec,The age of the oldest PTS record protected by backup jobs,seconds,GAUGE,SECONDS,AVG,NONE +APPLICATION,jobs.backup.protected_record_count,Number of protected timestamp records held by backup jobs,records,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.backup.resume_completed,Number of backup jobs which successfully resumed to completion,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.backup.resume_failed,Number of backup jobs which failed with a non-retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.backup.resume_retry_error,Number of backup jobs which failed with a retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.changefeed.currently_idle,Number of changefeed jobs currently considered Idle and can be freely shut down,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.changefeed.currently_paused,Number of changefeed jobs currently considered Paused,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.changefeed.currently_running,Number of changefeed jobs currently running in Resume or OnFailOrCancel state,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.changefeed.expired_pts_records,Number of expired protected timestamp records owned by changefeed jobs,records,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.changefeed.fail_or_cancel_completed,Number of changefeed jobs which successfully completed their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.changefeed.fail_or_cancel_failed,Number of changefeed jobs which failed with a non-retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.changefeed.fail_or_cancel_retry_error,Number of changefeed jobs which failed with a retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.changefeed.protected_age_sec,The age of the oldest PTS record protected by changefeed jobs,seconds,GAUGE,SECONDS,AVG,NONE +APPLICATION,jobs.changefeed.protected_record_count,Number of protected timestamp records held by changefeed jobs,records,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.changefeed.resume_completed,Number of changefeed jobs which successfully resumed to completion,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.changefeed.resume_failed,Number of changefeed jobs which failed with a non-retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.changefeed.resume_retry_error,Number of changefeed jobs which failed with a retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.claimed_jobs,number of jobs claimed in job-adopt iterations,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.create_stats.currently_idle,Number of create_stats jobs currently considered Idle and can be freely shut 
down,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.create_stats.currently_paused,Number of create_stats jobs currently considered Paused,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.create_stats.currently_running,Number of create_stats jobs currently running in Resume or OnFailOrCancel state,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.create_stats.expired_pts_records,Number of expired protected timestamp records owned by create_stats jobs,records,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.create_stats.fail_or_cancel_completed,Number of create_stats jobs which successfully completed their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.create_stats.fail_or_cancel_failed,Number of create_stats jobs which failed with a non-retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.create_stats.fail_or_cancel_retry_error,Number of create_stats jobs which failed with a retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.create_stats.protected_age_sec,The age of the oldest PTS record protected by create_stats jobs,seconds,GAUGE,SECONDS,AVG,NONE +APPLICATION,jobs.create_stats.protected_record_count,Number of protected timestamp records held by create_stats jobs,records,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.create_stats.resume_completed,Number of create_stats jobs which successfully resumed to completion,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.create_stats.resume_failed,Number of create_stats jobs which failed with a non-retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.create_stats.resume_retry_error,Number of create_stats jobs which failed with a retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.history_retention.currently_idle,Number of history_retention jobs currently considered Idle and can be freely shut down,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.history_retention.currently_paused,Number of history_retention jobs currently considered Paused,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.history_retention.currently_running,Number of history_retention jobs currently running in Resume or OnFailOrCancel state,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.history_retention.expired_pts_records,Number of expired protected timestamp records owned by history_retention jobs,records,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.history_retention.fail_or_cancel_completed,Number of history_retention jobs which successfully completed their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.history_retention.fail_or_cancel_failed,Number of history_retention jobs which failed with a non-retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.history_retention.fail_or_cancel_retry_error,Number of history_retention jobs which failed with a retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.history_retention.protected_age_sec,The age of the oldest PTS record protected by history_retention jobs,seconds,GAUGE,SECONDS,AVG,NONE +APPLICATION,jobs.history_retention.protected_record_count,Number of protected timestamp records held by history_retention jobs,records,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.history_retention.resume_completed,Number of 
history_retention jobs which successfully resumed to completion,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.history_retention.resume_failed,Number of history_retention jobs which failed with a non-retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.history_retention.resume_retry_error,Number of history_retention jobs which failed with a retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.import.currently_idle,Number of import jobs currently considered Idle and can be freely shut down,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.import.currently_paused,Number of import jobs currently considered Paused,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.import.currently_running,Number of import jobs currently running in Resume or OnFailOrCancel state,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.import.expired_pts_records,Number of expired protected timestamp records owned by import jobs,records,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.import.fail_or_cancel_completed,Number of import jobs which successfully completed their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.import.fail_or_cancel_failed,Number of import jobs which failed with a non-retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.import.fail_or_cancel_retry_error,Number of import jobs which failed with a retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.import.protected_age_sec,The age of the oldest PTS record protected by import jobs,seconds,GAUGE,SECONDS,AVG,NONE +APPLICATION,jobs.import.protected_record_count,Number of protected timestamp records held by import jobs,records,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.import.resume_completed,Number of import jobs which successfully resumed to completion,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.import.resume_failed,Number of import jobs which failed with a non-retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.import.resume_retry_error,Number of import jobs which failed with a retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.import_rollback.currently_idle,Number of import_rollback jobs currently considered Idle and can be freely shut down,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.import_rollback.currently_paused,Number of import_rollback jobs currently considered Paused,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.import_rollback.currently_running,Number of import_rollback jobs currently running in Resume or OnFailOrCancel state,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.import_rollback.expired_pts_records,Number of expired protected timestamp records owned by import_rollback jobs,records,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.import_rollback.fail_or_cancel_completed,Number of import_rollback jobs which successfully completed their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.import_rollback.fail_or_cancel_failed,Number of import_rollback jobs which failed with a non-retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.import_rollback.fail_or_cancel_retry_error,Number of import_rollback jobs which failed with a retriable error on their failure or cancelation 
process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.import_rollback.protected_age_sec,The age of the oldest PTS record protected by import_rollback jobs,seconds,GAUGE,SECONDS,AVG,NONE +APPLICATION,jobs.import_rollback.protected_record_count,Number of protected timestamp records held by import_rollback jobs,records,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.import_rollback.resume_completed,Number of import_rollback jobs which successfully resumed to completion,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.import_rollback.resume_failed,Number of import_rollback jobs which failed with a non-retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.import_rollback.resume_retry_error,Number of import_rollback jobs which failed with a retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.key_visualizer.currently_idle,Number of key_visualizer jobs currently considered Idle and can be freely shut down,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.key_visualizer.currently_paused,Number of key_visualizer jobs currently considered Paused,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.key_visualizer.currently_running,Number of key_visualizer jobs currently running in Resume or OnFailOrCancel state,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.key_visualizer.expired_pts_records,Number of expired protected timestamp records owned by key_visualizer jobs,records,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.key_visualizer.fail_or_cancel_completed,Number of key_visualizer jobs which successfully completed their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.key_visualizer.fail_or_cancel_failed,Number of key_visualizer jobs which failed with a non-retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.key_visualizer.fail_or_cancel_retry_error,Number of key_visualizer jobs which failed with a retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.key_visualizer.protected_age_sec,The age of the oldest PTS record protected by key_visualizer jobs,seconds,GAUGE,SECONDS,AVG,NONE +APPLICATION,jobs.key_visualizer.protected_record_count,Number of protected timestamp records held by key_visualizer jobs,records,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.key_visualizer.resume_completed,Number of key_visualizer jobs which successfully resumed to completion,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.key_visualizer.resume_failed,Number of key_visualizer jobs which failed with a non-retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.key_visualizer.resume_retry_error,Number of key_visualizer jobs which failed with a retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.logical_replication.currently_idle,Number of logical_replication jobs currently considered Idle and can be freely shut down,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.logical_replication.currently_paused,Number of logical_replication jobs currently considered Paused,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.logical_replication.currently_running,Number of logical_replication jobs currently running in Resume or OnFailOrCancel state,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.logical_replication.expired_pts_records,Number of expired protected timestamp records owned by logical_replication 
jobs,records,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.logical_replication.fail_or_cancel_completed,Number of logical_replication jobs which successfully completed their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.logical_replication.fail_or_cancel_failed,Number of logical_replication jobs which failed with a non-retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.logical_replication.fail_or_cancel_retry_error,Number of logical_replication jobs which failed with a retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.logical_replication.protected_age_sec,The age of the oldest PTS record protected by logical_replication jobs,seconds,GAUGE,SECONDS,AVG,NONE +APPLICATION,jobs.logical_replication.protected_record_count,Number of protected timestamp records held by logical_replication jobs,records,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.logical_replication.resume_completed,Number of logical_replication jobs which successfully resumed to completion,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.logical_replication.resume_failed,Number of logical_replication jobs which failed with a non-retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.logical_replication.resume_retry_error,Number of logical_replication jobs which failed with a retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.metrics.task_failed,Number of metrics poller tasks that failed,errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.migration.currently_idle,Number of migration jobs currently considered Idle and can be freely shut down,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.migration.currently_paused,Number of migration jobs currently considered Paused,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.migration.currently_running,Number of migration jobs currently running in Resume or OnFailOrCancel state,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.migration.expired_pts_records,Number of expired protected timestamp records owned by migration jobs,records,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.migration.fail_or_cancel_completed,Number of migration jobs which successfully completed their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.migration.fail_or_cancel_failed,Number of migration jobs which failed with a non-retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.migration.fail_or_cancel_retry_error,Number of migration jobs which failed with a retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.migration.protected_age_sec,The age of the oldest PTS record protected by migration jobs,seconds,GAUGE,SECONDS,AVG,NONE +APPLICATION,jobs.migration.protected_record_count,Number of protected timestamp records held by migration jobs,records,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.migration.resume_completed,Number of migration jobs which successfully resumed to completion,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.migration.resume_failed,Number of migration jobs which failed with a non-retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.migration.resume_retry_error,Number of migration jobs which failed with a 
retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.mvcc_statistics_update.currently_idle,Number of mvcc_statistics_update jobs currently considered Idle and can be freely shut down,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.mvcc_statistics_update.currently_paused,Number of mvcc_statistics_update jobs currently considered Paused,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.mvcc_statistics_update.currently_running,Number of mvcc_statistics_update jobs currently running in Resume or OnFailOrCancel state,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.mvcc_statistics_update.expired_pts_records,Number of expired protected timestamp records owned by mvcc_statistics_update jobs,records,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.mvcc_statistics_update.fail_or_cancel_completed,Number of mvcc_statistics_update jobs which successfully completed their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.mvcc_statistics_update.fail_or_cancel_failed,Number of mvcc_statistics_update jobs which failed with a non-retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.mvcc_statistics_update.fail_or_cancel_retry_error,Number of mvcc_statistics_update jobs which failed with a retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.mvcc_statistics_update.protected_age_sec,The age of the oldest PTS record protected by mvcc_statistics_update jobs,seconds,GAUGE,SECONDS,AVG,NONE +APPLICATION,jobs.mvcc_statistics_update.protected_record_count,Number of protected timestamp records held by mvcc_statistics_update jobs,records,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.mvcc_statistics_update.resume_completed,Number of mvcc_statistics_update jobs which successfully resumed to completion,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.mvcc_statistics_update.resume_failed,Number of mvcc_statistics_update jobs which failed with a non-retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.mvcc_statistics_update.resume_retry_error,Number of mvcc_statistics_update jobs which failed with a retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.new_schema_change.currently_idle,Number of new_schema_change jobs currently considered Idle and can be freely shut down,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.new_schema_change.currently_paused,Number of new_schema_change jobs currently considered Paused,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.new_schema_change.currently_running,Number of new_schema_change jobs currently running in Resume or OnFailOrCancel state,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.new_schema_change.expired_pts_records,Number of expired protected timestamp records owned by new_schema_change jobs,records,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.new_schema_change.fail_or_cancel_completed,Number of new_schema_change jobs which successfully completed their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.new_schema_change.fail_or_cancel_failed,Number of new_schema_change jobs which failed with a non-retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.new_schema_change.fail_or_cancel_retry_error,Number of new_schema_change jobs which failed with a retriable error on their failure or cancelation 
process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.new_schema_change.protected_age_sec,The age of the oldest PTS record protected by new_schema_change jobs,seconds,GAUGE,SECONDS,AVG,NONE +APPLICATION,jobs.new_schema_change.protected_record_count,Number of protected timestamp records held by new_schema_change jobs,records,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.new_schema_change.resume_completed,Number of new_schema_change jobs which successfully resumed to completion,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.new_schema_change.resume_failed,Number of new_schema_change jobs which failed with a non-retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.new_schema_change.resume_retry_error,Number of new_schema_change jobs which failed with a retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.poll_jobs_stats.currently_idle,Number of poll_jobs_stats jobs currently considered Idle and can be freely shut down,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.poll_jobs_stats.currently_paused,Number of poll_jobs_stats jobs currently considered Paused,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.poll_jobs_stats.currently_running,Number of poll_jobs_stats jobs currently running in Resume or OnFailOrCancel state,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.poll_jobs_stats.expired_pts_records,Number of expired protected timestamp records owned by poll_jobs_stats jobs,records,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.poll_jobs_stats.fail_or_cancel_completed,Number of poll_jobs_stats jobs which successfully completed their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.poll_jobs_stats.fail_or_cancel_failed,Number of poll_jobs_stats jobs which failed with a non-retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.poll_jobs_stats.fail_or_cancel_retry_error,Number of poll_jobs_stats jobs which failed with a retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.poll_jobs_stats.protected_age_sec,The age of the oldest PTS record protected by poll_jobs_stats jobs,seconds,GAUGE,SECONDS,AVG,NONE +APPLICATION,jobs.poll_jobs_stats.protected_record_count,Number of protected timestamp records held by poll_jobs_stats jobs,records,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.poll_jobs_stats.resume_completed,Number of poll_jobs_stats jobs which successfully resumed to completion,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.poll_jobs_stats.resume_failed,Number of poll_jobs_stats jobs which failed with a non-retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.poll_jobs_stats.resume_retry_error,Number of poll_jobs_stats jobs which failed with a retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.replication_stream_ingestion.currently_idle,Number of replication_stream_ingestion jobs currently considered Idle and can be freely shut down,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.replication_stream_ingestion.currently_paused,Number of replication_stream_ingestion jobs currently considered Paused,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.replication_stream_ingestion.currently_running,Number of replication_stream_ingestion jobs currently running in Resume or OnFailOrCancel state,jobs,GAUGE,COUNT,AVG,NONE 
+APPLICATION,jobs.replication_stream_ingestion.expired_pts_records,Number of expired protected timestamp records owned by replication_stream_ingestion jobs,records,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.replication_stream_ingestion.fail_or_cancel_completed,Number of replication_stream_ingestion jobs which successfully completed their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.replication_stream_ingestion.fail_or_cancel_failed,Number of replication_stream_ingestion jobs which failed with a non-retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.replication_stream_ingestion.fail_or_cancel_retry_error,Number of replication_stream_ingestion jobs which failed with a retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.replication_stream_ingestion.protected_age_sec,The age of the oldest PTS record protected by replication_stream_ingestion jobs,seconds,GAUGE,SECONDS,AVG,NONE +APPLICATION,jobs.replication_stream_ingestion.protected_record_count,Number of protected timestamp records held by replication_stream_ingestion jobs,records,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.replication_stream_ingestion.resume_completed,Number of replication_stream_ingestion jobs which successfully resumed to completion,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.replication_stream_ingestion.resume_failed,Number of replication_stream_ingestion jobs which failed with a non-retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.replication_stream_ingestion.resume_retry_error,Number of replication_stream_ingestion jobs which failed with a retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.replication_stream_producer.currently_idle,Number of replication_stream_producer jobs currently considered Idle and can be freely shut down,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.replication_stream_producer.currently_paused,Number of replication_stream_producer jobs currently considered Paused,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.replication_stream_producer.currently_running,Number of replication_stream_producer jobs currently running in Resume or OnFailOrCancel state,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.replication_stream_producer.expired_pts_records,Number of expired protected timestamp records owned by replication_stream_producer jobs,records,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.replication_stream_producer.fail_or_cancel_completed,Number of replication_stream_producer jobs which successfully completed their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.replication_stream_producer.fail_or_cancel_failed,Number of replication_stream_producer jobs which failed with a non-retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.replication_stream_producer.fail_or_cancel_retry_error,Number of replication_stream_producer jobs which failed with a retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.replication_stream_producer.protected_age_sec,The age of the oldest PTS record protected by replication_stream_producer jobs,seconds,GAUGE,SECONDS,AVG,NONE +APPLICATION,jobs.replication_stream_producer.protected_record_count,Number of protected 
timestamp records held by replication_stream_producer jobs,records,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.replication_stream_producer.resume_completed,Number of replication_stream_producer jobs which successfully resumed to completion,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.replication_stream_producer.resume_failed,Number of replication_stream_producer jobs which failed with a non-retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.replication_stream_producer.resume_retry_error,Number of replication_stream_producer jobs which failed with a retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.restore.currently_idle,Number of restore jobs currently considered Idle and can be freely shut down,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.restore.currently_paused,Number of restore jobs currently considered Paused,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.restore.currently_running,Number of restore jobs currently running in Resume or OnFailOrCancel state,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.restore.expired_pts_records,Number of expired protected timestamp records owned by restore jobs,records,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.restore.fail_or_cancel_completed,Number of restore jobs which successfully completed their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.restore.fail_or_cancel_failed,Number of restore jobs which failed with a non-retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.restore.fail_or_cancel_retry_error,Number of restore jobs which failed with a retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.restore.protected_age_sec,The age of the oldest PTS record protected by restore jobs,seconds,GAUGE,SECONDS,AVG,NONE +APPLICATION,jobs.restore.protected_record_count,Number of protected timestamp records held by restore jobs,records,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.restore.resume_completed,Number of restore jobs which successfully resumed to completion,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.restore.resume_failed,Number of restore jobs which failed with a non-retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.restore.resume_retry_error,Number of restore jobs which failed with a retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.resumed_claimed_jobs,number of claimed-jobs resumed in job-adopt iterations,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.row_level_ttl.currently_idle,Number of row_level_ttl jobs currently considered Idle and can be freely shut down,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.row_level_ttl.currently_paused,Number of row_level_ttl jobs currently considered Paused,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.row_level_ttl.currently_running,Number of row_level_ttl jobs currently running in Resume or OnFailOrCancel state,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.row_level_ttl.delete_duration,Duration for delete requests during row level TTL.,nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,jobs.row_level_ttl.expired_pts_records,Number of expired protected timestamp records owned by row_level_ttl jobs,records,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.row_level_ttl.fail_or_cancel_completed,Number of row_level_ttl jobs which successfully 
completed their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.row_level_ttl.fail_or_cancel_failed,Number of row_level_ttl jobs which failed with a non-retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.row_level_ttl.fail_or_cancel_retry_error,Number of row_level_ttl jobs which failed with a retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.row_level_ttl.num_active_spans,Number of active spans the TTL job is deleting from.,num_active_spans,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.row_level_ttl.protected_age_sec,The age of the oldest PTS record protected by row_level_ttl jobs,seconds,GAUGE,SECONDS,AVG,NONE +APPLICATION,jobs.row_level_ttl.protected_record_count,Number of protected timestamp records held by row_level_ttl jobs,records,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.row_level_ttl.resume_completed,Number of row_level_ttl jobs which successfully resumed to completion,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.row_level_ttl.resume_failed,Number of row_level_ttl jobs which failed with a non-retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.row_level_ttl.resume_retry_error,Number of row_level_ttl jobs which failed with a retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.row_level_ttl.rows_deleted,Number of rows deleted by the row level TTL job.,num_rows,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.row_level_ttl.rows_selected,Number of rows selected for deletion by the row level TTL job.,num_rows,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.row_level_ttl.select_duration,Duration for select requests during row level TTL.,nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,jobs.row_level_ttl.span_total_duration,Duration for processing a span during row level TTL.,nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,jobs.row_level_ttl.total_expired_rows,Approximate number of rows that have expired the TTL on the TTL table.,total_expired_rows,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.row_level_ttl.total_rows,Approximate number of rows on the TTL table.,total_rows,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.running_non_idle,number of running jobs that are not idle,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.schema_change.currently_idle,Number of schema_change jobs currently considered Idle and can be freely shut down,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.schema_change.currently_paused,Number of schema_change jobs currently considered Paused,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.schema_change.currently_running,Number of schema_change jobs currently running in Resume or OnFailOrCancel state,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.schema_change.expired_pts_records,Number of expired protected timestamp records owned by schema_change jobs,records,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.schema_change.fail_or_cancel_completed,Number of schema_change jobs which successfully completed their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.schema_change.fail_or_cancel_failed,Number of schema_change jobs which failed with a non-retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.schema_change.fail_or_cancel_retry_error,Number of schema_change jobs which failed 
with a retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.schema_change.protected_age_sec,The age of the oldest PTS record protected by schema_change jobs,seconds,GAUGE,SECONDS,AVG,NONE +APPLICATION,jobs.schema_change.protected_record_count,Number of protected timestamp records held by schema_change jobs,records,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.schema_change.resume_completed,Number of schema_change jobs which successfully resumed to completion,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.schema_change.resume_failed,Number of schema_change jobs which failed with a non-retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.schema_change.resume_retry_error,Number of schema_change jobs which failed with a retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.schema_change_gc.currently_idle,Number of schema_change_gc jobs currently considered Idle and can be freely shut down,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.schema_change_gc.currently_paused,Number of schema_change_gc jobs currently considered Paused,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.schema_change_gc.currently_running,Number of schema_change_gc jobs currently running in Resume or OnFailOrCancel state,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.schema_change_gc.expired_pts_records,Number of expired protected timestamp records owned by schema_change_gc jobs,records,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.schema_change_gc.fail_or_cancel_completed,Number of schema_change_gc jobs which successfully completed their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.schema_change_gc.fail_or_cancel_failed,Number of schema_change_gc jobs which failed with a non-retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.schema_change_gc.fail_or_cancel_retry_error,Number of schema_change_gc jobs which failed with a retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.schema_change_gc.protected_age_sec,The age of the oldest PTS record protected by schema_change_gc jobs,seconds,GAUGE,SECONDS,AVG,NONE +APPLICATION,jobs.schema_change_gc.protected_record_count,Number of protected timestamp records held by schema_change_gc jobs,records,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.schema_change_gc.resume_completed,Number of schema_change_gc jobs which successfully resumed to completion,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.schema_change_gc.resume_failed,Number of schema_change_gc jobs which failed with a non-retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.schema_change_gc.resume_retry_error,Number of schema_change_gc jobs which failed with a retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.standby_read_ts_poller.currently_idle,Number of standby_read_ts_poller jobs currently considered Idle and can be freely shut down,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.standby_read_ts_poller.currently_paused,Number of standby_read_ts_poller jobs currently considered Paused,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.standby_read_ts_poller.currently_running,Number of standby_read_ts_poller jobs currently running in Resume or OnFailOrCancel state,jobs,GAUGE,COUNT,AVG,NONE 
+APPLICATION,jobs.standby_read_ts_poller.expired_pts_records,Number of expired protected timestamp records owned by standby_read_ts_poller jobs,records,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.standby_read_ts_poller.fail_or_cancel_completed,Number of standby_read_ts_poller jobs which successfully completed their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.standby_read_ts_poller.fail_or_cancel_failed,Number of standby_read_ts_poller jobs which failed with a non-retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.standby_read_ts_poller.fail_or_cancel_retry_error,Number of standby_read_ts_poller jobs which failed with a retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.standby_read_ts_poller.protected_age_sec,The age of the oldest PTS record protected by standby_read_ts_poller jobs,seconds,GAUGE,SECONDS,AVG,NONE +APPLICATION,jobs.standby_read_ts_poller.protected_record_count,Number of protected timestamp records held by standby_read_ts_poller jobs,records,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.standby_read_ts_poller.resume_completed,Number of standby_read_ts_poller jobs which successfully resumed to completion,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.standby_read_ts_poller.resume_failed,Number of standby_read_ts_poller jobs which failed with a non-retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.standby_read_ts_poller.resume_retry_error,Number of standby_read_ts_poller jobs which failed with a retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.typedesc_schema_change.currently_idle,Number of typedesc_schema_change jobs currently considered Idle and can be freely shut down,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.typedesc_schema_change.currently_paused,Number of typedesc_schema_change jobs currently considered Paused,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.typedesc_schema_change.currently_running,Number of typedesc_schema_change jobs currently running in Resume or OnFailOrCancel state,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.typedesc_schema_change.expired_pts_records,Number of expired protected timestamp records owned by typedesc_schema_change jobs,records,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.typedesc_schema_change.fail_or_cancel_completed,Number of typedesc_schema_change jobs which successfully completed their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.typedesc_schema_change.fail_or_cancel_failed,Number of typedesc_schema_change jobs which failed with a non-retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.typedesc_schema_change.fail_or_cancel_retry_error,Number of typedesc_schema_change jobs which failed with a retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.typedesc_schema_change.protected_age_sec,The age of the oldest PTS record protected by typedesc_schema_change jobs,seconds,GAUGE,SECONDS,AVG,NONE +APPLICATION,jobs.typedesc_schema_change.protected_record_count,Number of protected timestamp records held by typedesc_schema_change jobs,records,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.typedesc_schema_change.resume_completed,Number of typedesc_schema_change jobs which 
successfully resumed to completion,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.typedesc_schema_change.resume_failed,Number of typedesc_schema_change jobs which failed with a non-retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.typedesc_schema_change.resume_retry_error,Number of typedesc_schema_change jobs which failed with a retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.update_table_metadata_cache.currently_idle,Number of update_table_metadata_cache jobs currently considered Idle and can be freely shut down,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.update_table_metadata_cache.currently_paused,Number of update_table_metadata_cache jobs currently considered Paused,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.update_table_metadata_cache.currently_running,Number of update_table_metadata_cache jobs currently running in Resume or OnFailOrCancel state,jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.update_table_metadata_cache.expired_pts_records,Number of expired protected timestamp records owned by update_table_metadata_cache jobs,records,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.update_table_metadata_cache.fail_or_cancel_completed,Number of update_table_metadata_cache jobs which successfully completed their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.update_table_metadata_cache.fail_or_cancel_failed,Number of update_table_metadata_cache jobs which failed with a non-retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.update_table_metadata_cache.fail_or_cancel_retry_error,Number of update_table_metadata_cache jobs which failed with a retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.update_table_metadata_cache.protected_age_sec,The age of the oldest PTS record protected by update_table_metadata_cache jobs,seconds,GAUGE,SECONDS,AVG,NONE +APPLICATION,jobs.update_table_metadata_cache.protected_record_count,Number of protected timestamp records held by update_table_metadata_cache jobs,records,GAUGE,COUNT,AVG,NONE +APPLICATION,jobs.update_table_metadata_cache.resume_completed,Number of update_table_metadata_cache jobs which successfully resumed to completion,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.update_table_metadata_cache.resume_failed,Number of update_table_metadata_cache jobs which failed with a non-retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,jobs.update_table_metadata_cache.resume_retry_error,Number of update_table_metadata_cache jobs which failed with a retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,kv.protectedts.reconciliation.errors,number of errors encountered during reconciliation runs on this node,Count,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,kv.protectedts.reconciliation.num_runs,number of successful reconciliation runs on this node,Count,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,kv.protectedts.reconciliation.records_processed,number of records processed without error during reconciliation on this node,Count,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,kv.protectedts.reconciliation.records_removed,number of records removed during reconciliation runs on this node,Count,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,kv.streamer.batches.in_progress,Number of BatchRequests 
in progress across all KV Streamer operators,Batches,GAUGE,COUNT,AVG,NONE +APPLICATION,kv.streamer.batches.sent,Number of BatchRequests sent across all KV Streamer operators,Batches,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,kv.streamer.batches.throttled,"Number of BatchRequests currently being throttled due to reaching the concurrency limit, across all KV Streamer operators",Batches,GAUGE,COUNT,AVG,NONE +APPLICATION,kv.streamer.operators.active,Number of KV Streamer operators currently in use,Operators,GAUGE,COUNT,AVG,NONE +APPLICATION,logical_replication.batch_hist_nanos,Time spent flushing a batch,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,logical_replication.catchup_ranges,Source side ranges undergoing catch up scans (inaccurate with multiple LDR jobs),Ranges,GAUGE,COUNT,AVG,NONE +APPLICATION,logical_replication.catchup_ranges_by_label,Source side ranges undergoing catch up scans,Ranges,GAUGE,COUNT,AVG,NONE +APPLICATION,logical_replication.checkpoint_events_ingested,Checkpoint events ingested by all replication jobs,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,logical_replication.commit_latency,"Event commit latency: a difference between event MVCC timestamp and the time it was flushed into disk. If we batch events, then the difference between the oldest event in the batch and flush is recorded",Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,logical_replication.events_dlqed,Row update events sent to DLQ,Failures,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,logical_replication.events_dlqed_age,Row update events sent to DLQ due to reaching the maximum time allowed in the retry queue,Failures,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,logical_replication.events_dlqed_by_label,Row update events sent to DLQ by label,Failures,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,logical_replication.events_dlqed_errtype,Row update events sent to DLQ due to an error not considered retryable,Failures,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,logical_replication.events_dlqed_space,Row update events sent to DLQ due to capacity of the retry queue,Failures,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,logical_replication.events_ingested,Events ingested by all replication jobs,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,logical_replication.events_ingested_by_label,Events ingested by all replication jobs by label,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,logical_replication.events_initial_failure,Failed attempts to apply an incoming row update,Failures,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,logical_replication.events_initial_success,Successful applications of an incoming row update,Failures,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,logical_replication.events_retry_failure,Failed re-attempts to apply a row update,Failures,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,logical_replication.events_retry_success,Row update events applied after one or more retries,Failures,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,logical_replication.kv.update_too_old,Total number of updates that were not applied because they were too old,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,logical_replication.kv.value_refreshes,Total number of batches that refreshed the previous value,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,logical_replication.logical_bytes,Logical bytes (sum of keys + values) received by all 
replication jobs,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,logical_replication.replan_count,Total number of dist sql replanning events,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,logical_replication.replicated_time_by_label,Replicated time of the logical replication stream by label,Seconds,GAUGE,SECONDS,AVG,NONE +APPLICATION,logical_replication.replicated_time_seconds,The replicated time of the logical replication stream in seconds since the unix epoch.,Seconds,GAUGE,SECONDS,AVG,NONE +APPLICATION,logical_replication.retry_queue_bytes,Logical bytes (sum of keys+values) in the retry queue,Bytes,GAUGE,BYTES,AVG,NONE +APPLICATION,logical_replication.retry_queue_events,Row update events in the retry queue,Events,GAUGE,COUNT,AVG,NONE +APPLICATION,logical_replication.scanning_ranges,Source side ranges undergoing an initial scan (inaccurate with multiple LDR jobs),Ranges,GAUGE,COUNT,AVG,NONE +APPLICATION,logical_replication.scanning_ranges_by_label,Source side ranges undergoing an initial scan,Ranges,GAUGE,COUNT,AVG,NONE +APPLICATION,obs.tablemetadata.update_job.duration,Time spent running the update table metadata job.,Duration,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,obs.tablemetadata.update_job.errors,The total number of errors that have been emitted from the update table metadata job.,Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,obs.tablemetadata.update_job.runs,The total number of runs of the update table metadata job.,Executions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,obs.tablemetadata.update_job.table_updates,The total number of rows that have been updated in system.table_metadata,Rows Updated,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,physical_replication.admit_latency,Event admission latency: a difference between event MVCC timestamp and the time it was admitted into ingestion processor,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,physical_replication.commit_latency,"Event commit latency: a difference between event MVCC timestamp and the time it was flushed into disk. 
If we batch events, then the difference between the oldest event in the batch and flush is recorded",Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,physical_replication.distsql_replan_count,Total number of dist sql replanning events,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,physical_replication.events_ingested,Events ingested by all replication jobs,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,physical_replication.failover_progress,The number of ranges left to revert in order to complete an inflight cutover,Ranges,GAUGE,COUNT,AVG,NONE +APPLICATION,physical_replication.flush_hist_nanos,Time spent flushing messages across all replication streams,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,physical_replication.flushes,Total flushes across all replication jobs,Flushes,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,physical_replication.logical_bytes,Logical bytes (sum of keys + values) ingested by all replication jobs,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,physical_replication.replicated_time_seconds,The replicated time of the physical replication stream in seconds since the unix epoch.,Seconds,GAUGE,SECONDS,AVG,NONE +APPLICATION,physical_replication.resolved_events_ingested,Resolved events ingested by all replication jobs,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,physical_replication.running,Number of currently running replication streams,Replication Streams,GAUGE,COUNT,AVG,NONE +APPLICATION,requests.slow.distsender,"Number of range-bound RPCs currently stuck or retrying for a long time. + +Note that this is not a good signal for KV health. The remote side of the +RPCs tracked here may experience contention, so an end user can easily +cause values for this metric to be emitted by leaving a transaction open +for a long time and contending with it using a second transaction.",Requests,GAUGE,COUNT,AVG,NONE +APPLICATION,round-trip-latency,"Distribution of round-trip latencies with other nodes. + +This only reflects successful heartbeats and measures gRPC overhead as well as +possible head-of-line blocking. Elevated values in this metric may hint at +network issues and/or saturation, but they are no proof of them. CPU overload +can similarly elevate this metric. The operator should look towards OS-level +metrics such as packet loss, retransmits, etc, to conclusively diagnose network +issues. Heartbeats are not very frequent (~seconds), so they may not capture +rare or short-lived degradations. +",Round-trip time,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,rpc.client.bytes.egress,Counter of TCP bytes sent via gRPC on connections we initiated.,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,rpc.client.bytes.ingress,Counter of TCP bytes received via gRPC on connections we initiated.,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,rpc.connection.avg_round_trip_latency,"Sum of exponentially weighted moving average of round-trip latencies, as measured through a gRPC RPC. + +Dividing this Gauge by rpc.connection.healthy gives an approximation of average +latency, but the top-level round-trip-latency histogram is more useful. Instead, +users should consult the label families of this metric if they are available +(which requires prometheus and the cluster setting 'server.child_metrics.enabled'); +these provide per-peer moving averages. + +This metric does not track failed connection. A failed connection's contribution +is reset to zero. 
+",Latency,GAUGE,NANOSECONDS,AVG,NONE +APPLICATION,rpc.connection.connected,"Counter of TCP level connected connections. + +This metric is the number of gRPC connections from the TCP level. Unlike rpc.connection.healthy +this metric does not take into account whether the application has been able to heartbeat +over this connection. +",Connections,GAUGE,COUNT,AVG,NONE +APPLICATION,rpc.connection.failures,"Counter of failed connections. + +This includes both the event in which a healthy connection terminates as well as +unsuccessful reconnection attempts. + +Connections that are terminated as part of local node shutdown are excluded. +Decommissioned peers are excluded. +",Connections,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,rpc.connection.healthy,Gauge of current connections in a healthy state (i.e. bidirectionally connected and heartbeating),Connections,GAUGE,COUNT,AVG,NONE +APPLICATION,rpc.connection.healthy_nanos,"Gauge of nanoseconds of healthy connection time + +On the prometheus endpoint scraped with the cluster setting 'server.child_metrics.enabled' set, +the constituent parts of this metric are available on a per-peer basis and one can read off +for how long a given peer has been connected",Nanoseconds,GAUGE,NANOSECONDS,AVG,NONE +APPLICATION,rpc.connection.heartbeats,Counter of successful heartbeats.,Heartbeats,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,rpc.connection.inactive,"Gauge of current connections in an inactive state and pending deletion; these are not healthy but are not tracked as unhealthy either because there is reason to believe that the connection is no longer relevant,for example if the node has since been seen under a new address",Connections,GAUGE,COUNT,AVG,NONE +APPLICATION,rpc.connection.unhealthy,Gauge of current connections in an unhealthy state (not bidirectionally connected or heartbeating),Connections,GAUGE,COUNT,AVG,NONE +APPLICATION,rpc.connection.unhealthy_nanos,"Gauge of nanoseconds of unhealthy connection time. 
+ +On the prometheus endpoint scraped with the cluster setting 'server.child_metrics.enabled' set, +the constituent parts of this metric are available on a per-peer basis and one can read off +for how long a given peer has been unreachable",Nanoseconds,GAUGE,NANOSECONDS,AVG,NONE +APPLICATION,schedules.BACKUP.failed,Number of BACKUP jobs failed,Jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,schedules.BACKUP.last-completed-time,The unix timestamp of the most recently completed backup by a schedule specified as maintaining this metric,Jobs,GAUGE,TIMESTAMP_SEC,AVG,NONE +APPLICATION,schedules.BACKUP.protected_age_sec,The age of the oldest PTS record protected by BACKUP schedules,Seconds,GAUGE,SECONDS,AVG,NONE +APPLICATION,schedules.BACKUP.protected_record_count,Number of PTS records held by BACKUP schedules,Records,GAUGE,COUNT,AVG,NONE +APPLICATION,schedules.BACKUP.started,Number of BACKUP jobs started,Jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,schedules.BACKUP.succeeded,Number of BACKUP jobs succeeded,Jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,schedules.CHANGEFEED.failed,Number of CHANGEFEED jobs failed,Jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,schedules.CHANGEFEED.started,Number of CHANGEFEED jobs started,Jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,schedules.CHANGEFEED.succeeded,Number of CHANGEFEED jobs succeeded,Jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,schedules.error,Number of schedules which did not execute successfully,Schedules,GAUGE,COUNT,AVG,NONE +APPLICATION,schedules.malformed,Number of malformed schedules,Schedules,GAUGE,COUNT,AVG,NONE +APPLICATION,schedules.round.jobs-started,The number of jobs started,Jobs,GAUGE,COUNT,AVG,NONE +APPLICATION,schedules.round.reschedule-skip,The number of schedules rescheduled due to SKIP policy,Schedules,GAUGE,COUNT,AVG,NONE +APPLICATION,schedules.round.reschedule-wait,The number of schedules rescheduled due to WAIT policy,Schedules,GAUGE,COUNT,AVG,NONE +APPLICATION,schedules.scheduled-row-level-ttl-executor.failed,Number of scheduled-row-level-ttl-executor jobs failed,Jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,schedules.scheduled-row-level-ttl-executor.started,Number of scheduled-row-level-ttl-executor jobs started,Jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,schedules.scheduled-row-level-ttl-executor.succeeded,Number of scheduled-row-level-ttl-executor jobs succeeded,Jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,schedules.scheduled-schema-telemetry-executor.failed,Number of scheduled-schema-telemetry-executor jobs failed,Jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,schedules.scheduled-schema-telemetry-executor.started,Number of scheduled-schema-telemetry-executor jobs started,Jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,schedules.scheduled-schema-telemetry-executor.succeeded,Number of scheduled-schema-telemetry-executor jobs succeeded,Jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,schedules.scheduled-sql-stats-compaction-executor.failed,Number of scheduled-sql-stats-compaction-executor jobs failed,Jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,schedules.scheduled-sql-stats-compaction-executor.started,Number of scheduled-sql-stats-compaction-executor jobs started,Jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,schedules.scheduled-sql-stats-compaction-executor.succeeded,Number of scheduled-sql-stats-compaction-executor jobs 
succeeded,Jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,server.http.request.duration.nanos,Duration of an HTTP request in nanoseconds.,Duration,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,sql.bytesin,Number of SQL bytes received,SQL Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.bytesout,Number of SQL bytes sent,SQL Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.conn.failures,Number of SQL connection failures,Connections,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.conn.latency,Latency to establish and authenticate a SQL connection,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,sql.conns,Number of open SQL connections,Connections,GAUGE,COUNT,AVG,NONE +APPLICATION,sql.conns_waiting_to_hash,Number of SQL connection attempts that are being throttled in order to limit password hashing concurrency,Connections,GAUGE,COUNT,AVG,NONE +APPLICATION,sql.contention.resolver.failed_resolutions,Number of failed transaction ID resolution attempts,Failed transaction ID resolution count,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.contention.resolver.queue_size,Length of queued unresolved contention events,Queue length,GAUGE,COUNT,AVG,NONE +APPLICATION,sql.contention.resolver.retries,Number of times transaction ID resolution has been retried,Retry count,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.contention.txn_id_cache.miss,Number of cache misses,Cache miss,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.contention.txn_id_cache.read,Number of cache reads,Cache read,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.copy.count,Number of COPY SQL statements successfully executed,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.copy.count.internal,Number of COPY SQL statements successfully executed (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.copy.nonatomic.count,Number of non-atomic COPY SQL statements successfully executed,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.copy.nonatomic.count.internal,Number of non-atomic COPY SQL statements successfully executed (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.copy.nonatomic.started.count,Number of non-atomic COPY SQL statements started,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.copy.nonatomic.started.count.internal,Number of non-atomic COPY SQL statements started (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.copy.started.count,Number of COPY SQL statements started,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.copy.started.count.internal,Number of COPY SQL statements started (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.crud_query.count,"Number of SQL SELECT, INSERT, UPDATE, DELETE statements successfully executed",SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.crud_query.count.internal,"Number of SQL SELECT, INSERT, UPDATE, DELETE statements successfully executed (internal queries)",SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.crud_query.started.count,"Number of SQL SELECT, INSERT, UPDATE, DELETE statements started",SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.crud_query.started.count.internal,"Number of SQL SELECT, 
INSERT, UPDATE, DELETE statements started (internal queries)",SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.ddl.count,Number of SQL DDL statements successfully executed,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.ddl.count.internal,Number of SQL DDL statements successfully executed (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.ddl.started.count,Number of SQL DDL statements started,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.ddl.started.count.internal,Number of SQL DDL statements started (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.delete.count,Number of SQL DELETE statements successfully executed,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.delete.count.internal,Number of SQL DELETE statements successfully executed (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.delete.started.count,Number of SQL DELETE statements started,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.delete.started.count.internal,Number of SQL DELETE statements started (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.disk.distsql.current,Current sql statement disk usage for distsql,Disk,GAUGE,BYTES,AVG,NONE +APPLICATION,sql.disk.distsql.max,Disk usage per sql statement for distsql,Disk,HISTOGRAM,BYTES,AVG,NONE +APPLICATION,sql.disk.distsql.spilled.bytes.read,Number of bytes read from temporary disk storage as a result of spilling,Disk,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.disk.distsql.spilled.bytes.written,Number of bytes written to temporary disk storage as a result of spilling,Disk,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.distsql.contended_queries.count,Number of SQL queries that experienced contention,Queries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.distsql.cumulative_contention_nanos,Cumulative contention across all queries (in nanoseconds),Nanoseconds,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.distsql.dist_query_rerun_locally.count,Total number of cases when distributed query error resulted in a local rerun,Queries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.distsql.dist_query_rerun_locally.failure_count,Total number of cases when the local rerun of a distributed query resulted in an error,Queries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.distsql.distributed_exec.count,Number of invocations of the execution engine executed with full or partial distribution (multiple of which may occur for a single SQL statement),DistSQL runs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.distsql.exec.latency,Latency of DistSQL statement execution,Latency,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,sql.distsql.exec.latency.internal,Latency of DistSQL statement execution (internal queries),SQL Internal Statements,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,sql.distsql.flows.active,Number of distributed SQL flows currently active,Flows,GAUGE,COUNT,AVG,NONE +APPLICATION,sql.distsql.flows.total,Number of distributed SQL flows executed,Flows,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.distsql.queries.active,Number of invocations of the execution engine currently active (multiple of which may occur for a single SQL statement),DistSQL 
runs,GAUGE,COUNT,AVG,NONE +APPLICATION,sql.distsql.queries.spilled,Number of queries that have spilled to disk,Queries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.distsql.queries.total,Number of invocations of the execution engine executed (multiple of which may occur for a single SQL statement),DistSQL runs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.distsql.select.count,Number of SELECT statements planned to be distributed,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.distsql.select.count.internal,Number of SELECT statements planned to be distributed (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.distsql.select.distributed_exec.count,Number of SELECT statements that were distributed,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.distsql.select.distributed_exec.count.internal,Number of SELECT statements that were distributed (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.distsql.service.latency,Latency of DistSQL request execution,Latency,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,sql.distsql.service.latency.internal,Latency of DistSQL request execution (internal queries),SQL Internal Statements,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,sql.distsql.vec.openfds,Current number of open file descriptors used by vectorized external storage,Files,GAUGE,COUNT,AVG,NONE +APPLICATION,sql.exec.latency,Latency of SQL statement execution,Latency,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,sql.exec.latency.detail,"Latency of SQL statement execution, by statement fingerprint",Latency,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,sql.exec.latency.detail.internal,"Latency of SQL statement execution, by statement fingerprint (internal queries)",SQL Internal Statements,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,sql.exec.latency.internal,Latency of SQL statement execution (internal queries),SQL Internal Statements,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,sql.failure.count,Number of statements resulting in a planning or runtime error,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.failure.count.internal,Number of statements resulting in a planning or runtime error (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.feature_flag_denial,Counter of the number of statements denied by a feature flag,Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.full.scan.count,Number of full table or index scans,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.full.scan.count.internal,Number of full table or index scans (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.guardrails.full_scan_rejected.count,Number of full table or index scans that have been rejected because of `disallow_full_table_scans` guardrail,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.guardrails.full_scan_rejected.count.internal,Number of full table or index scans that have been rejected because of `disallow_full_table_scans` guardrail (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.guardrails.max_row_size_err.count,Number of rows observed violating sql.guardrails.max_row_size_err,Rows,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.guardrails.max_row_size_err.count.internal,Number of rows observed 
violating sql.guardrails.max_row_size_err (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.guardrails.max_row_size_log.count,Number of rows observed violating sql.guardrails.max_row_size_log,Rows,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.guardrails.max_row_size_log.count.internal,Number of rows observed violating sql.guardrails.max_row_size_log (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.guardrails.transaction_rows_read_err.count,Number of transactions errored because of transaction_rows_read_err guardrail,Errored transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.guardrails.transaction_rows_read_err.count.internal,Number of transactions errored because of transaction_rows_read_err guardrail (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.guardrails.transaction_rows_read_log.count,Number of transactions logged because of transaction_rows_read_log guardrail,Logged transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.guardrails.transaction_rows_read_log.count.internal,Number of transactions logged because of transaction_rows_read_log guardrail (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.guardrails.transaction_rows_written_err.count,Number of transactions errored because of transaction_rows_written_err guardrail,Errored transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.guardrails.transaction_rows_written_err.count.internal,Number of transactions errored because of transaction_rows_written_err guardrail (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.guardrails.transaction_rows_written_log.count,Number of transactions logged because of transaction_rows_written_log guardrail,Logged transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.guardrails.transaction_rows_written_log.count.internal,Number of transactions logged because of transaction_rows_written_log guardrail (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.hydrated_schema_cache.hits,Counter of the number of cache hits,reads,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.hydrated_schema_cache.misses,Counter of the number of cache misses,reads,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.hydrated_table_cache.hits,Counter of the number of cache hits,reads,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.hydrated_table_cache.misses,Counter of the number of cache misses,reads,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.hydrated_type_cache.hits,Counter of the number of cache hits,reads,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.hydrated_type_cache.misses,Counter of the number of cache misses,reads,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.hydrated_udf_cache.hits,Counter of the number of cache hits,reads,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.hydrated_udf_cache.misses,Counter of the number of cache misses,reads,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.insert.count,Number of SQL INSERT statements successfully executed,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.insert.count.internal,Number of SQL INSERT statements successfully executed (internal queries),SQL Internal 
Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.insert.started.count,Number of SQL INSERT statements started,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.insert.started.count.internal,Number of SQL INSERT statements started (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.insights.anomaly_detection.evictions,Evictions of fingerprint latency summaries due to memory pressure,Evictions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.insights.anomaly_detection.fingerprints,Current number of statement fingerprints being monitored for anomaly detection,Fingerprints,GAUGE,COUNT,AVG,NONE +APPLICATION,sql.insights.anomaly_detection.memory,Current memory used to support anomaly detection,Memory,GAUGE,BYTES,AVG,NONE +APPLICATION,sql.leases.active,The number of outstanding SQL schema leases.,Outstanding leases,GAUGE,COUNT,AVG,NONE +APPLICATION,sql.leases.expired,The number of outstanding session-based SQL schema leases that have expired.,Leases expired because of a new version,GAUGE,COUNT,AVG,NONE +APPLICATION,sql.leases.long_wait_for_initial_version,The number of wait for initial version routines taking more than the lease duration.,Number of wait for initial version routines executing,GAUGE,COUNT,AVG,NONE +APPLICATION,sql.leases.long_wait_for_no_version,The number of wait for no version routines that are taking more than the lease duration.,Number of long wait for no version routines executing,GAUGE,COUNT,AVG,NONE +APPLICATION,sql.leases.long_wait_for_one_version,The number of wait for one version routines that are taking more than the lease duration.,Number of long wait for one version routines executing,GAUGE,COUNT,AVG,NONE +APPLICATION,sql.leases.long_wait_for_two_version_invariant,The number of two version invariant waits that are taking more than the lease duration.,Number of two version invariant wait routines executing,GAUGE,COUNT,AVG,NONE +APPLICATION,sql.leases.waiting_to_expire,The number of outstanding session-based SQL schema leases with expiry.,Outstanding Leases Waiting to Expire,GAUGE,COUNT,AVG,NONE +APPLICATION,sql.mem.bulk.current,Current sql statement memory usage for bulk operations,Memory,GAUGE,BYTES,AVG,NONE +APPLICATION,sql.mem.bulk.max,Memory usage per sql statement for bulk operations,Memory,HISTOGRAM,BYTES,AVG,NONE +APPLICATION,sql.mem.conns.current,Current sql statement memory usage for conns,Memory,GAUGE,BYTES,AVG,NONE +APPLICATION,sql.mem.conns.max,Memory usage per sql statement for conns,Memory,HISTOGRAM,BYTES,AVG,NONE +APPLICATION,sql.mem.distsql.current,Current sql statement memory usage for distsql,Memory,GAUGE,BYTES,AVG,NONE +APPLICATION,sql.mem.distsql.max,Memory usage per sql statement for distsql,Memory,HISTOGRAM,BYTES,AVG,NONE +APPLICATION,sql.mem.internal.current,Current sql statement memory usage for internal,Memory,GAUGE,BYTES,AVG,NONE +APPLICATION,sql.mem.internal.max,Memory usage per sql statement for internal,Memory,HISTOGRAM,BYTES,AVG,NONE +APPLICATION,sql.mem.internal.session.current,Current sql session memory usage for internal,Memory,GAUGE,BYTES,AVG,NONE +APPLICATION,sql.mem.internal.session.max,Memory usage per sql session for internal,Memory,HISTOGRAM,BYTES,AVG,NONE +APPLICATION,sql.mem.internal.session.prepared.current,Current sql session memory usage by prepared statements for internal,Memory,GAUGE,BYTES,AVG,NONE +APPLICATION,sql.mem.internal.session.prepared.max,Memory usage by prepared statements per sql session for 
internal,Memory,HISTOGRAM,BYTES,AVG,NONE +APPLICATION,sql.mem.internal.txn.current,Current sql transaction memory usage for internal,Memory,GAUGE,BYTES,AVG,NONE +APPLICATION,sql.mem.internal.txn.max,Memory usage per sql transaction for internal,Memory,HISTOGRAM,BYTES,AVG,NONE +APPLICATION,sql.mem.root.current,Current sql statement memory usage for root,Memory,GAUGE,BYTES,AVG,NONE +APPLICATION,sql.mem.root.max,Memory usage per sql statement for root,Memory,HISTOGRAM,BYTES,AVG,NONE +APPLICATION,sql.mem.sql.current,Current sql statement memory usage for sql,Memory,GAUGE,BYTES,AVG,NONE +APPLICATION,sql.mem.sql.max,Memory usage per sql statement for sql,Memory,HISTOGRAM,BYTES,AVG,NONE +APPLICATION,sql.mem.sql.session.current,Current sql session memory usage for sql,Memory,GAUGE,BYTES,AVG,NONE +APPLICATION,sql.mem.sql.session.max,Memory usage per sql session for sql,Memory,HISTOGRAM,BYTES,AVG,NONE +APPLICATION,sql.mem.sql.session.prepared.current,Current sql session memory usage by prepared statements for sql,Memory,GAUGE,BYTES,AVG,NONE +APPLICATION,sql.mem.sql.session.prepared.max,Memory usage by prepared statements per sql session for sql,Memory,HISTOGRAM,BYTES,AVG,NONE +APPLICATION,sql.mem.sql.txn.current,Current sql transaction memory usage for sql,Memory,GAUGE,BYTES,AVG,NONE +APPLICATION,sql.mem.sql.txn.max,Memory usage per sql transaction for sql,Memory,HISTOGRAM,BYTES,AVG,NONE +APPLICATION,sql.misc.count,Number of other SQL statements successfully executed,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.misc.count.internal,Number of other SQL statements successfully executed (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.misc.started.count,Number of other SQL statements started,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.misc.started.count.internal,Number of other SQL statements started (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.new_conns,Number of SQL connections created,Connections,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.optimizer.fallback.count,Number of statements which the cost-based optimizer was unable to plan,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.optimizer.fallback.count.internal,Number of statements which the cost-based optimizer was unable to plan (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.optimizer.plan_cache.hits,Number of non-prepared statements for which a cached plan was used,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.optimizer.plan_cache.hits.internal,Number of non-prepared statements for which a cached plan was used (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.optimizer.plan_cache.misses,Number of non-prepared statements for which a cached plan was not used,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.optimizer.plan_cache.misses.internal,Number of non-prepared statements for which a cached plan was not used (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.pgwire.pipeline.count,Number of pgwire commands received by the server that have not yet begun processing,Commands,GAUGE,COUNT,AVG,NONE +APPLICATION,sql.pgwire_cancel.ignored,Number of pgwire query cancel requests that were ignored due to rate 
limiting,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.pgwire_cancel.successful,Number of pgwire query cancel requests that were successful,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.pgwire_cancel.total,Number of pgwire query cancel requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.pre_serve.bytesin,Number of SQL bytes received prior to routing the connection to the target SQL server,SQL Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.pre_serve.bytesout,Number of SQL bytes sent prior to routing the connection to the target SQL server,SQL Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.pre_serve.conn.failures,Number of SQL connection failures prior to routing the connection to the target SQL server,Connections,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.pre_serve.mem.cur,Current memory usage for SQL connections prior to routing the connection to the target SQL server,Memory,GAUGE,BYTES,AVG,NONE +APPLICATION,sql.pre_serve.mem.max,Memory usage for SQL connections prior to routing the connection to the target SQL server,Memory,HISTOGRAM,BYTES,AVG,NONE +APPLICATION,sql.pre_serve.new_conns,Number of SQL connections created prior to routing the connection to the target SQL server,Connections,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.query.count,"Number of SQL operations started, including queries and transaction control statements",SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.query.count.internal,"Number of SQL operations started, including queries and transaction control statements (internal queries)",SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.query.started.count,"Number of SQL operations started, including queries and transaction control statements",SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.query.started.count.internal,"Number of SQL operations started, including queries and transaction control statements (internal queries)",SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.query.unique.count,Cardinality estimate of the set of statement fingerprints,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.query.unique.count.internal,Cardinality estimate of the set of statement fingerprints (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.restart_savepoint.count,Number of `SAVEPOINT cockroach_restart` statements successfully executed,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.restart_savepoint.count.internal,Number of `SAVEPOINT cockroach_restart` statements successfully executed (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.restart_savepoint.release.count,Number of `RELEASE SAVEPOINT cockroach_restart` statements successfully executed,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.restart_savepoint.release.count.internal,Number of `RELEASE SAVEPOINT cockroach_restart` statements successfully executed (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.restart_savepoint.release.started.count,Number of `RELEASE SAVEPOINT cockroach_restart` statements started,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.restart_savepoint.release.started.count.internal,Number of `RELEASE 
SAVEPOINT cockroach_restart` statements started (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.restart_savepoint.rollback.count,Number of `ROLLBACK TO SAVEPOINT cockroach_restart` statements successfully executed,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.restart_savepoint.rollback.count.internal,Number of `ROLLBACK TO SAVEPOINT cockroach_restart` statements successfully executed (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.restart_savepoint.rollback.started.count,Number of `ROLLBACK TO SAVEPOINT cockroach_restart` statements started,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.restart_savepoint.rollback.started.count.internal,Number of `ROLLBACK TO SAVEPOINT cockroach_restart` statements started (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.restart_savepoint.started.count,Number of `SAVEPOINT cockroach_restart` statements started,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.restart_savepoint.started.count.internal,Number of `SAVEPOINT cockroach_restart` statements started (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.savepoint.count,Number of SQL SAVEPOINT statements successfully executed,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.savepoint.count.internal,Number of SQL SAVEPOINT statements successfully executed (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.savepoint.release.count,Number of `RELEASE SAVEPOINT` statements successfully executed,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.savepoint.release.count.internal,Number of `RELEASE SAVEPOINT` statements successfully executed (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.savepoint.release.started.count,Number of `RELEASE SAVEPOINT` statements started,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.savepoint.release.started.count.internal,Number of `RELEASE SAVEPOINT` statements started (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.savepoint.rollback.count,Number of `ROLLBACK TO SAVEPOINT` statements successfully executed,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.savepoint.rollback.count.internal,Number of `ROLLBACK TO SAVEPOINT` statements successfully executed (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.savepoint.rollback.started.count,Number of `ROLLBACK TO SAVEPOINT` statements started,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.savepoint.rollback.started.count.internal,Number of `ROLLBACK TO SAVEPOINT` statements started (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.savepoint.started.count,Number of SQL SAVEPOINT statements started,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.savepoint.started.count.internal,Number of SQL SAVEPOINT statements started (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.schema.invalid_objects,Gauge of detected invalid objects within the system.descriptor table (measured by querying 
crdb_internal.invalid_objects),Objects,GAUGE,COUNT,AVG,NONE +APPLICATION,sql.schema_changer.object_count,Counter of the number of objects in the cluster,Objects,GAUGE,COUNT,AVG,NONE +APPLICATION,sql.select.count,Number of SQL SELECT statements successfully executed,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.select.count.internal,Number of SQL SELECT statements successfully executed (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.select.started.count,Number of SQL SELECT statements started,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.select.started.count.internal,Number of SQL SELECT statements started (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.service.latency,Latency of SQL request execution,Latency,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,sql.service.latency.internal,Latency of SQL request execution (internal queries),SQL Internal Statements,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,sql.statements.active,Number of currently active user SQL statements,Active Statements,GAUGE,COUNT,AVG,NONE +APPLICATION,sql.statements.active.internal,Number of currently active user SQL statements (internal queries),SQL Internal Statements,GAUGE,COUNT,AVG,NONE +APPLICATION,sql.stats.activity.update.latency,The latency of updates made by the SQL activity updater job. Includes failed update attempts,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,sql.stats.activity.updates.failed,Number of update attempts made by the SQL activity updater job that failed with errors,failed updates,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.stats.activity.updates.successful,Number of successful updates made by the SQL activity updater job,successful updates,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.stats.cleanup.rows_removed,Number of stale statistics rows that are removed,SQL Stats Cleanup,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.stats.discarded.current,Number of fingerprint statistics being discarded,Discarded SQL Stats,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.stats.flush.done_signals.ignored,Number of times the SQL Stats activity update job ignored the signal sent to it indicating a flush has completed,flush done signals ignored,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.stats.flush.fingerprint.count,The number of unique statement and transaction fingerprints included in the SQL Stats flush,statement & transaction fingerprints,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.stats.flush.latency,The latency of SQL Stats flushes to persistent storage. 
Includes failed flush attempts,nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,sql.stats.flushes.failed,Number of attempted SQL Stats flushes that failed with errors,failed flushes,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.stats.flushes.successful,Number of times SQL Stats are flushed successfully to persistent storage,successful flushes,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.stats.mem.current,Current memory usage for fingerprint storage,Memory,GAUGE,BYTES,AVG,NONE +APPLICATION,sql.stats.mem.max,Memory usage for fingerprint storage,Memory,HISTOGRAM,BYTES,AVG,NONE +APPLICATION,sql.stats.reported.mem.current,Current memory usage for reported fingerprint storage,Memory,GAUGE,BYTES,AVG,NONE +APPLICATION,sql.stats.reported.mem.max,Memory usage for reported fingerprint storage,Memory,HISTOGRAM,BYTES,AVG,NONE +APPLICATION,sql.stats.txn_stats_collection.duration,Time taken in nanoseconds to collect transaction stats,SQL Transaction Stats Collection Overhead,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,sql.temp_object_cleaner.active_cleaners,Number of cleaner tasks currently running on this node,Count,GAUGE,COUNT,AVG,NONE +APPLICATION,sql.temp_object_cleaner.schemas_deletion_error,Number of errored schema deletions by the temp object cleaner on this node,Count,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.temp_object_cleaner.schemas_deletion_success,Number of successful schema deletions by the temp object cleaner on this node,Count,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.temp_object_cleaner.schemas_to_delete,Number of schemas to be deleted by the temp object cleaner on this node,Count,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.txn.abort.count,Number of SQL transaction abort errors,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.txn.abort.count.internal,Number of SQL transaction abort errors (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.txn.begin.count,Number of SQL transaction BEGIN statements successfully executed,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.txn.begin.count.internal,Number of SQL transaction BEGIN statements successfully executed (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.txn.begin.started.count,Number of SQL transaction BEGIN statements started,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.txn.begin.started.count.internal,Number of SQL transaction BEGIN statements started (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.txn.commit.count,Number of SQL transaction COMMIT statements successfully executed,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.txn.commit.count.internal,Number of SQL transaction COMMIT statements successfully executed (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.txn.commit.started.count,Number of SQL transaction COMMIT statements started,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.txn.commit.started.count.internal,Number of SQL transaction COMMIT statements started (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.txn.commit_prepared.count,Number of SQL COMMIT PREPARED statements successfully executed,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE 
+APPLICATION,sql.txn.commit_prepared.count.internal,Number of SQL COMMIT PREPARED statements successfully executed (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.txn.commit_prepared.started.count,Number of SQL COMMIT PREPARED statements started,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.txn.commit_prepared.started.count.internal,Number of SQL COMMIT PREPARED statements started (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.txn.contended.count,Number of SQL transactions that experienced contention,Contention,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.txn.contended.count.internal,Number of SQL transactions that experienced contention (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.txn.latency,Latency of SQL transactions,Latency,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,sql.txn.latency.internal,Latency of SQL transactions (internal queries),SQL Internal Statements,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,sql.txn.prepare.count,Number of SQL PREPARE TRANSACTION statements successfully executed,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.txn.prepare.count.internal,Number of SQL PREPARE TRANSACTION statements successfully executed (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.txn.prepare.started.count,Number of SQL PREPARE TRANSACTION statements started,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.txn.prepare.started.count.internal,Number of SQL PREPARE TRANSACTION statements started (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.txn.rollback.count,Number of SQL transaction ROLLBACK statements successfully executed,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.txn.rollback.count.internal,Number of SQL transaction ROLLBACK statements successfully executed (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.txn.rollback.started.count,Number of SQL transaction ROLLBACK statements started,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.txn.rollback.started.count.internal,Number of SQL transaction ROLLBACK statements started (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.txn.rollback_prepared.count,Number of SQL ROLLBACK PREPARED statements successfully executed,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.txn.rollback_prepared.count.internal,Number of SQL ROLLBACK PREPARED statements successfully executed (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.txn.rollback_prepared.started.count,Number of SQL ROLLBACK PREPARED statements started,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.txn.rollback_prepared.started.count.internal,Number of SQL ROLLBACK PREPARED statements started (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.txn.upgraded_iso_level.count,Number of times a weak isolation level was automatically upgraded to a stronger one,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.txn.upgraded_iso_level.count.internal,Number of times a weak isolation level was automatically upgraded to a 
stronger one (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.txns.open,Number of currently open user SQL transactions,Open SQL Transactions,GAUGE,COUNT,AVG,NONE +APPLICATION,sql.txns.open.internal,Number of currently open user SQL transactions (internal queries),SQL Internal Statements,GAUGE,COUNT,AVG,NONE +APPLICATION,sql.update.count,Number of SQL UPDATE statements successfully executed,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.update.count.internal,Number of SQL UPDATE statements successfully executed (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.update.started.count,Number of SQL UPDATE statements started,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sql.update.started.count.internal,Number of SQL UPDATE statements started (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sqlliveness.is_alive.cache_hits,Number of calls to IsAlive that return from the cache,Calls,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sqlliveness.is_alive.cache_misses,Number of calls to IsAlive that do not return from the cache,Calls,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sqlliveness.sessions_deleted,Number of expired sessions which have been deleted,Sessions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sqlliveness.sessions_deletion_runs,Number of calls to delete sessions which have been performed,Sessions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sqlliveness.write_failures,Number of update or insert calls which have failed,Writes,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,sqlliveness.write_successes,Number of update or insert calls successfully performed,Writes,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,tenant.cost_client.blocked_requests,Number of requests currently blocked by the rate limiter,Requests,GAUGE,COUNT,AVG,NONE +APPLICATION,tenant.sql_usage.cross_region_network_ru,Total number of RUs charged for cross-region network traffic,Request Units,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,tenant.sql_usage.estimated_cpu_seconds,Estimated amount of CPU consumed by a virtual cluster,CPU Seconds,COUNTER,SECONDS,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,tenant.sql_usage.estimated_kv_cpu_seconds,"Estimated amount of CPU consumed by a virtual cluster, in the KV layer",CPU Seconds,COUNTER,SECONDS,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,tenant.sql_usage.estimated_replication_bytes,Total number of estimated bytes for KV replication traffic,Bytes,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,tenant.sql_usage.external_io_egress_bytes,Total number of bytes written to external services such as cloud storage providers,Bytes,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,tenant.sql_usage.external_io_ingress_bytes,Total number of bytes read from external services such as cloud storage providers,Bytes,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,tenant.sql_usage.kv_request_units,RU consumption attributable to KV,Request Units,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,tenant.sql_usage.pgwire_egress_bytes,Total number of bytes transferred from a SQL pod to the client,Bytes,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,tenant.sql_usage.provisioned_vcpus,Number of vcpus available to the virtual cluster,Count,GAUGE,COUNT,AVG,NONE +APPLICATION,tenant.sql_usage.read_batches,Total number of KV 
read batches,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,tenant.sql_usage.read_bytes,Total number of bytes read from KV,Bytes,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,tenant.sql_usage.read_requests,Total number of KV read requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,tenant.sql_usage.request_units,RU consumption,Request Units,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,tenant.sql_usage.sql_pods_cpu_seconds,Total amount of CPU used by SQL pods,CPU Seconds,COUNTER,SECONDS,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,tenant.sql_usage.write_batches,Total number of KV write batches,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,tenant.sql_usage.write_bytes,Total number of bytes written to KV,Bytes,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,tenant.sql_usage.write_requests,Total number of KV write requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,txn.aborts,Number of aborted KV transactions,KV Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,txn.commit_waits,Number of KV transactions that had to commit-wait on commit in order to ensure linearizability. This generally happens to transactions writing to global ranges.,KV Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,txn.commits,Number of committed KV transactions (including 1PC),KV Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,txn.commits1PC,Number of KV transaction one-phase commits,KV Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,txn.commits_read_only,Number of read only KV transaction commits,KV Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,txn.condensed_intent_spans,KV transactions that have exceeded their intent tracking memory budget (kv.transaction.max_intents_bytes). See also txn.condensed_intent_spans_gauge for a gauge of such transactions currently running.,KV Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,txn.condensed_intent_spans_gauge,KV transactions currently running that have exceeded their intent tracking memory budget (kv.transaction.max_intents_bytes). See also txn.condensed_intent_spans for a perpetual counter/rate.,KV Transactions,GAUGE,COUNT,AVG,NONE +APPLICATION,txn.condensed_intent_spans_rejected,KV transactions that have been aborted because they exceeded their intent tracking memory budget (kv.transaction.max_intents_bytes). 
Rejection is caused by kv.transaction.reject_over_max_intents_budget.,KV Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,txn.count_limit_on_response,KV transactions that have exceeded the count limit on a response,KV Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,txn.count_limit_rejected,KV transactions that have been aborted because they exceeded the max number of writes and locking reads allowed,KV Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,txn.durations,KV transaction durations,KV Txn Duration,HISTOGRAM,NANOSECONDS,AVG,NONE +APPLICATION,txn.inflight_locks_over_tracking_budget,KV transactions whose in-flight writes and locking reads have exceeded the intent tracking memory budget (kv.transaction.max_intents_bytes).,KV Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,txn.parallelcommits,Number of KV transaction parallel commits,KV Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,txn.parallelcommits.auto_retries,Number of commit tries after failed parallel commit attempts,Retries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,txn.prepares,Number of prepared KV transactions,KV Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,txn.refresh.auto_retries,Number of request retries after successful client-side refreshes,Retries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,txn.refresh.fail,Number of failed client-side transaction refreshes,Refreshes,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,txn.refresh.fail_with_condensed_spans,"Number of failed client-side refreshes for transactions whose read tracking lost fidelity because of condensing. Such a failure could be a false conflict. Failures counted here are also counted in txn.refresh.fail, and the respective transactions are also counted in txn.refresh.memory_limit_exceeded.",Refreshes,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,txn.refresh.memory_limit_exceeded,"Number of transactions which exceed the refresh span bytes limit, causing their read spans to be condensed",Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,txn.refresh.success,"Number of successful client-side transaction refreshes. A refresh may be preemptive or reactive. A reactive refresh is performed after a request throws an error because a refresh is needed for it to succeed. 
In these cases, the request will be re-issued as an auto-retry (see txn.refresh.auto_retries) after the refresh succeeds.",Refreshes,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,txn.refresh.success_server_side,Number of successful server-side transaction refreshes,Refreshes,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,txn.restarts,Number of restarted KV transactions,KV Transactions,HISTOGRAM,COUNT,AVG,NONE +APPLICATION,txn.restarts.asyncwritefailure,Number of restarts due to async consensus writes that failed to leave intents,Restarted Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,txn.restarts.commitdeadlineexceeded,Number of restarts due to a transaction exceeding its deadline,Restarted Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,txn.restarts.readwithinuncertainty,Number of restarts due to reading a new value within the uncertainty interval,Restarted Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,txn.restarts.serializable,Number of restarts due to a forwarded commit timestamp and isolation=SERIALIZABLE,Restarted Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,txn.restarts.txnaborted,Number of restarts due to an abort by a concurrent transaction (usually due to deadlock),Restarted Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,txn.restarts.txnpush,Number of restarts due to a transaction push failure,Restarted Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,txn.restarts.unknown,Number of restarts due to unknown reasons,Restarted Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,txn.restarts.writetooold,Number of restarts due to a concurrent writer committing first,Restarted Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,txn.rollbacks.async.failed,Number of KV transactions that failed to send abort asynchronously which is not always retried,KV Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +APPLICATION,txn.rollbacks.failed,Number of KV transactions that failed to send final abort,KV Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +SERVER,build.timestamp,Build information,Build Time,GAUGE,TIMESTAMP_SEC,AVG,NONE +SERVER,go.scheduler_latency,Go scheduling latency,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE +SERVER,log.buffered.messages.dropped,"Count of log messages that are dropped by buffered log sinks. When CRDB attempts to buffer a log message in a buffered log sink whose buffer is already full, it drops the oldest buffered messages to make space for the new message",Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +SERVER,log.fluent.sink.conn.attempts,Number of connection attempts experienced by fluent-server logging sinks,Attempts,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +SERVER,log.fluent.sink.conn.errors,Number of connection errors experienced by fluent-server logging sinks,Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +SERVER,log.fluent.sink.write.attempts,Number of write attempts experienced by fluent-server logging sinks,Attempts,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +SERVER,log.fluent.sink.write.errors,Number of write errors experienced by fluent-server logging sinks,Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +SERVER,log.messages.count,Count of messages logged on the node since startup. 
Note that this does not measure the fan-out of single log messages to the various configured logging sinks.,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +SERVER,sys.cgo.allocbytes,Current bytes of memory allocated by cgo,Memory,GAUGE,BYTES,AVG,NONE +SERVER,sys.cgo.totalbytes,"Total bytes of memory allocated by cgo, but not released",Memory,GAUGE,BYTES,AVG,NONE +SERVER,sys.cgocalls,Total number of cgo calls,cgo Calls,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +SERVER,sys.cpu.combined.percent-normalized,"Current user+system cpu percentage consumed by the CRDB process, normalized 0-1 by number of cores",CPU Time,GAUGE,PERCENT,AVG,NONE +SERVER,sys.cpu.host.combined.percent-normalized,"Current user+system cpu percentage across the whole machine, normalized 0-1 by number of cores",CPU Time,GAUGE,PERCENT,AVG,NONE +SERVER,sys.cpu.now.ns,"The time when CPU measurements were taken, as nanoseconds since epoch",CPU Time,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +SERVER,sys.cpu.sys.ns,Total system cpu time consumed by the CRDB process,CPU Time,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +SERVER,sys.cpu.sys.percent,Current system cpu percentage consumed by the CRDB process,CPU Time,GAUGE,PERCENT,AVG,NONE +SERVER,sys.cpu.user.ns,Total user cpu time consumed by the CRDB process,CPU Time,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +SERVER,sys.cpu.user.percent,Current user cpu percentage consumed by the CRDB process,CPU Time,GAUGE,PERCENT,AVG,NONE +SERVER,sys.fd.open,Process open file descriptors,File Descriptors,GAUGE,COUNT,AVG,NONE +SERVER,sys.fd.softlimit,Process open FD soft limit,File Descriptors,GAUGE,COUNT,AVG,NONE +SERVER,sys.gc.assist.ns,Estimated total CPU time user goroutines spent to assist the GC process,CPU Time,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +SERVER,sys.gc.count,Total number of GC runs,GC Runs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +SERVER,sys.gc.pause.ns,Total GC pause,GC Pause,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +SERVER,sys.gc.pause.percent,Current GC pause percentage,GC Pause,GAUGE,PERCENT,AVG,NONE +SERVER,sys.gc.stop.ns,Estimated GC stop-the-world stopping latencies,GC Stopping,GAUGE,NANOSECONDS,AVG,NONE +SERVER,sys.go.allocbytes,Current bytes of memory allocated by go,Memory,GAUGE,BYTES,AVG,NONE +SERVER,sys.go.heap.allocbytes,Cumulative bytes allocated for heap objects.,Memory,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +SERVER,sys.go.heap.heapfragmentbytes,"Total heap fragmentation bytes, derived from bytes in in-use spans minus bytes allocated",Memory,GAUGE,BYTES,AVG,NONE +SERVER,sys.go.heap.heapreleasedbytes,Total bytes returned to the OS from heap.,Memory,GAUGE,BYTES,AVG,NONE +SERVER,sys.go.heap.heapreservedbytes,"Total bytes reserved by heap, derived from bytes in idle (unused) spans minus bytes returned to the OS",Memory,GAUGE,BYTES,AVG,NONE +SERVER,sys.go.pause.other.ns,Estimated non-GC-related total pause time,Non-GC Pause,GAUGE,NANOSECONDS,AVG,NONE +SERVER,sys.go.stack.systembytes,Stack memory obtained from the OS.,Memory,GAUGE,BYTES,AVG,NONE +SERVER,sys.go.stop.other.ns,Estimated non-GC-related stop-the-world stopping latencies,Non-GC Stopping,GAUGE,NANOSECONDS,AVG,NONE +SERVER,sys.go.totalbytes,"Total bytes of memory allocated by go, but not released",Memory,GAUGE,BYTES,AVG,NONE +SERVER,sys.goroutines,Current number of goroutines,goroutines,GAUGE,COUNT,AVG,NONE +SERVER,sys.host.disk.io.time,Time spent reading from or writing to all disks since this process started (as reported by the 
OS),Time,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +SERVER,sys.host.disk.iopsinprogress,IO operations currently in progress on this host (as reported by the OS),Operations,GAUGE,COUNT,AVG,NONE +SERVER,sys.host.disk.read.bytes,Bytes read from all disks since this process started (as reported by the OS),Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +SERVER,sys.host.disk.read.count,Disk read operations across all disks since this process started (as reported by the OS),Operations,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +SERVER,sys.host.disk.read.time,Time spent reading from all disks since this process started (as reported by the OS),Time,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +SERVER,sys.host.disk.weightedio.time,Weighted time spent reading from or writing to all disks since this process started (as reported by the OS),Time,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +SERVER,sys.host.disk.write.bytes,Bytes written to all disks since this process started (as reported by the OS),Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +SERVER,sys.host.disk.write.count,Disk write operations across all disks since this process started (as reported by the OS),Operations,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +SERVER,sys.host.disk.write.time,Time spent writing to all disks since this process started (as reported by the OS),Time,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE +SERVER,sys.host.net.recv.bytes,Bytes received on all network interfaces since this process started (as reported by the OS),Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +SERVER,sys.host.net.recv.drop,Packets that were dropped while being received on all network interfaces since this process started (as reported by the OS),Packets,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +SERVER,sys.host.net.recv.err,Errors receiving packets on all network interfaces since this process started (as reported by the OS),Packets,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +SERVER,sys.host.net.recv.packets,Packets received on all network interfaces since this process started (as reported by the OS),Packets,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +SERVER,sys.host.net.send.bytes,Bytes sent on all network interfaces since this process started (as reported by the OS),Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE +SERVER,sys.host.net.send.drop,Packets that were dropped while being sent on all network interfaces since this process started (as reported by the OS),Packets,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +SERVER,sys.host.net.send.err,Errors sending packets on all network interfaces since this process started (as reported by the OS),Packets,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +SERVER,sys.host.net.send.packets,Packets sent on all network interfaces since this process started (as reported by the OS),Packets,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE +SERVER,sys.rss,Current process RSS,RSS,GAUGE,BYTES,AVG,NONE +SERVER,sys.runnable.goroutines.per.cpu,"Average number of goroutines that are waiting to run, normalized by number of cores",goroutines,GAUGE,COUNT,AVG,NONE +SERVER,sys.totalmem,Total memory (both free and used),Memory,GAUGE,BYTES,AVG,NONE +SERVER,sys.uptime,Process uptime,Uptime,COUNTER,SECONDS,AVG,NON_NEGATIVE_DERIVATIVE diff --git a/src/current/_data/v25.3/metrics/metrics.yml b/src/current/_data/v25.3/metrics/metrics.yml new file mode 100644 index 00000000000..61f9fc4d74d --- /dev/null +++ b/src/current/_data/v25.3/metrics/metrics.yml @@ -0,0 +1,753 @@ +# metrics.yml is a manually curated file of metrics that are included in the Canned 
Metrics for Serverless deployment. +# The metrics are in the order of appearance in the configuration file: +# https://github.com/cockroachlabs/managed-service/pull/16129/files +# console/assets/js/pages/clusterDetail/metrics/graphConfigs.tsx +# The data for the metrics was also sourced from +# https://github.com/cockroachdb/docs/blob/main/src/current/_includes/v23.2/essential-metrics.md +# +# The corresponding metrics-list.csv file was generated using the v23.2.0 binary with the following command: +# cockroach gen metric-list --format=csv > metrics-list.csv +# Once generated, the metrics-list.csv was manually modified to change the case of the headers to lowercase to work with liquid comparison code. + +- metric_id: sql.new_conns + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Connections Per Second" + datadog_id_selfhosted: sql.new_conns.count + essential: true + metric_type: SQL + metric_ui_tab: [ Overview, SQL ] + metric_ui_graph: "SQL Connections" + +- metric_id: sql.select.count + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Select" + datadog_id_selfhosted: sql.select.count + datadog_id_dedicated: sql.select.count + essential: true + metric_type: SQL + metric_ui_tab: [ Overview, SQL ] + metric_ui_graph: "SQL Statements" + +- metric_id: sql.update.count + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Update" + datadog_id_selfhosted: sql.update.count + datadog_id_dedicated: sql.update.count + essential: true + metric_type: SQL + metric_ui_tab: [ Overview, SQL ] + metric_ui_graph: "SQL Statements" + +- metric_id: sql.insert.count + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Insert" + datadog_id_selfhosted: sql.insert.count + datadog_id_dedicated: sql.insert.count + essential: true + metric_type: SQL + metric_ui_tab: [ Overview, SQL ] + metric_ui_graph: "SQL Statements" + +- metric_id: sql.delete.count + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Delete" + datadog_id_selfhosted: sql.delete.count + datadog_id_dedicated: sql.delete.count + essential: true + metric_type: SQL + metric_ui_tab: [ Overview, SQL ] + metric_ui_graph: "SQL Statements" + +- metric_id: sql.service.latency + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "P90, P99, P99.9, P99.99" + datadog_id_selfhosted: sql.service.latency + datadog_id_dedicated: sql.service.latency + essential: true + metric_type: SQL + metric_ui_tab: [ Overview, SQL ] + metric_ui_graph: "Service Latency: SQL Statements" + +- metric_id: tenant.consumption.request_units + deploy_selfhosted: false + deploy_dedicated: false + deploy_standard: true + short_name: "RU, Average RUs" + datadog_id_selfhosted: "NOT AVAILABLE" + datadog_id_dedicated: "NOT AVAILABLE" + essential: true + metric_type: "Request Units" + metric_ui_tab: [ Overview, "Request Units"] + metric_ui_graph: "Request Units" + +- metric_id: livebytes + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Storage usage" + datadog_id_selfhosted: "NOT AVAILABLE" + datadog_id_dedicated: "NOT AVAILABLE" + essential: true + metric_type: Storage + metric_ui_tab: [ Overview ] + metric_ui_graph: Storage + +- metric_id: tenant.consumption.sql_pods_cpu_seconds + deploy_selfhosted: false + deploy_dedicated: false + deploy_standard: true + short_name: "Total amount of CPU used by SQL pods" + datadog_id_selfhosted: 
"NOT AVAILABLE" + datadog_id_dedicated: "NOT AVAILABLE" + essential: true + metric_type: "Request Units" + metric_ui_tab: [ "Request Units" ] + metric_ui_graph: "CPU" + +- metric_id: tenant.consumption.pgwire_egress_bytes + deploy_selfhosted: false + deploy_dedicated: false + deploy_standard: true + short_name: "Client traffic" + datadog_id_selfhosted: "NOT AVAILABLE" + datadog_id_dedicated: "NOT AVAILABLE" + essential: true + metric_type: "Request Units" + metric_ui_tab: [ "Request Units" ] + metric_ui_graph: "Egress" + +- metric_id: tenant.consumption.external_io_egress_bytes + deploy_selfhosted: false + deploy_dedicated: false + deploy_standard: true + short_name: "Bulk I/O operations" + datadog_id_selfhosted: "NOT AVAILABLE" + datadog_id_dedicated: "NOT AVAILABLE" + essential: true + metric_type: "Request Units" + metric_ui_tab: [ "Request Units" ] + metric_ui_graph: "Egress" + +- metric_id: tenant.consumption.read_requests + deploy_selfhosted: false + deploy_dedicated: false + deploy_standard: true + short_name: "Requests" + datadog_id_selfhosted: "NOT AVAILABLE" + datadog_id_dedicated: "NOT AVAILABLE" + essential: true + metric_type: "Request Units" + metric_ui_tab: [ "Request Units" ] + metric_ui_graph: "Reads" + +- metric_id: tenant.consumption.read_batches + deploy_selfhosted: false + deploy_dedicated: false + deploy_standard: true + short_name: "Batches" + datadog_id_selfhosted: "NOT AVAILABLE" + datadog_id_dedicated: "NOT AVAILABLE" + essential: true + metric_type: "Request Units" + metric_ui_tab: [ "Request Units" ] + metric_ui_graph: "Reads" + +- metric_id: tenant.consumption.read_bytes + deploy_selfhosted: false + deploy_dedicated: false + deploy_standard: true + short_name: "Bytes" + datadog_id_selfhosted: "NOT AVAILABLE" + datadog_id_dedicated: "NOT AVAILABLE" + essential: true + metric_type: "Request Units" + metric_ui_tab: [ "Request Units" ] + metric_ui_graph: "Reads" + +- metric_id: tenant.consumption.write_requests + deploy_selfhosted: false + deploy_dedicated: false + deploy_standard: true + short_name: "Requests" + datadog_id_selfhosted: "NOT AVAILABLE" + datadog_id_dedicated: "NOT AVAILABLE" + essential: true + metric_type: "Request Units" + metric_ui_tab: [ "Request Units" ] + metric_ui_graph: "Writes" + +- metric_id: tenant.consumption.write_batches + deploy_selfhosted: false + deploy_dedicated: false + deploy_standard: true + short_name: "Batches" + datadog_id_selfhosted: "NOT AVAILABLE" + datadog_id_dedicated: "NOT AVAILABLE" + essential: true + metric_type: "Request Units" + metric_ui_tab: [ "Request Units" ] + metric_ui_graph: "Writes" + +- metric_id: tenant.consumption.write_bytes + deploy_selfhosted: false + deploy_dedicated: false + deploy_standard: true + short_name: "Bytes" + datadog_id_selfhosted: "NOT AVAILABLE" + datadog_id_dedicated: "NOT AVAILABLE" + essential: true + metric_type: "Request Units" + metric_ui_tab: [ "Request Units" ] + metric_ui_graph: "Writes" + +- metric_id: tenant.consumption.cross_region_network_ru + deploy_selfhosted: false + deploy_dedicated: false + deploy_standard: true + short_name: "Network traffic" + datadog_id_selfhosted: "NOT AVAILABLE" + datadog_id_dedicated: "NOT AVAILABLE" + essential: true + metric_type: "Request Units" + metric_ui_tab: [ "Request Units" ] + metric_ui_graph: "Cross-region Networking" + +- metric_id: sql.conn.latency + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "P90, P99" + datadog_id_selfhosted: sql.conn.latency + datadog_id_dedicated: sql.conn.latency + 
essential: true + metric_type: SQL + metric_ui_tab: [ SQL ] + metric_ui_graph: "Connection Latency" + +- metric_id: sql.conns + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Connections" + datadog_id_selfhosted: sql.conns + datadog_id_dedicated: sql.conns + essential: true + metric_type: SQL + metric_ui_tab: [ SQL ] + metric_ui_graph: "Open SQL Sessions" + +- metric_id: sql.txns.open + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Open Transactions" + datadog_id_selfhosted: sql.txns.open + datadog_id_dedicated: sql.txns.open + essential: true + metric_type: SQL + metric_ui_tab: [ SQL ] + metric_ui_graph: "Open SQL Transactions" + +- metric_id: sql.txn.begin.count + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Begin" + datadog_id_selfhosted: sql.txn.begin.count + datadog_id_dedicated: sql.txn.begin.count + essential: true + metric_type: SQL + metric_ui_tab: [ SQL ] + metric_ui_graph: "Transactions" + +- metric_id: sql.txn.commit.count + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Commits" + datadog_id_selfhosted: sql.txn.commit.count + datadog_id_dedicated: sql.txn.commit.count + essential: true + metric_type: SQL + metric_ui_tab: [ SQL ] + metric_ui_graph: "Transactions" + +- metric_id: sql.txn.rollback.count + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Rollbacks" + datadog_id_selfhosted: sql.txn.rollback.count + datadog_id_dedicated: sql.txn.rollback.count + essential: true + metric_type: SQL + metric_ui_tab: [ SQL ] + metric_ui_graph: "Transactions" + +- metric_id: sql.txn.abort.count + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Aborts" + datadog_id_selfhosted: sql.txn.abort.count + datadog_id_dedicated: sql.txn.abort.count + essential: true + metric_type: SQL + metric_ui_tab: [ SQL ] + metric_ui_graph: "Transactions" + +- metric_id: txn.restarts.writetooold + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Write Too Old" + datadog_id_selfhosted: txn.restarts.writetooold + datadog_id_dedicated: txn.restarts.writetooold + essential: true + metric_type: SQL + metric_ui_tab: [ SQL ] + metric_ui_graph: "Transaction Restarts" + +- metric_id: txn.restarts.writetoooldmulti + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Write Too Old (multiple)" + datadog_id_selfhosted: txn.restarts.writetoooldmulti.count + datadog_id_dedicated: "NOT AVAILABLE" + essential: true + metric_type: SQL + metric_ui_tab: [ SQL ] + metric_ui_graph: "Transaction Restarts" + +- metric_id: txn.restarts.serializable + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Forwarded Timestamp" + datadog_id_selfhosted: txn.restarts.serializable + datadog_id_dedicated: txn.restarts.serializable + essential: true + metric_type: SQL + metric_ui_tab: [ SQL ] + metric_ui_graph: "Transaction Restarts" + +- metric_id: txn.restarts.asyncwritefailure + deploy_selfhosted: false + deploy_dedicated: false + deploy_standard: true + short_name: "Async Consensus Failure" + datadog_id_selfhosted: "NOT AVAILABLE" + datadog_id_dedicated: "NOT AVAILABLE" + essential: true + metric_type: SQL + metric_ui_tab: [ SQL ] + metric_ui_graph: "Transaction Restarts" + +- metric_id: txn.restarts.readwithinuncertainty + deploy_selfhosted: false + deploy_dedicated: false + 
deploy_standard: true + short_name: "Read Within Uncertainty Interval" + datadog_id_selfhosted: "NOT AVAILABLE" + datadog_id_dedicated: "NOT AVAILABLE" + essential: true + metric_type: SQL + metric_ui_tab: [ SQL ] + metric_ui_graph: "Transaction Restarts" + +- metric_id: txn.restarts.txnaborted + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Aborted" + datadog_id_selfhosted: txn.restarts.txnaborted.count + datadog_id_dedicated: "NOT AVAILABLE" + essential: true + metric_type: SQL + metric_ui_tab: [ SQL ] + metric_ui_graph: "Transaction Restarts" + +- metric_id: txn.restarts.txnpush + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Push Failure" + datadog_id_selfhosted: txn.restarts.txnpush.count + datadog_id_dedicated: "NOT AVAILABLE" + essential: true + metric_type: SQL + metric_ui_tab: [ SQL ] + metric_ui_graph: "Transaction Restarts" + +- metric_id: txn.restarts.unknown + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Unknown" + datadog_id_selfhosted: txn.restarts.unknown.count + datadog_id_dedicated: "NOT AVAILABLE" + essential: true + metric_type: SQL + metric_ui_tab: [ SQL ] + metric_ui_graph: "Transaction Restarts" + +- metric_id: sql.txn.latency + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "P90, P99" + datadog_id_selfhosted: sql.txn.latency + datadog_id_dedicated: sql.txn.latency + essential: true + metric_type: SQL + metric_ui_tab: [ SQL ] + metric_ui_graph: "Transaction Latency" + +- metric_id: sql.statements.active + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Active Statements" + datadog_id_selfhosted: sql.statements.active + datadog_id_dedicated: sql.statements.active + essential: true + metric_type: SQL + metric_ui_tab: [ SQL ] + metric_ui_graph: "Active SQL Statements" + +- metric_id: sql.failure.count + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Errors" + datadog_id_selfhosted: sql.failure + datadog_id_dedicated: sql.failure.count + essential: true + metric_type: SQL + metric_ui_tab: [ SQL ] + metric_ui_graph: "SQL Statement Errors" + +- metric_id: sql.distsql.contended_queries.count + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Contention" + datadog_id_selfhosted: sql.distsql.contended.queries + datadog_id_dedicated: sql.distsql.contended.queries + essential: true + metric_type: SQL + metric_ui_tab: [ SQL ] + metric_ui_graph: "SQL Statement Contention" + +- metric_id: sql.full.scan.count + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Full scans" + datadog_id_selfhosted: sql.full.scan + datadog_id_dedicated: sql.full.scan.count + essential: true + metric_type: SQL + metric_ui_tab: [ SQL ] + metric_ui_graph: "Full Scans" + +- metric_id: sql.ddl.count + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "DDL Statements" + datadog_id_selfhosted: sql.ddl.count + datadog_id_dedicated: sql.ddl.count + essential: true + metric_type: SQL + metric_ui_tab: [ SQL ] + metric_ui_graph: "Schema Changes" + +- metric_id: jobs.auto_create_stats.currently_running + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Auto Create Statistics Running" + datadog_id_selfhosted: jobs.auto_create_stats.currently_running + datadog_id_dedicated: "NOT AVAILABLE" + essential: true + metric_type: 
"Table Statistics" + metric_ui_tab: [ SQL, Custom ] + metric_ui_graph: "Statistics Jobs" + +- metric_id: jobs.create_stats.currently_running + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Create Statistics Running" + datadog_id_selfhosted: jobs.create_stats.currently_running + datadog_id_dedicated: "NOT AVAILABLE" + essential: true + metric_type: "Table Statistics" + metric_ui_tab: [ SQL, Custom ] + metric_ui_graph: "Statistics Jobs" + +- metric_id: jobs.auto_create_stats.resume_failed + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Auto Create Statistics Failed" + datadog_id_selfhosted: jobs.auto.create.stats.resume_failed.count + datadog_id_dedicated: "NOT AVAILABLE" + essential: true + metric_type: "Table Statistics" + metric_ui_tab: [ SQL, Custom ] + metric_ui_graph: "Statistics Jobs" + +- metric_id: jobs.auto_create_stats.currently_paused + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Auto Create Statistics Paused" + datadog_id_selfhosted: jobs.auto.create.stats.currently_paused + datadog_id_dedicated: "NOT AVAILABLE" + essential: true + metric_type: "Table Statistics" + metric_ui_tab: [ SQL, Custom ] + metric_ui_graph: "Statistics Jobs" + +- metric_id: changefeed.running + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Running" + datadog_id_selfhosted: changefeed.running + datadog_id_dedicated: changefeed.running + essential: true + metric_type: Changefeeds + metric_ui_tab: [ Changefeeds, Custom ] + metric_ui_graph: "Changefeed Status" + +- metric_id: jobs.changefeed.currently_paused + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Paused" + datadog_id_selfhosted: jobs.changefeed.currently_paused + datadog_id_dedicated: "NOT AVAILABLE" + essential: true + metric_type: Changefeeds + metric_ui_tab: [ Changefeeds, Custom ] + metric_ui_graph: "Changefeed Status" + +- metric_id: changefeed.failures + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Failures" + datadog_id_selfhosted: changefeed.failures + datadog_id_dedicated: changefeed.failures + essential: true + metric_type: Changefeeds + metric_ui_tab: [ Changefeeds, Custom ] + metric_ui_graph: "Changefeed Status" + +- metric_id: changefeed.error_retries + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Errors" + datadog_id_selfhosted: changefeed.error_retries + datadog_id_dedicated: changefeed.error_retries + essential: true + metric_type: Changefeeds + metric_ui_tab: [ Changefeeds, Custom ] + metric_ui_graph: "Retryable Errors" + +- metric_id: changefeed.emitted_messages + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Emitted messages" + datadog_id_selfhosted: changefeed.emitted.messages + datadog_id_dedicated: changefeed.emitted.messages + essential: true + metric_type: Changefeeds + metric_ui_tab: [ Changefeeds, Custom ] + metric_ui_graph: "Emitted Messages" + +- metric_id: changefeed.emitted_bytes + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Emitted bytes" + datadog_id_selfhosted: changefeed.emitted_bytes.count + datadog_id_dedicated: "NOT AVAILABLE" + essential: true + metric_type: Changefeeds + metric_ui_tab: [ Changefeeds, Custom ] + metric_ui_graph: "Emitted Bytes" + +- metric_id: changefeed.commit_latency + deploy_selfhosted: true + deploy_dedicated: true 
+ deploy_standard: true + short_name: "P99, P90" + datadog_id_selfhosted: changefeed.commit_latency + datadog_id_dedicated: changefeed.commit_latency + essential: true + metric_type: Changefeeds + metric_ui_tab: [ Changefeeds, Custom ] + metric_ui_graph: "Commit Latency" + +- metric_id: jobs.changefeed.protected_age_sec + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Protected Timestamp Age" + datadog_id_selfhosted: jobs.changefeed.protected_age_sec + datadog_id_dedicated: "NOT AVAILABLE" + essential: true + metric_type: Changefeeds + metric_ui_tab: [ Changefeeds, Custom ] + metric_ui_graph: "Oldest Protected Timestamp" + +- metric_id: jobs.row_level_ttl.resume_completed + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Resume Completed" + datadog_id_selfhosted: jobs.row_level_ttl.resume_completed.count + datadog_id_dedicated: "NOT AVAILABLE" + essential: true + metric_type: "Row-Level TTL" + metric_ui_tab: [ "Row-Level TTL", Custom ] + metric_ui_graph: "Row-Level TTL Jobs" + +- metric_id: jobs.row_level_ttl.currently_running + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Running" + datadog_id_selfhosted: jobs.row.level.ttl.currently_running + datadog_id_dedicated: "NOT AVAILABLE" + essential: true + metric_type: "Row-Level TTL" + metric_ui_tab: [ "Row-Level TTL", Custom ] + metric_ui_graph: "Row-Level TTL Jobs" + +- metric_id: jobs.row_level_ttl.currently_paused + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Paused" + datadog_id_selfhosted: jobs.row_level_ttl.currently_paused + datadog_id_dedicated: "NOT AVAILABLE" + essential: true + metric_type: "Row-Level TTL" + metric_ui_tab: [ "Row-Level TTL", Custom ] + metric_ui_graph: "Row-Level TTL Jobs" + +- metric_id: jobs.row_level_ttl.resume_failed + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Resume Failed" + datadog_id_selfhosted: jobs.row_level_ttl.resume_failed.count + datadog_id_dedicated: "NOT AVAILABLE" + essential: true + metric_type: "Row-Level TTL" + metric_ui_tab: [ "Row-Level TTL", Custom ] + metric_ui_graph: "Row-Level TTL Jobs" + +- metric_id: schedules.scheduled-row-level-ttl-executor.failed + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Failed Schedules" + datadog_id_selfhosted: schedules.scheduled-row-level-ttl-executor.failed.count + datadog_id_dedicated: "NOT AVAILABLE" + essential: true + metric_type: "Row-Level TTL" + metric_ui_tab: [ "Row-Level TTL", Custom ] + metric_ui_graph: "Row-Level TTL Jobs" + +- metric_id: jobs.row_level_ttl.rows_selected + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Rows selected" + datadog_id_selfhosted: jobs.row_level_ttl.rows_selected.count + datadog_id_dedicated: "NOT AVAILABLE" + essential: true + metric_type: "Row-Level TTL" + metric_ui_tab: [ "Row-Level TTL", Custom ] + metric_ui_graph: "Processing Rate" + +- metric_id: jobs.row_level_ttl.rows_deleted + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Rows deleted" + datadog_id_selfhosted: jobs.row_level_ttl.rows_deleted.count + datadog_id_dedicated: "NOT AVAILABLE" + essential: true + metric_type: "Row-Level TTL" + metric_ui_tab: [ "Row-Level TTL", Custom ] + metric_ui_graph: "Processing Rate" + +- metric_id: jobs.row_level_ttl.total_rows + deploy_selfhosted: true + deploy_dedicated: true + 
deploy_standard: true + short_name: "Total Rows" + datadog_id_selfhosted: "NOT AVAILABLE" + datadog_id_dedicated: "NOT AVAILABLE" + essential: true + metric_type: "Row-Level TTL" + metric_ui_tab: [ "Row-Level TTL" ] + metric_ui_graph: "Estimated Rows" + +- metric_id: jobs.row_level_ttl.total_expired_rows + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Expired Rows" + datadog_id_selfhosted: "NOT AVAILABLE" + datadog_id_dedicated: "NOT AVAILABLE" + essential: true + metric_type: "Row-Level TTL" + metric_ui_tab: [ "Row-Level TTL" ] + metric_ui_graph: "Estimated Rows" + +- metric_id: jobs.row_level_ttl.select_duration + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Select Latency (P90), Select Latency (P99)" + datadog_id_selfhosted: "NOT AVAILABLE" + datadog_id_dedicated: "NOT AVAILABLE" + essential: true + metric_type: "Row-Level TTL" + metric_ui_tab: [ "Row-Level TTL" ] + metric_ui_graph: "Row-Level TTL Job Latency" + +- metric_id: jobs.row_level_ttl.delete_duration + deploy_selfhosted: true + deploy_dedicated: true + deploy_standard: true + short_name: "Delete Latency (P90), Delete Latency (P99)" + datadog_id_selfhosted: "NOT AVAILABLE" + datadog_id_dedicated: "NOT AVAILABLE" + essential: true + metric_type: "Row-Level TTL" + metric_ui_tab: [ "Row-Level TTL" ] + metric_ui_graph: "Row-Level TTL Job Latency" \ No newline at end of file diff --git a/src/current/_data/v25.3/metrics/multi-dimensional-metrics.yml b/src/current/_data/v25.3/metrics/multi-dimensional-metrics.yml new file mode 100644 index 00000000000..d1b55ff5b1a --- /dev/null +++ b/src/current/_data/v25.3/metrics/multi-dimensional-metrics.yml @@ -0,0 +1,254 @@ +# multi-dimensional-metrics.yml is a manually curated file of metrics that are included in the Multi-Dimensional Metrics page. +# The metrics are in the order of appearance in the comment: +# https://github.com/cockroachdb/cockroach/issues/124343#issuecomment-2117886012 +# The tenant.consumption.* metrics are not included because they only apply to serverless. +# +# The corresponding metrics-list.csv file was generated using the cockroach binary with the following command: +# cockroach gen metric-list --format=csv > metrics-list.csv +# Once generated, the metrics-list.csv was manually modified to change the case of the headers to lowercase to work with liquid comparison code. 
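 As an illustration of that workflow, here is a minimal shell sketch of regenerating the list and lowercasing the header row; the `sed` one-liner is an assumption standing in for the manual edit (GNU `sed` syntax): ```shell # Regenerate the metrics list from the cockroach binary. cockroach gen metric-list --format=csv > metrics-list.csv # Lowercase only the header row so Liquid comparison code can address # CSV fields by name. (GNU sed; the manual edit described above # accomplishes the same thing.) sed -i '1s/.*/\L&/' metrics-list.csv ``` 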
+ +- multi_dimensional_metric_id: changefeed.error_retries + feature: changefeed + +- multi_dimensional_metric_id: changefeed.emitted_messages + feature: changefeed + +- multi_dimensional_metric_id: changefeed.emitted_batch_sizes + feature: changefeed + +- multi_dimensional_metric_id: changefeed.filtered_messages + feature: changefeed + +- multi_dimensional_metric_id: changefeed.message_size_hist + feature: changefeed + +- multi_dimensional_metric_id: changefeed.emitted_bytes + feature: changefeed + +- multi_dimensional_metric_id: changefeed.flushed_bytes + feature: changefeed + +- multi_dimensional_metric_id: changefeed.flushes + feature: changefeed + +- multi_dimensional_metric_id: changefeed.size_based_flushes + feature: changefeed + +- multi_dimensional_metric_id: changefeed.parallel_io_queue_nanos + feature: changefeed + +- multi_dimensional_metric_id: changefeed.parallel_io_pending_rows + feature: changefeed + +- multi_dimensional_metric_id: changefeed.parallel_io_result_queue_nanos + feature: changefeed + +- multi_dimensional_metric_id: changefeed.parallel_io_in_flight_keys + feature: changefeed + +- multi_dimensional_metric_id: changefeed.sink_io_inflight + feature: changefeed + +- multi_dimensional_metric_id: changefeed.sink_batch_hist_nanos + feature: changefeed + +- multi_dimensional_metric_id: changefeed.flush_hist_nanos + feature: changefeed + +- multi_dimensional_metric_id: changefeed.commit_latency + feature: changefeed + +- multi_dimensional_metric_id: changefeed.admit_latency + feature: changefeed + +- multi_dimensional_metric_id: changefeed.backfill_count + feature: changefeed + +- multi_dimensional_metric_id: changefeed.backfill_pending_ranges + feature: changefeed + +- multi_dimensional_metric_id: changefeed.running + feature: changefeed + +- multi_dimensional_metric_id: changefeed.batch_reduction_count + feature: changefeed + +- multi_dimensional_metric_id: changefeed.internal_retry_message_count + feature: changefeed + +- multi_dimensional_metric_id: changefeed.schema_registry.retry_count + feature: changefeed + +- multi_dimensional_metric_id: changefeed.schema_registry.registrations + feature: changefeed + +- multi_dimensional_metric_id: changefeed.aggregator_progress + feature: changefeed + +- multi_dimensional_metric_id: changefeed.checkpoint_progress + feature: changefeed + +- multi_dimensional_metric_id: changefeed.lagging_ranges + feature: changefeed + +- multi_dimensional_metric_id: changefeed.cloudstorage_buffered_bytes + feature: changefeed + +- multi_dimensional_metric_id: changefeed.kafka_throttling_hist_nanos + feature: changefeed + +- multi_dimensional_metric_id: livebytes + feature: virtual + +- multi_dimensional_metric_id: keybytes + feature: virtual + +- multi_dimensional_metric_id: valbytes + feature: virtual + +- multi_dimensional_metric_id: rangekeybytes + feature: virtual + +- multi_dimensional_metric_id: rangevalbytes + feature: virtual + +- multi_dimensional_metric_id: totalbytes + feature: virtual + +- multi_dimensional_metric_id: intentbytes + feature: virtual + +- multi_dimensional_metric_id: lockbytes + feature: virtual + +- multi_dimensional_metric_id: livecount + feature: virtual + +- multi_dimensional_metric_id: keycount + feature: virtual + +- multi_dimensional_metric_id: valcount + feature: virtual + +- multi_dimensional_metric_id: rangekeycount + feature: virtual + +- multi_dimensional_metric_id: rangevalcount + feature: virtual + +- multi_dimensional_metric_id: intentcount + feature: virtual + +- multi_dimensional_metric_id: lockcount 
+ feature: virtual + +- multi_dimensional_metric_id: intentage + feature: virtual + +- multi_dimensional_metric_id: gcbytesage + feature: virtual + +- multi_dimensional_metric_id: sysbytes + feature: virtual + +- multi_dimensional_metric_id: syscount + feature: virtual + +- multi_dimensional_metric_id: abortspanbytes + feature: virtual + +- multi_dimensional_metric_id: kv.tenant_rate_limit.num_tenants + feature: virtual + +- multi_dimensional_metric_id: kv.tenant_rate_limit.current_blocked + feature: virtual + +- multi_dimensional_metric_id: kv.tenant_rate_limit.read_batches_admitted + feature: virtual + +- multi_dimensional_metric_id: kv.tenant_rate_limit.write_batches_admitted + feature: virtual + +- multi_dimensional_metric_id: kv.tenant_rate_limit.read_requests_admitted + feature: virtual + +- multi_dimensional_metric_id: kv.tenant_rate_limit.write_requests_admitted + feature: virtual + +- multi_dimensional_metric_id: kv.tenant_rate_limit.read_bytes_admitted + feature: virtual + +- multi_dimensional_metric_id: kv.tenant_rate_limit.write_bytes_admitted + feature: virtual + +- multi_dimensional_metric_id: security.certificate.expiration.client + feature: secure + description: "Minimum expiration for client certificates, labeled by SQL user. 0 means no certificate or error." + type: GAUGE + unit: SECONDS + +- multi_dimensional_metric_id: jobs.row_level_ttl.span_total_duration + feature: row-level-ttl + +- multi_dimensional_metric_id: jobs.row_level_ttl.select_duration + feature: row-level-ttl + +- multi_dimensional_metric_id: jobs.row_level_ttl.delete_duration + feature: row-level-ttl + +- multi_dimensional_metric_id: jobs.row_level_ttl.rows_selected + feature: row-level-ttl + +- multi_dimensional_metric_id: jobs.row_level_ttl.rows_deleted + feature: row-level-ttl + +- multi_dimensional_metric_id: jobs.row_level_ttl.num_active_spans + feature: row-level-ttl + +- multi_dimensional_metric_id: jobs.row_level_ttl.total_rows + feature: row-level-ttl + +- multi_dimensional_metric_id: jobs.row_level_ttl.total_expired_rows + feature: row-level-ttl + +- multi_dimensional_metric_id: rpc.connection.healthy + feature: all + +- multi_dimensional_metric_id: rpc.connection.unhealthy + feature: all + +- multi_dimensional_metric_id: rpc.connection.inactive + feature: all + +- multi_dimensional_metric_id: rpc.connection.healthy_nanos + feature: all + +- multi_dimensional_metric_id: rpc.connection.unhealthy_nanos + feature: all + +- multi_dimensional_metric_id: rpc.connection.heartbeats + feature: all + +- multi_dimensional_metric_id: rpc.connection.failures + feature: all + +- multi_dimensional_metric_id: rpc.connection.avg_round_trip_latency + feature: all + +- multi_dimensional_metric_id: logical_replication.catchup_ranges_by_label + feature: ldr + +- multi_dimensional_metric_id: logical_replication.events_dlqed_by_label + feature: ldr + +- multi_dimensional_metric_id: logical_replication.events_ingested_by_label + feature: ldr + +- multi_dimensional_metric_id: logical_replication.replicated_time_by_label + feature: ldr + +- multi_dimensional_metric_id: logical_replication.scanning_ranges_by_label + feature: ldr + +- multi_dimensional_metric_id: sql.exec.latency.detail + feature: detailed-latency diff --git a/src/current/_data/versions.csv b/src/current/_data/versions.csv index 38d1b5b8a75..1f4062a2be4 100644 --- a/src/current/_data/versions.csv +++ b/src/current/_data/versions.csv @@ -18,3 +18,4 @@ v24.2,2024-08-12,2025-02-12,N/A,N/A,N/A,N/A,N/A,N/A,v24.1,release-24.2,2028-08-1 
 v24.3,2024-11-18,2025-11-18,2026-05-18,24.3.11,24.3.12,2025-05-05,2026-05-05,2027-05-05,v24.2,release-24.3,N/A v25.1,2025-02-18,2025-08-18,N/A,N/A,N/A,N/A,N/A,N/A,v24.3,release-25.1,N/A v25.2,2025-05-09,2026-05-12,2026-11-12,N/A,N/A,N/A,N/A,N/A,v25.1,release-25.2,N/A +v25.3,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,v25.2,master,N/A diff --git a/src/current/_includes/releases/v25.3/backward-incompatible.md b/src/current/_includes/releases/v25.3/backward-incompatible.md new file mode 100644 index 00000000000..d9f9ea9d4c1 --- /dev/null +++ b/src/current/_includes/releases/v25.3/backward-incompatible.md @@ -0,0 +1,11 @@ +Before [upgrading to CockroachDB v25.3]({% link v25.3/upgrade-cockroach-version.md %}), be sure to review the following backward-incompatible changes, as well as [key cluster setting changes](#v25-3-0-cluster-settings), and adjust your deployment as necessary. + +- Bullet +- Bullet +- Bullet +- Bullet + +[#]: https://github.com/cockroachdb/cockroach/pull/ +[#]: https://github.com/cockroachdb/cockroach/pull/ +[#]: https://github.com/cockroachdb/cockroach/pull/ +[#]: https://github.com/cockroachdb/cockroach/pull/ \ No newline at end of file diff --git a/src/current/_includes/releases/v25.3/cluster-setting-changes.md b/src/current/_includes/releases/v25.3/cluster-setting-changes.md new file mode 100644 index 00000000000..a4abfe0fc7b --- /dev/null +++ b/src/current/_includes/releases/v25.3/cluster-setting-changes.md @@ -0,0 +1,15 @@ +Changes to [cluster settings]({% link v25.3/cluster-settings.md %}) should be reviewed prior to upgrading. New default cluster setting values will be used unless you have manually set a value for a setting. This can be confirmed by running the SQL statement `SELECT * FROM system.settings` to view the non-default settings. + 
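 For reference, a quick sketch of the check described above; connection flags are omitted and would depend on the deployment: ```shell # system.settings contains only explicitly set (non-default) values, # so listing it shows which settings would keep their manual values. cockroach sql --execute="SELECT name, value FROM system.settings ORDER BY name" ``` 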
 +

Settings added

+ +- Bullet +- Bullet +- Bullet +- Bullet +- Bullet + +
 +

Settings with changed visibility

 + +The following settings are now marked `public` after previously being `reserved`. Reserved settings are not documented and their tuning by customers is not supported. + +- Bullet diff --git a/src/current/_includes/releases/v25.3/deprecations.md b/src/current/_includes/releases/v25.3/deprecations.md new file mode 100644 index 00000000000..ccfa39e304f --- /dev/null +++ b/src/current/_includes/releases/v25.3/deprecations.md @@ -0,0 +1,3 @@ +The following deprecations are announced in v25.3. + +- Bullet \ No newline at end of file diff --git a/src/current/_includes/releases/v25.3/feature-detail-key.html b/src/current/_includes/releases/v25.3/feature-detail-key.html new file mode 100644 index 00000000000..95d79284eb4 --- /dev/null +++ b/src/current/_includes/releases/v25.3/feature-detail-key.html @@ -0,0 +1,25 @@ +
+ + + + + + + + + + + + + + + + + + + + + + + +
Feature detail key
Features marked "All*" were recently made available in the CockroachDB Cloud platform. They are available for all supported versions of CockroachDB, under the deployment methods specified in their row under Availability.
★★Features marked "All**" were recently made available via tools maintained outside of the CockroachDB binary. They are available to use with all supported versions of CockroachDB, under the deployment methods specified in their row under Availability.
{% include icon-yes.html %}Feature is available for this deployment method of CockroachDB as specified in the icon’s column: CockroachDB Self-hosted, CockroachDB Advanced, CockroachDB Standard, or CockroachDB Basic.
{% include icon-no.html %}Feature is not available for this deployment method of CockroachDB as specified in the icon’s column: CockroachDB Self-hosted, CockroachDB Advanced, CockroachDB Standard, or CockroachDB Basic.
 diff --git a/src/current/_includes/releases/v25.3/feature-highlights-change-data-capture.html b/src/current/_includes/releases/v25.3/feature-highlights-change-data-capture.html new file mode 100644 index 00000000000..69ca4218cee --- /dev/null +++ b/src/current/_includes/releases/v25.3/feature-highlights-change-data-capture.html @@ -0,0 +1,28 @@ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureAvailability
Ver.Self-hostedAdvancedStandardBasic
+

Summary

+

Description

+
XX.Y{% include icon-yes.html %}{% include icon-yes.html %}{% include icon-yes.html %}{% include icon-yes.html %}
 diff --git a/src/current/_includes/releases/v25.3/feature-highlights-cloud.html b/src/current/_includes/releases/v25.3/feature-highlights-cloud.html new file mode 100644 index 00000000000..f5ec84fc79b --- /dev/null +++ b/src/current/_includes/releases/v25.3/feature-highlights-cloud.html @@ -0,0 +1,30 @@ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureAvailability
Ver.Self-hostedAdvancedStandardBasic
+

Summary

+

Description

+
+ All + {% include icon-no.html %}{% include icon-yes.html %}{% include icon-yes.html %}{% include icon-yes.html %}
 diff --git a/src/current/_includes/releases/v25.3/feature-highlights-kv.html b/src/current/_includes/releases/v25.3/feature-highlights-kv.html new file mode 100644 index 00000000000..1feb11eb0be --- /dev/null +++ b/src/current/_includes/releases/v25.3/feature-highlights-kv.html @@ -0,0 +1,39 @@ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureAvailability
Ver.Self-hostedAdvancedStandardBasic
+

Summary

+

Description

+
XX.Y{% include icon-yes.html %}{% include icon-yes.html %}{% include icon-yes.html %}{% include icon-yes.html %}
+

Summary

+

Description

+
XX.Y{% include icon-yes.html %}{% include icon-yes.html %}{% include icon-yes.html %}{% include icon-yes.html %}
 diff --git a/src/current/_includes/releases/v25.3/feature-highlights-performance-and-high-availability.html b/src/current/_includes/releases/v25.3/feature-highlights-performance-and-high-availability.html new file mode 100644 index 00000000000..69ca4218cee --- /dev/null +++ b/src/current/_includes/releases/v25.3/feature-highlights-performance-and-high-availability.html @@ -0,0 +1,28 @@ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureAvailability
Ver.Self-hostedAdvancedStandardBasic
+

Summary

+

Description

+
XX.Y{% include icon-yes.html %}{% include icon-yes.html %}{% include icon-yes.html %}{% include icon-yes.html %}
 diff --git a/src/current/_includes/releases/v25.3/feature-highlights-sql.html b/src/current/_includes/releases/v25.3/feature-highlights-sql.html new file mode 100644 index 00000000000..ddc7af55fd0 --- /dev/null +++ b/src/current/_includes/releases/v25.3/feature-highlights-sql.html @@ -0,0 +1,50 @@ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureAvailability
Ver.Self-hostedAdvancedStandardBasic
+

Summary

+

Description

+
XX.Y{% include icon-yes.html %}{% include icon-yes.html %}{% include icon-yes.html %}{% include icon-yes.html %}
+

Summary

+

Description

+
XX.Y{% include icon-yes.html %}{% include icon-yes.html %}{% include icon-yes.html %}{% include icon-yes.html %}
+

Summary

+

Description

+
XX.Y{% include icon-yes.html %}{% include icon-yes.html %}{% include icon-yes.html %}{% include icon-yes.html %}
 diff --git a/src/current/_includes/releases/v25.3/upgrade-finalization.md b/src/current/_includes/releases/v25.3/upgrade-finalization.md new file mode 100644 index 00000000000..5e5dae1e18b --- /dev/null +++ b/src/current/_includes/releases/v25.3/upgrade-finalization.md @@ -0,0 +1,5 @@ +During a major-version upgrade, certain features and performance improvements may not be available until the upgrade is finalized. In v25.3, these are: + +- Bullet +- Bullet +- Bullet \ No newline at end of file diff --git a/src/current/_includes/releases/v25.3/v25.3.0-alpha.1.md b/src/current/_includes/releases/v25.3/v25.3.0-alpha.1.md new file mode 100644 index 00000000000..bb3ab0397b7 --- /dev/null +++ b/src/current/_includes/releases/v25.3/v25.3.0-alpha.1.md @@ -0,0 +1,211 @@ +## v25.3.0-alpha.1 + +Release Date: June 9, 2025 + +{% include releases/new-release-downloads-docker-image.md release=include.release %} + 

 +

Security updates


 + +- The SQL client will now receive an error, and an error will also be logged in the `OPS` channel, if a client attempts to connect with an unsupported cipher. [#146522][#146522] + 

 +

General changes


 + +- Enhanced the `/status/v2/hotranges` endpoint by adding two new filtering options (see the request sketch after this list): + - `per_node_limit` (`int32`): Specifies the maximum number of hot ranges to return per node. Defaults to `128` if not set. + - `stats_only` (`bool`): When set to `true`, returns only the statistics for hot ranges without fetching descriptor information, such as databases, tables, and indexes. [#144091][#144091] +- Changefeeds now round down the progress of each range to the nearest second, in order to cover more ranges in fine-grained checkpointing. [#146979][#146979] +- Reduced the maximum backoff for changefeed retries from 10 minutes to 1 minute, which results in faster recovery from transient errors. [#146448][#146448] +- The secret keys in Azure cloud storage URIs are now redacted. [#147022][#147022] + 
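 A minimal sketch of a request using the new filters; the host, port, and TLS details are illustrative assumptions, while the endpoint path and parameters are as given above: ```shell # Return at most 10 hot ranges per node, statistics only (no descriptor info). curl -s "https://localhost:8080/status/v2/hotranges?per_node_limit=10&stats_only=true" ``` 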

 +

SQL language changes


 + +- Added a new session variable `create_table_with_schema_locked`, which can be used to ensure all tables created by a session have the storage parameter `schema_locked` set. [#143892][#143892] +- The following syntax is now supported: + - `GRANT ... ON ALL ROUTINES IN SCHEMA ...` + - `REVOKE ... ON ALL ROUTINES IN SCHEMA ...` + - `ALTER DEFAULT PRIVILEGES GRANT ... ON ROUTINES ...` + - `ALTER DEFAULT PRIVILEGES REVOKE ... ON ROUTINES ...` + + The `ROUTINES` keyword makes the command apply to both functions and stored procedures. Note that `ALTER DEFAULT PRIVILEGES ... ON FUNCTIONS` already applied to stored procedures (which aligns with the PostgreSQL behavior), and that is not changing. [#144189][#144189] +- The variable arguments of polymorphic built-in functions (e.g., `concat`, `num_nulls`, `format`, `concat_ws`) no longer need to have the same type, matching PostgreSQL behavior. As a result, CockroachDB's type inference engine will no longer be able to infer argument types in some cases where it previously could, and there is a possibility that CockroachDB applications will encounter new errors. The new session variable `use_pre_25_2_variadic_builtins` restores the previous behavior (and limitations). [#144522][#144522] +- Added new cluster settings: `sql.metrics.application_name.enabled` and `sql.metrics.database_name.enabled`. These settings default to `false` and can be set to `true` to display the application name and database name, respectively, on supported metrics. [#144610][#144610] +- Added support for query tagging, which allows users to add query tags to their SQL statements via comments (see the sketch after this list). These query tags are included in: + - All log entries generated during the execution of a SQL statement, where they are prefixed by `querytag-`. + - Traces, where they are prefixed by `querytag-`. + - The `crdb_internal.cluster_execution_insights` and `crdb_internal.node_execution_insights` virtual tables, in a new `query_tags` JSONB column. + This feature is disabled by default and can be enabled using the `sql.sqlcommenter.enabled` cluster setting. Comments must follow the [SQLCommenter specification](https://google.github.io/sqlcommenter/spec/). [#145435][#145435] +- `~~*` and `!~~*` are now supported aliases for `ILIKE` and `NOT ILIKE`. [#146764][#146764] +- The `information_schema.triggers` table is now populated with trigger metadata. Users can query this table to see all triggers defined in their database, including the trigger name, timing (`BEFORE`/`AFTER`), event type (`INSERT`/`UPDATE`/`DELETE`), and associated function. Each trigger event appears as a separate row in the table. [#147237][#147237] +- The `pg_catalog.pg_trigger` table now returns metadata about database triggers. [#147248][#147248] +- Deterministic collations are now supported with `LIKE`. A deterministic collation considers strings to be equal only if they consist of the same byte sequence. [#147045][#147045] +- Assigning to an element of a composite-typed variable in a PL/pgSQL routine now respects case-sensitivity rules. For example, a field named `"FOO_Bar"` can be assigned like `NEW."FOO_Bar" = 100`. [#143579][#143579] + 
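 A short sketch of the query-tagging flow described above; the tag keys and table name are invented, and the trailing comment follows the SQLCommenter format (URL-encoded values in single quotes): ```shell # Enable SQLCommenter-style query tags (disabled by default), then tag a query. # Connection flags are omitted for brevity. cockroach sql --execute="SET CLUSTER SETTING sql.sqlcommenter.enabled = true" cockroach sql --execute="SELECT * FROM orders WHERE id = 1 /*app='checkout',route='%2Fapi%2Fcart'*/" ``` 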

 +

Operational changes


 + +- Prometheus metrics are now also available at the `/metrics` endpoint, in addition to the existing `/_status/vars` endpoint. The new `/metrics` endpoint emits statically labeled metrics and will evolve more rapidly as CockroachDB migrates metrics to use labels instead of defining different metric names. For compatibility, users can continue to use `/_status/vars`, where metric names will remain stable (see the sketch after this list). [#143536][#143536] +- Added new latency metrics: `sql.service.latency.historical`, `sql.service.latency.consistent`, `sql.exec.latency.historical`, and `sql.exec.latency.consistent` for easier query optimizations. [#142826][#142826] +- Partial index schema changes are supported in replicating tables when `logical_replication.consumer.immediate_mode_writer` is not set to `legacy-kv`. [#144508][#144508] +- The cluster setting `server.client_cert_expiration_cache.capacity` has been deprecated. The client certificate cache now evicts client certificates based on expiration time. [#144181][#144181] +- Logs for hot ranges (`hot_ranges_stats` events) have been moved to the `HEALTH` logging channel. [#144567][#144567] +- Added a new metric, `kv.loadsplitter.cleardirection`, which increments when the load-based splitter observes that more than 80% of replica access samples are moving in a single direction (either left/descending or right/ascending). [#143927][#143927] +- When the `server.telemetry.hot_ranges_stats.enabled` cluster setting is enabled, nodes check for hot ranges every minute instead of every 4 hours. A node logs its hot ranges when any single replica exceeds 250 ms of CPU time per second. In multi-tenant deployments, the check runs every 5 minutes and logs hot ranges for the entire cluster. [#144414][#144414] +- Added the metric `changefeed.checkpoint.timestamp_count` that measures the number of unique timestamps in a changefeed span-level checkpoint. It may be useful to monitor this metric to determine if quantization settings should be changed. [#145117][#145117] +- In a physical cluster replication (PCR) deployment, the reader virtual cluster cannot be upgraded directly by setting the version cluster setting from either the standby system virtual cluster or the reader virtual cluster. Instead, it is necessary to: + 1. Upgrade the standby system virtual cluster. + 1. Upgrade the primary system virtual cluster. + 1. Upgrade the primary virtual cluster. + 1. Wait for the replicated time to advance past the time the primary virtual cluster upgraded. + 1. Shut down the reader virtual cluster. + 1. Upgrade the destination host cluster. + 1. Re-initialize the reader virtual cluster with `ALTER VIRTUAL CLUSTER SET REPLICATION READ VIRTUAL CLUSTER`. [#146127][#146127] +- Added job tracing support to changefeeds. [#144412][#144412] + 
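 A sketch contrasting the two scrape endpoints mentioned in the first bullet; the host and port are assumptions: ```shell # Legacy endpoint: metric names remain stable across releases. curl -s http://localhost:8080/_status/vars | head # New endpoint: statically labeled metrics; names may evolve as labels are adopted. curl -s http://localhost:8080/metrics | head ``` 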

 +

Command-line changes


 + +- Node attributes (`attrs`) will now appear in the `node status` CLI command. [#143421][#143421] +- Updated the `\d <table_name>` command to show policy and Row Level Security information similar to what is shown in the output of `SHOW CREATE TABLE`. [#146215][#146215] +- Added the `--validate-zip-file` flag to the `cockroach debug zip` command (see the sketch after this list). This flag performs a quick validation check to ensure that the generated zip file is not corrupted. The flag is enabled by default. [#146192][#146192] +- The SQL shell now supports the compact output mode when `auto_trace` is enabled. [#146432][#146432] + 
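 For illustration, the new flag in context; the connection details are assumptions, and the flag is shown explicitly even though it defaults to on: ```shell # Generate a debug zip, then validate that the archive is not corrupted. cockroach debug zip ./cockroach-debug.zip --host=localhost:26257 --validate-zip-file ``` 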

 +

DB Console changes


 + +- Schema insights that recommend replacing an index were previously a two-statement command consisting of a `CREATE INDEX` and a `DROP INDEX` statement. When these two DDL statements were run as a single batched command, it was possible for one statement to succeed and one to fail. This is because DDL statements do not have the same atomicity guarantees as other SQL statements in CockroachDB. Index-replacement insights are now a single `CREATE INDEX` statement followed by a comment with additional DDL statements to be run manually: an `ALTER INDEX ... NOT VISIBLE` statement, which makes the old index invisible to the optimizer, followed by a `DROP INDEX` statement that should only be run after making the old index invisible and verifying that workload performance is satisfactory (see the hypothetical example after this list). [#144101][#144101] +- Updated the titles of the disk throughput graphs on the Metrics page Hardware dashboard to display only "Bytes/s" instead of including a specific magnitude, "MiB/s". The titles of the graphs are now "Disk Read Bytes/s" and "Disk Write Bytes/s". [#147462][#147462] + 
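 A hypothetical example of the single-statement insight format described above; the table and index names are invented: ```shell cockroach sql --execute=" CREATE INDEX orders_by_customer_idx ON orders (customer_id); -- After the new index exists and workload performance has been verified: -- ALTER INDEX orders@orders_by_customer_old NOT VISIBLE; -- DROP INDEX orders@orders_by_customer_old; " ``` 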

 +

Bug fixes


 + +- Fixed a bug where setting `changefeed.aggregator.flush_jitter` and `min_checkpoint_frequency` such that `changefeed.aggregator.flush_jitter * min_checkpoint_frequency < 1` would cause a panic. Jitter will now be disabled in this case. [#144304][#144304] +- Fixed a bug that could cause queries that perform work in parallel to ignore the requested quality-of-service level. Affected operations include lookup joins, DistSQL execution, and foreign-key checks. [#144427][#144427] +- Improved the performance of `SHOW CREATE TABLE` on multi-region databases with large numbers of objects. [#144900][#144900] +- Fixed a bug where running `DROP INDEX` on a hash-sharded index did not properly detect dependencies from functions and procedures on the shard column. This caused the `DROP INDEX` statement to fail with an internal validation error. Now the statement returns a correct error message, and using `DROP INDEX ... CASCADE` works as expected by dropping the dependent functions and procedures. [#145107][#145107] +- Fixed a bug that prevented variable references using ordinal syntax (like `$1`) from reflecting updates to the variable. Referencing variables declared in PL/pgSQL blocks (instead of parameters) via ordinal syntax is now disallowed. The bug had existed since v24.1. [#144347][#144347] +- Fixed a bug that caused index expression elements of primary keys to be shown incorrectly in the output of `SHOW CREATE TABLE`. [#144716][#144716] +- Fixed a bug that could lead to schema changes hanging after a cluster recovered from availability issues. [#145462][#145462] +- Previously, on a table with multiple column families, CockroachDB could encounter a `Non-nullable column "‹×›:‹×›" with no value` error in rare cases during table statistics collection. The bug was present since v19.2 and is now fixed. [#145481][#145481] +- Fixed a bug that could cause a row-level TTL job to fail with the error "comparison of two different versions of enum" if an `ENUM` type referenced by the table experienced a schema change. [#145374][#145374] +- Fixed a bug where the physical cluster replication (PCR) reader catalog job could hit validation errors when schema objects had dependencies between them (for example, when a sequence's default expression was being removed). [#145972][#145972] +- Creating a vector index on a table that contains a `NULL` vector value will no longer cause an internal error. [#145983][#145983] +- Fixed an internal assertion failure that could occur during operations like `ALTER TYPE` or `ALTER DATABASE ... ADD REGION` when temporary tables were present. [#145551][#145551] +- Row-level security (RLS) `SELECT` policies during `UPDATE` operations are now only applied when referenced columns appear in the `SET` or `WHERE` clauses, matching the behavior of PostgreSQL. This improves compatibility. [#145344][#145344] +- Fixed an issue where using inline log configuration could cause internal errors on the DB Console's Logs page for a node at `#/node/{nodeID}/logs`. [#145329][#145329] +- Fixed an integer overflow in the `split_part` function when using extremely negative field positions like Go's `math.MinInt64`. [#146271][#146271] +- Fixed incorrect application of `SELECT` policies to `RETURNING` clauses in `INSERT` and `UPDATE` when no table columns were referenced. [#145890][#145890] +- Fixed a bug that prevented `TRUNCATE` from succeeding if any indexes on the table had back-reference dependencies, such as from a view or function referencing the index. [#146287][#146287] +- Fixed a bug where `ALTER TABLE` operations with multiple commands could generate invalid zone configurations. [#146369][#146369] +- Fixed a bug where an invalid comment in the `system.comment` table for a schema object could make it inaccessible. [#146213][#146213] +- Fixed a bug where a CockroachDB node could crash when executing `DO` statements that contain currently unsupported DDL statements like `CREATE TYPE` in a non-default configuration (additional logging needed to be enabled, e.g., via the `sql.log.all_statements.enabled` cluster setting). This bug was introduced in v25.1. [#146406][#146406] +- Prevented the use of future timestamps with `AS OF SYSTEM TIME` in `CREATE TABLE ... AS` and materialized views. Previously, such timestamps could cause errors, delays, or hangs. [#146446][#146446] +- Fixed an internal error that could be hit when `ADD COLUMN UNIQUE` and `ALTER PRIMARY KEY` were executed within the same transaction. [#146567][#146567] +- Fixed a bug that prevented temporary views and sequences from being created if the `pg_temp` schema was explicitly specified in the qualified name of the object being created. [#146586][#146586] +- Fixed a bug where CockroachDB would not use the vectorized fast path for `COPY` when it was supported. The bug was only present in previous v25.2 releases. [#146696][#146696] +- Errors triggered by DB Console activity no longer cause the node to crash. [#145563][#145563] +- Fixed a bug to prevent HTTP connections from stopping server shutdown. [#146744][#146744] +- The MVCC timestamp is now emitted correctly when the `mvcc_timestamp` option is used with CDC queries. [#146836][#146836] +- Fixed a bug in v25.2.0 where a vector search operator could drop user-supplied filters if the same vector column was indexed twice and a vector index with no prefix columns was defined after a vector index with prefix columns. [#146259][#146259] +- Fixed a bug that could cause the `cockroach` process to `segfault` when collecting runtime execution traces (typically collected via the **Advanced Debug** page in the Console). [#146883][#146883] +- Fixed a data race in the `cloudstorage` sink. [#146297][#146297] +- Fixed a bug where the `kv.rangefeed.closed_timestamp.slow_ranges` metric would not be incremented when a rangefeed closed timestamp was slower than the target threshold. [#146949][#146949] +- Fixed a bug that could cause an `AFTER` trigger to fail with `client already committed or rolled back the transaction` if the query also contained foreign-key cascades. The bug had existed since `AFTER` triggers were introduced in v24.3. [#146890][#146890] +- Prevented dropping columns or indexes that are still referenced by triggers. Previously, these operations could succeed silently, potentially breaking trigger functionality. [#146683][#146683] +- Fixed a bug where searching a vector with a query vector that doesn't match the dimensions of the vector column in the table would cause a node to crash. [#146848][#146848] +- Specifying types for a subset of columns in a generator function's column definition list now results in a syntax error instead of an internal error. [#145492][#145492] +- Fixed a bug that caused the SQL Activity > Statement Fingerprint page to fail to load details for statements run with application names containing a `#` character. [#147021][#147021] +- CockroachDB could previously incorrectly evaluate the `to_regclass`, `to_regnamespace`, `to_regproc`, `to_regprocedure`, `to_regrole`, and `to_regtype` builtin functions when the query using them happened to be evaluated in distributed fashion. The bug has been present since the introduction of these builtins in v23.1 and is now fixed. [#147362][#147362] +- Fixed a bug that caused the optimizer to ignore index hints when optimizing some forms of prepared statements. This could result in one of two unexpected behaviors: a query errors with the message `index cannot be used for this query` when the index can actually be used; or a query uses an index that does not adhere to the hint. The hints relevant to this bug are regular index hints, e.g., `SELECT * FROM tab@index`, `FORCE_INVERTED_INDEX`, and `FORCE_ZIGZAG`. [#147368][#147368] +- Fixed a bug where the `pg_catalog.pg_policy` table could contain duplicate OID values when multiple tables had policies with the same policy ID. All rows in `pg_policy` now have unique OIDs as required. [#147373][#147373] +- Fixed a bug where the `rolbypassrls` column in the `pg_roles` and `pg_authid` tables always returned false, even for roles with the `BYPASSRLS` option. [#147357][#147357] +- Fixed a bug that could cause stable expressions to be folded in cached query plans. The bug could cause stable expressions like `current_setting` to return the wrong result if used in a prepared statement. The bug was introduced in point releases v23.2.22, v24.1.14, v24.3.9, and v25.1.2, and the v25.2 alpha. [#147187][#147187] +- Fixed an issue where updating child metrics and reinitializing metrics at the same time could cause scrape errors. [#147486][#147486] +- Fixed a runtime panic in the `substring_index` function that occurred when the count argument was the minimum 64-bit integer value. [#147546][#147546] +- Fixed a memory leak in index backfill jobs where completed spans were duplicated in memory on each progress update after resuming from a checkpoint. This could cause out-of-memory (OOM) errors when backfilling indexes on large tables with many ranges. This bug affected release version v25.2.0 and pre-release versions v25.2.0-alpha.3 through v25.2.0-rc.1. [#147511][#147511] +- Fixed a bug where prepared statements on schema changes could fail with runtime errors. [#147658][#147658] +- Fixed an issue with logical data replication (LDR) where the presence of a unique index may cause spurious dead letter queue (DLQ) entries if the unique index has a smaller index ID than the primary key index. [#147117][#147117] +- Scheduled backups now prevent multiple compaction jobs from running in parallel on their backups. [#145930][#145930] +- Removal of triggers during a restore now accounts for back references that existed because of triggers. [#147306][#147306] + 

+<h3 id="performance-improvements">Performance improvements</h3>

+ +- Prepared statements are now more efficiently cached. [#144021][#144021] +- TTL jobs now respond to cluster topology changes by restarting and rebalancing across available nodes. [#145214][#145214] + +[#142826]: https://github.com/cockroachdb/cockroach/pull/142826 +[#143421]: https://github.com/cockroachdb/cockroach/pull/143421 +[#143536]: https://github.com/cockroachdb/cockroach/pull/143536 +[#143579]: https://github.com/cockroachdb/cockroach/pull/143579 +[#143892]: https://github.com/cockroachdb/cockroach/pull/143892 +[#143927]: https://github.com/cockroachdb/cockroach/pull/143927 +[#144021]: https://github.com/cockroachdb/cockroach/pull/144021 +[#144091]: https://github.com/cockroachdb/cockroach/pull/144091 +[#144101]: https://github.com/cockroachdb/cockroach/pull/144101 +[#144181]: https://github.com/cockroachdb/cockroach/pull/144181 +[#144189]: https://github.com/cockroachdb/cockroach/pull/144189 +[#144304]: https://github.com/cockroachdb/cockroach/pull/144304 +[#144347]: https://github.com/cockroachdb/cockroach/pull/144347 +[#144412]: https://github.com/cockroachdb/cockroach/pull/144412 +[#144414]: https://github.com/cockroachdb/cockroach/pull/144414 +[#144427]: https://github.com/cockroachdb/cockroach/pull/144427 +[#144508]: https://github.com/cockroachdb/cockroach/pull/144508 +[#144522]: https://github.com/cockroachdb/cockroach/pull/144522 +[#144567]: https://github.com/cockroachdb/cockroach/pull/144567 +[#144610]: https://github.com/cockroachdb/cockroach/pull/144610 +[#144716]: https://github.com/cockroachdb/cockroach/pull/144716 +[#144900]: https://github.com/cockroachdb/cockroach/pull/144900 +[#145107]: https://github.com/cockroachdb/cockroach/pull/145107 +[#145117]: https://github.com/cockroachdb/cockroach/pull/145117 +[#145214]: https://github.com/cockroachdb/cockroach/pull/145214 +[#145329]: https://github.com/cockroachdb/cockroach/pull/145329 +[#145344]: https://github.com/cockroachdb/cockroach/pull/145344 +[#145374]: https://github.com/cockroachdb/cockroach/pull/145374 +[#145435]: https://github.com/cockroachdb/cockroach/pull/145435 +[#145462]: https://github.com/cockroachdb/cockroach/pull/145462 +[#145481]: https://github.com/cockroachdb/cockroach/pull/145481 +[#145492]: https://github.com/cockroachdb/cockroach/pull/145492 +[#145551]: https://github.com/cockroachdb/cockroach/pull/145551 +[#145563]: https://github.com/cockroachdb/cockroach/pull/145563 +[#145890]: https://github.com/cockroachdb/cockroach/pull/145890 +[#145930]: https://github.com/cockroachdb/cockroach/pull/145930 +[#145972]: https://github.com/cockroachdb/cockroach/pull/145972 +[#145983]: https://github.com/cockroachdb/cockroach/pull/145983 +[#146127]: https://github.com/cockroachdb/cockroach/pull/146127 +[#146192]: https://github.com/cockroachdb/cockroach/pull/146192 +[#146213]: https://github.com/cockroachdb/cockroach/pull/146213 +[#146215]: https://github.com/cockroachdb/cockroach/pull/146215 +[#146259]: https://github.com/cockroachdb/cockroach/pull/146259 +[#146271]: https://github.com/cockroachdb/cockroach/pull/146271 +[#146287]: https://github.com/cockroachdb/cockroach/pull/146287 +[#146297]: https://github.com/cockroachdb/cockroach/pull/146297 +[#146369]: https://github.com/cockroachdb/cockroach/pull/146369 +[#146406]: https://github.com/cockroachdb/cockroach/pull/146406 +[#146432]: https://github.com/cockroachdb/cockroach/pull/146432 +[#146446]: https://github.com/cockroachdb/cockroach/pull/146446 +[#146448]: https://github.com/cockroachdb/cockroach/pull/146448 +[#146522]: 
https://github.com/cockroachdb/cockroach/pull/146522 +[#146567]: https://github.com/cockroachdb/cockroach/pull/146567 +[#146586]: https://github.com/cockroachdb/cockroach/pull/146586 +[#146683]: https://github.com/cockroachdb/cockroach/pull/146683 +[#146696]: https://github.com/cockroachdb/cockroach/pull/146696 +[#146744]: https://github.com/cockroachdb/cockroach/pull/146744 +[#146764]: https://github.com/cockroachdb/cockroach/pull/146764 +[#146836]: https://github.com/cockroachdb/cockroach/pull/146836 +[#146848]: https://github.com/cockroachdb/cockroach/pull/146848 +[#146883]: https://github.com/cockroachdb/cockroach/pull/146883 +[#146890]: https://github.com/cockroachdb/cockroach/pull/146890 +[#146949]: https://github.com/cockroachdb/cockroach/pull/146949 +[#146979]: https://github.com/cockroachdb/cockroach/pull/146979 +[#147021]: https://github.com/cockroachdb/cockroach/pull/147021 +[#147022]: https://github.com/cockroachdb/cockroach/pull/147022 +[#147045]: https://github.com/cockroachdb/cockroach/pull/147045 +[#147117]: https://github.com/cockroachdb/cockroach/pull/147117 +[#147187]: https://github.com/cockroachdb/cockroach/pull/147187 +[#147237]: https://github.com/cockroachdb/cockroach/pull/147237 +[#147248]: https://github.com/cockroachdb/cockroach/pull/147248 +[#147306]: https://github.com/cockroachdb/cockroach/pull/147306 +[#147357]: https://github.com/cockroachdb/cockroach/pull/147357 +[#147362]: https://github.com/cockroachdb/cockroach/pull/147362 +[#147368]: https://github.com/cockroachdb/cockroach/pull/147368 +[#147373]: https://github.com/cockroachdb/cockroach/pull/147373 +[#147462]: https://github.com/cockroachdb/cockroach/pull/147462 +[#147486]: https://github.com/cockroachdb/cockroach/pull/147486 +[#147511]: https://github.com/cockroachdb/cockroach/pull/147511 +[#147546]: https://github.com/cockroachdb/cockroach/pull/147546 +[#147548]: https://github.com/cockroachdb/cockroach/pull/147548 +[#147658]: https://github.com/cockroachdb/cockroach/pull/147658 diff --git a/src/current/_includes/releases/whats-new-intro.md b/src/current/_includes/releases/whats-new-intro.md index 738c61e4c88..706bde7ba5a 100644 --- a/src/current/_includes/releases/whats-new-intro.md +++ b/src/current/_includes/releases/whats-new-intro.md @@ -20,6 +20,12 @@ {% endunless %} {% endfor %} +{% comment %}Check if this major version has been released{% endcomment %} +{% assign rd = site.data.versions | where_exp: "rd", "rd.major_version == page.major_version" | first %} +{% if rd.release_date != "N/A" %} + {% assign released = true %} +{% endif %} + {% comment %}Some old pages don't have feature highlights and won't get LTS{% endcomment %} {% if page.major_version == 'v1.0' or page.major_version == 'v1.1' or @@ -89,13 +95,15 @@ CockroachDB {{ page.major_version }} is an [Innovation Release]({% link releases CockroachDB {{ page.major_version }}{% if lts == true %} [(LTS)]({% link releases/release-support-policy.md %}#support-phases){% endif %} is a required [Regular Release]({% link releases/release-support-policy.md %}#support-types). 
{% endif %} -Refer to [Major release types]({% link releases/release-support-policy.md %}#support-types) before installing or upgrading for release timing and support details.{% if no_highlights == false %} To learn what’s new in this release, refer to its [Feature Highlights](#feature-highlights).{% endif %} +Before installing or upgrading, refer to [Major release types]({% link releases/release-support-policy.md %}#support-types) for release timing and support details.{% if no_highlights == false and released == true %} To learn what's new in this release, refer to its [Feature Highlights](#feature-highlights).{% endif %} On this page, you can read about changes and find downloads for all production and testing releases of CockroachDB {{ page.major_version }}{% if lts == true %} [(LTS)]({% link releases/release-support-policy.md %}#support-phases){% endif %} - +{% comment %}Only show these bullet points if the version has been released{% endcomment %} +{% if released == true %} {% comment %}v1.0 has no #v1-0-0 anchor, and before GA other releases also do not.{% endcomment %} - For key feature enhancements in {{ page.major_version }} and other upgrade considerations, refer to the notes for {% if include.major_version.release_date != 'N/A' and page.major_version != 'v1.0' %}[{{ page.major_version }}.0](#{{ page.major_version | replace: '.', '-' }}-0){% else %}{{ page.major_version }} on this page{% endif %}. +{% endif %} {% endif %}{% comment %}End GA-only content{% endcomment %} - For details about release types, naming, and licensing, refer to the [Releases]({% link releases/index.md %}) page. - Be sure to also review the [Release Support Policy]({% link releases/release-support-policy.md %}). diff --git a/src/current/_includes/sidebar-data-v25.3.json b/src/current/_includes/sidebar-data-v25.3.json new file mode 100644 index 00000000000..e945a13df1a --- /dev/null +++ b/src/current/_includes/sidebar-data-v25.3.json @@ -0,0 +1,28 @@ +[ + { + "title": "Docs Home", + "is_top_level": true, + "urls": [ + "/" + ] + }, + {% include_cached v25.3/sidebar-data/get-started.json %}, + {% include_cached v25.3/sidebar-data/releases.json %}, + {% include_cached v25.3/sidebar-data/feature-overview.json %}, + {% include_cached v25.3/sidebar-data/resilience.json %}, + {% include_cached v25.3/sidebar-data/connect-to-cockroachdb.json %}, + {% include_cached v25.3/sidebar-data/migrate.json %}, + {% include_cached v25.3/sidebar-data/cloud-deployments.json %}, + {% include_cached v25.3/sidebar-data/self-hosted-deployments.json %}, + {% include_cached v25.3/sidebar-data/schema-design.json %}, + {% include_cached v25.3/sidebar-data/reads-and-writes.json %}, + {% include_cached v25.3/sidebar-data/stream-data.json %}, + {% include_cached v25.3/sidebar-data/cross-cluster-replication.json %}, + {% include_cached v25.3/sidebar-data/multi-region-capabilities.json %}, + {% include_cached v25.3/sidebar-data/optimize-performance.json %}, + {% include_cached v25.3/sidebar-data/troubleshooting.json %}, + {% include_cached v25.3/sidebar-data/sql.json %}, + {% include_cached v25.3/sidebar-data/reference.json %}, + {% include_cached v25.3/sidebar-data/faqs.json %}, + {% include_cached sidebar-data-cockroach-university.json %} +] diff --git a/src/current/_includes/v25.3/app/before-you-begin.md b/src/current/_includes/v25.3/app/before-you-begin.md new file mode 100644 index 00000000000..8daf2f91005 --- /dev/null +++ b/src/current/_includes/v25.3/app/before-you-begin.md @@ -0,0 +1,12 @@ +1.
[Install CockroachDB]({% link {{ page.version.version }}/install-cockroachdb.md %}). +1. Start up a [secure]({% link {{ page.version.version }}/secure-a-cluster.md %}) or [insecure]({% link {{ page.version.version }}/start-a-local-cluster.md %}) local cluster. +1. Choose the instructions that correspond to whether your cluster is secure or insecure: + +
+ + +
+ +
+{% include {{ page.version.version }}/prod-deployment/insecure-flag.md %} +
\ No newline at end of file diff --git a/src/current/_includes/v25.3/app/cc-free-tier-params.md b/src/current/_includes/v25.3/app/cc-free-tier-params.md new file mode 100644 index 00000000000..f8a196cdd8e --- /dev/null +++ b/src/current/_includes/v25.3/app/cc-free-tier-params.md @@ -0,0 +1,10 @@ +Where: + +- `{username}` and `{password}` specify the SQL username and password that you created earlier. +- `{globalhost}` is the name of the CockroachDB {{ site.data.products.cloud }} free tier host (e.g., `free-tier.gcp-us-central1.cockroachlabs.cloud`). +- `{path to the CA certificate}` is the path to the `cc-ca.crt` file that you downloaded from the CockroachDB {{ site.data.products.cloud }} Console. +- `{cluster_name}` is the name of your cluster. + +{{site.data.alerts.callout_info}} +If you are using the connection string that you [copied from the **Connection info** modal](#set-up-your-cluster-connection), your username, password, hostname, and cluster name will be pre-populated. +{{site.data.alerts.end}} \ No newline at end of file diff --git a/src/current/_includes/v25.3/app/create-a-database.md b/src/current/_includes/v25.3/app/create-a-database.md new file mode 100644 index 00000000000..b2fb4af6f79 --- /dev/null +++ b/src/current/_includes/v25.3/app/create-a-database.md @@ -0,0 +1,54 @@ +
+
+1. In the SQL shell, create the `bank` database that your application will use:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ sql
+    > CREATE DATABASE bank;
+    ~~~
+
+1. Create a SQL user for your app:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ sql
+    > CREATE USER <username> WITH PASSWORD '<password>';
+    ~~~
+
+    Take note of the username and password. You will use them in your application code later.
+
+1. Give the user the necessary permissions:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ sql
+    > GRANT ALL ON DATABASE bank TO <username>;
+    ~~~
+
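+    Optionally, verify the grant with [`SHOW GRANTS`]({% link {{ page.version.version }}/show-grants.md %}):
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ sql
+    > SHOW GRANTS ON DATABASE bank;
+    ~~~
+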
+ +
+
+1. If you haven't already, [download the CockroachDB binary]({% link {{ page.version.version }}/install-cockroachdb.md %}).
+1. Start the [built-in SQL shell]({% link {{ page.version.version }}/cockroach-sql.md %}) using the connection string you got from the CockroachDB {{ site.data.products.cloud }} Console:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ cockroach sql \
+    --url='<connection-string>'
+    ~~~
+
+1. In the SQL shell, create the `bank` database that your application will use:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ sql
+    > CREATE DATABASE bank;
+    ~~~
+
+1. Exit the SQL shell:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ sql
+    > \q
+    ~~~
+
+
\ No newline at end of file diff --git a/src/current/_includes/v25.3/app/create-maxroach-user-and-bank-database.md b/src/current/_includes/v25.3/app/create-maxroach-user-and-bank-database.md new file mode 100644 index 00000000000..4e81a23b6bc --- /dev/null +++ b/src/current/_includes/v25.3/app/create-maxroach-user-and-bank-database.md @@ -0,0 +1,32 @@ +Start the [built-in SQL shell]({% link {{ page.version.version }}/cockroach-sql.md %}): + +{% include_cached copy-clipboard.html %} +~~~ shell +$ cockroach sql --certs-dir=certs +~~~ + +In the SQL shell, issue the following statements to create the `maxroach` user and `bank` database: + +{% include_cached copy-clipboard.html %} +~~~ sql +> CREATE USER IF NOT EXISTS maxroach; +~~~ + +{% include_cached copy-clipboard.html %} +~~~ sql +> CREATE DATABASE bank; +~~~ + +Give the `maxroach` user the necessary permissions: + +{% include_cached copy-clipboard.html %} +~~~ sql +> GRANT ALL ON DATABASE bank TO maxroach; +~~~ + +Exit the SQL shell: + +{% include_cached copy-clipboard.html %} +~~~ sql +> \q +~~~ diff --git a/src/current/_includes/v25.3/app/for-a-complete-example-go.md b/src/current/_includes/v25.3/app/for-a-complete-example-go.md new file mode 100644 index 00000000000..5149489f6a6 --- /dev/null +++ b/src/current/_includes/v25.3/app/for-a-complete-example-go.md @@ -0,0 +1,4 @@ +For complete examples, see: + +- [Build a Go App with CockroachDB]({% link {{ page.version.version }}/build-a-go-app-with-cockroachdb.md %}) (pgx) +- [Build a Go App with CockroachDB and GORM]({% link {{ page.version.version }}/build-a-go-app-with-cockroachdb.md %}) diff --git a/src/current/_includes/v25.3/app/for-a-complete-example-java.md b/src/current/_includes/v25.3/app/for-a-complete-example-java.md new file mode 100644 index 00000000000..392ec2014d7 --- /dev/null +++ b/src/current/_includes/v25.3/app/for-a-complete-example-java.md @@ -0,0 +1,4 @@ +For complete examples, see: + +- [Build a Java App with CockroachDB]({% link {{ page.version.version }}/build-a-java-app-with-cockroachdb.md %}) (JDBC) +- [Build a Java App with CockroachDB and Hibernate]({% link {{ page.version.version }}/build-a-java-app-with-cockroachdb-hibernate.md %}) diff --git a/src/current/_includes/v25.3/app/for-a-complete-example-python.md b/src/current/_includes/v25.3/app/for-a-complete-example-python.md new file mode 100644 index 00000000000..29d0352eab3 --- /dev/null +++ b/src/current/_includes/v25.3/app/for-a-complete-example-python.md @@ -0,0 +1,6 @@ +For complete examples, see: + +- [Build a Python App with CockroachDB]({% link {{ page.version.version }}/build-a-python-app-with-cockroachdb-psycopg3.md %}) (psycopg3) +- [Build a Python App with CockroachDB and SQLAlchemy]({% link {{ page.version.version }}/build-a-python-app-with-cockroachdb-sqlalchemy.md %}) +- [Build a Python App with CockroachDB and Django]({% link {{ page.version.version }}/build-a-python-app-with-cockroachdb-django.md %}) +- [Build a Python App with CockroachDB and asyncpg]({% link {{ page.version.version }}/build-a-python-app-with-cockroachdb-asyncpg.md %}) diff --git a/src/current/_includes/v25.3/app/hibernate-dialects-note.md b/src/current/_includes/v25.3/app/hibernate-dialects-note.md new file mode 100644 index 00000000000..287f314d393 --- /dev/null +++ b/src/current/_includes/v25.3/app/hibernate-dialects-note.md @@ -0,0 +1,5 @@ +Versions of the Hibernate CockroachDB dialect correspond to the version of CockroachDB installed on your machine. 
For example, `org.hibernate.dialect.CockroachDB201Dialect` corresponds to CockroachDB v20.1 and later, and `org.hibernate.dialect.CockroachDB192Dialect` corresponds to CockroachDB v19.2 and later. + +All dialect versions are forward-compatible (e.g., CockroachDB v20.1 is compatible with `CockroachDB192Dialect`), as long as your application is not affected by any backward-incompatible changes listed in your CockroachDB version's [release notes]({% link releases/index.md %}). In the event of a CockroachDB version upgrade, using a previous version of the CockroachDB dialect will not break an application, but, to enable all features available in your version of CockroachDB, we recommend keeping the dialect version in sync with the installed version of CockroachDB. + +Not all versions of CockroachDB have a corresponding dialect yet. Use the dialect number that is closest to your installed version of CockroachDB. For example, use `CockroachDB201Dialect` when using CockroachDB v21.1 and later. diff --git a/src/current/_includes/v25.3/app/insecure/create-maxroach-user-and-bank-database.md b/src/current/_includes/v25.3/app/insecure/create-maxroach-user-and-bank-database.md new file mode 100644 index 00000000000..36c4814c12e --- /dev/null +++ b/src/current/_includes/v25.3/app/insecure/create-maxroach-user-and-bank-database.md @@ -0,0 +1,32 @@ +Start the [built-in SQL shell]({% link {{ page.version.version }}/cockroach-sql.md %}): + +{% include_cached copy-clipboard.html %} +~~~ shell +$ cockroach sql --insecure +~~~ + +In the SQL shell, issue the following statements to create the `maxroach` user and `bank` database: + +{% include_cached copy-clipboard.html %} +~~~ sql +> CREATE USER IF NOT EXISTS maxroach; +~~~ + +{% include_cached copy-clipboard.html %} +~~~ sql +> CREATE DATABASE bank; +~~~ + +Give the `maxroach` user the necessary permissions: + +{% include_cached copy-clipboard.html %} +~~~ sql +> GRANT ALL ON DATABASE bank TO maxroach; +~~~ + +Exit the SQL shell: + +{% include_cached copy-clipboard.html %} +~~~ sql +> \q +~~~ diff --git a/src/current/_includes/v25.3/app/insecure/jooq-basic-sample/Sample.java b/src/current/_includes/v25.3/app/insecure/jooq-basic-sample/Sample.java new file mode 100644 index 00000000000..d1a54a8ddd2 --- /dev/null +++ b/src/current/_includes/v25.3/app/insecure/jooq-basic-sample/Sample.java @@ -0,0 +1,215 @@ +package com.cockroachlabs; + +import com.cockroachlabs.example.jooq.db.Tables; +import com.cockroachlabs.example.jooq.db.tables.records.AccountsRecord; +import org.jooq.DSLContext; +import org.jooq.SQLDialect; +import org.jooq.Source; +import org.jooq.conf.RenderQuotedNames; +import org.jooq.conf.Settings; +import org.jooq.exception.DataAccessException; +import org.jooq.impl.DSL; + +import java.io.InputStream; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.util.*; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Function; + +import static com.cockroachlabs.example.jooq.db.Tables.ACCOUNTS; + +public class Sample { + + private static final Random RAND = new Random(); + private static final boolean FORCE_RETRY = false; + private static final String RETRY_SQL_STATE = "40001"; + private static final int MAX_ATTEMPT_COUNT = 6; + + private static Function addAccounts() { + return ctx -> { + long rv = 0; + + ctx.delete(ACCOUNTS).execute(); + ctx.batchInsert( + new AccountsRecord(1L, 1000L), + new AccountsRecord(2L, 250L), + new 
AccountsRecord(3L, 314159L) + ).execute(); + + rv = 1; + System.out.printf("APP: addAccounts() --> %d\n", rv); + return rv; + }; + } + + private static Function transferFunds(long fromId, long toId, long amount) { + return ctx -> { + long rv = 0; + + AccountsRecord fromAccount = ctx.fetchSingle(ACCOUNTS, ACCOUNTS.ID.eq(fromId)); + AccountsRecord toAccount = ctx.fetchSingle(ACCOUNTS, ACCOUNTS.ID.eq(toId)); + + if (!(amount > fromAccount.getBalance())) { + fromAccount.setBalance(fromAccount.getBalance() - amount); + toAccount.setBalance(toAccount.getBalance() + amount); + + ctx.batchUpdate(fromAccount, toAccount).execute(); + rv = amount; + System.out.printf("APP: transferFunds(%d, %d, %d) --> %d\n", fromId, toId, amount, rv); + } + + return rv; + }; + } + + // Test our retry handling logic if FORCE_RETRY is true. This + // method is only used to test the retry logic. It is not + // intended for production code. + private static Function forceRetryLogic() { + return ctx -> { + long rv = -1; + try { + System.out.printf("APP: testRetryLogic: BEFORE EXCEPTION\n"); + ctx.execute("SELECT crdb_internal.force_retry('1s')"); + } catch (DataAccessException e) { + System.out.printf("APP: testRetryLogic: AFTER EXCEPTION\n"); + throw e; + } + return rv; + }; + } + + private static Function getAccountBalance(long id) { + return ctx -> { + AccountsRecord account = ctx.fetchSingle(ACCOUNTS, ACCOUNTS.ID.eq(id)); + long balance = account.getBalance(); + System.out.printf("APP: getAccountBalance(%d) --> %d\n", id, balance); + return balance; + }; + } + + // Run SQL code in a way that automatically handles the + // transaction retry logic so we do not have to duplicate it in + // various places. + private static long runTransaction(DSLContext session, Function fn) { + AtomicLong rv = new AtomicLong(0L); + AtomicInteger attemptCount = new AtomicInteger(0); + + while (attemptCount.get() < MAX_ATTEMPT_COUNT) { + attemptCount.incrementAndGet(); + + if (attemptCount.get() > 1) { + System.out.printf("APP: Entering retry loop again, iteration %d\n", attemptCount.get()); + } + + if (session.connectionResult(connection -> { + connection.setAutoCommit(false); + System.out.printf("APP: BEGIN;\n"); + + if (attemptCount.get() == MAX_ATTEMPT_COUNT) { + String err = String.format("hit max of %s attempts, aborting", MAX_ATTEMPT_COUNT); + throw new RuntimeException(err); + } + + // This block is only used to test the retry logic. + // It is not necessary in production code. See also + // the method 'testRetryLogic()'. + if (FORCE_RETRY) { + session.fetch("SELECT now()"); + } + + try { + rv.set(fn.apply(session)); + if (rv.get() != -1) { + connection.commit(); + System.out.printf("APP: COMMIT;\n"); + return true; + } + } catch (DataAccessException | SQLException e) { + String sqlState = e instanceof SQLException ? ((SQLException) e).getSQLState() : ((DataAccessException) e).sqlState(); + + if (RETRY_SQL_STATE.equals(sqlState)) { + // Since this is a transaction retry error, we + // roll back the transaction and sleep a little + // before trying again. Each time through the + // loop we sleep for a little longer than the last + // time (A.K.A. exponential backoff). 
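+                        // With the formula below, attempt 1 sleeps for
+                        // roughly 200-300 ms and attempt 2 for roughly
+                        // 400-500 ms: 2^n * 100 ms plus up to 100 ms of
+                        // random jitter.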
+ System.out.printf("APP: retryable exception occurred:\n sql state = [%s]\n message = [%s]\n retry counter = %s\n", sqlState, e.getMessage(), attemptCount.get()); + System.out.printf("APP: ROLLBACK;\n"); + connection.rollback(); + int sleepMillis = (int)(Math.pow(2, attemptCount.get()) * 100) + RAND.nextInt(100); + System.out.printf("APP: Hit 40001 transaction retry error, sleeping %s milliseconds\n", sleepMillis); + try { + Thread.sleep(sleepMillis); + } catch (InterruptedException ignored) { + // no-op + } + rv.set(-1L); + } else { + throw e; + } + } + + return false; + })) { + break; + } + } + + return rv.get(); + } + + public static void main(String[] args) throws Exception { + try (Connection connection = DriverManager.getConnection( + "jdbc:postgresql://localhost:26257/bank?sslmode=disable", + "maxroach", + "" + )) { + DSLContext ctx = DSL.using(connection, SQLDialect.COCKROACHDB, new Settings() + .withExecuteLogging(true) + .withRenderQuotedNames(RenderQuotedNames.NEVER)); + + // Initialise database with db.sql script + try (InputStream in = Sample.class.getResourceAsStream("/db.sql")) { + ctx.parser().parse(Source.of(in).readString()).executeBatch(); + } + + long fromAccountId = 1; + long toAccountId = 2; + long transferAmount = 100; + + if (FORCE_RETRY) { + System.out.printf("APP: About to test retry logic in 'runTransaction'\n"); + runTransaction(ctx, forceRetryLogic()); + } else { + + runTransaction(ctx, addAccounts()); + long fromBalance = runTransaction(ctx, getAccountBalance(fromAccountId)); + long toBalance = runTransaction(ctx, getAccountBalance(toAccountId)); + if (fromBalance != -1 && toBalance != -1) { + // Success! + System.out.printf("APP: getAccountBalance(%d) --> %d\n", fromAccountId, fromBalance); + System.out.printf("APP: getAccountBalance(%d) --> %d\n", toAccountId, toBalance); + } + + // Transfer $100 from account 1 to account 2 + long transferResult = runTransaction(ctx, transferFunds(fromAccountId, toAccountId, transferAmount)); + if (transferResult != -1) { + // Success! + System.out.printf("APP: transferFunds(%d, %d, %d) --> %d \n", fromAccountId, toAccountId, transferAmount, transferResult); + + long fromBalanceAfter = runTransaction(ctx, getAccountBalance(fromAccountId)); + long toBalanceAfter = runTransaction(ctx, getAccountBalance(toAccountId)); + if (fromBalanceAfter != -1 && toBalanceAfter != -1) { + // Success! + System.out.printf("APP: getAccountBalance(%d) --> %d\n", fromAccountId, fromBalanceAfter); + System.out.printf("APP: getAccountBalance(%d) --> %d\n", toAccountId, toBalanceAfter); + } + } + } + } + } +} diff --git a/src/current/_includes/v25.3/app/insecure/jooq-basic-sample/jooq-basic-sample.zip b/src/current/_includes/v25.3/app/insecure/jooq-basic-sample/jooq-basic-sample.zip new file mode 100644 index 00000000000..f11f86b8f43 Binary files /dev/null and b/src/current/_includes/v25.3/app/insecure/jooq-basic-sample/jooq-basic-sample.zip differ diff --git a/src/current/_includes/v25.3/app/insecure/upperdb-basic-sample/main.go b/src/current/_includes/v25.3/app/insecure/upperdb-basic-sample/main.go new file mode 100644 index 00000000000..5c855356d7e --- /dev/null +++ b/src/current/_includes/v25.3/app/insecure/upperdb-basic-sample/main.go @@ -0,0 +1,185 @@ +package main + +import ( + "fmt" + "log" + "time" + + "github.com/upper/db/v4" + "github.com/upper/db/v4/adapter/cockroachdb" +) + +// The settings variable stores connection details. 
+var settings = cockroachdb.ConnectionURL{ + Host: "localhost", + Database: "bank", + User: "maxroach", + Options: map[string]string{ + // Insecure node. + "sslmode": "disable", + }, +} + +// Accounts is a handy way to represent a collection. +func Accounts(sess db.Session) db.Store { + return sess.Collection("accounts") +} + +// Account is used to represent a single record in the "accounts" table. +type Account struct { + ID uint64 `db:"id,omitempty"` + Balance int64 `db:"balance"` +} + +// Collection is required in order to create a relation between the Account +// struct and the "accounts" table. +func (a *Account) Store(sess db.Session) db.Store { + return Accounts(sess) +} + +// createTables creates all the tables that are neccessary to run this example. +func createTables(sess db.Session) error { + _, err := sess.SQL().Exec(` + CREATE TABLE IF NOT EXISTS accounts ( + ID SERIAL PRIMARY KEY, + balance INT + ) + `) + if err != nil { + return err + } + return nil +} + +// crdbForceRetry can be used to simulate a transaction error and +// demonstrate upper/db's ability to retry the transaction automatically. +// +// By default, upper/db will retry the transaction five times, if you want +// to modify this number use: sess.SetMaxTransactionRetries(n). +// +// This is only used for demonstration purposes and not intended +// for production code. +func crdbForceRetry(sess db.Session) error { + var err error + + // The first statement in a transaction can be retried transparently on the + // server, so we need to add a placeholder statement so that our + // force_retry() statement isn't the first one. + _, err = sess.SQL().Exec(`SELECT 1`) + if err != nil { + return err + } + + // If force_retry is called during the specified interval from the beginning + // of the transaction it returns a retryable error. If not, 0 is returned + // instead of an error. + _, err = sess.SQL().Exec(`SELECT crdb_internal.force_retry('1s'::INTERVAL)`) + if err != nil { + return err + } + + return nil +} + +func main() { + // Connect to the local CockroachDB node. + sess, err := cockroachdb.Open(settings) + if err != nil { + log.Fatal("cockroachdb.Open: ", err) + } + defer sess.Close() + + // Adjust this number to fit your specific needs (set to 5, by default) + // sess.SetMaxTransactionRetries(10) + + // Create the "accounts" table + createTables(sess) + + // Delete all the previous items in the "accounts" table. + err = Accounts(sess).Truncate() + if err != nil { + log.Fatal("Truncate: ", err) + } + + // Create a new account with a balance of 1000. + account1 := Account{Balance: 1000} + err = Accounts(sess).InsertReturning(&account1) + if err != nil { + log.Fatal("sess.Save: ", err) + } + + // Create a new account with a balance of 250. + account2 := Account{Balance: 250} + err = Accounts(sess).InsertReturning(&account2) + if err != nil { + log.Fatal("sess.Save: ", err) + } + + // Printing records + printRecords(sess) + + // Change the balance of the first account. + account1.Balance = 500 + err = sess.Save(&account1) + if err != nil { + log.Fatal("sess.Save: ", err) + } + + // Change the balance of the second account. + account2.Balance = 999 + err = sess.Save(&account2) + if err != nil { + log.Fatal("sess.Save: ", err) + } + + // Printing records + printRecords(sess) + + // Delete the first record. + err = sess.Delete(&account1) + if err != nil { + log.Fatal("Delete: ", err) + } + + startTime := time.Now() + + // Add a couple of new records within a transaction. 
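+	// sess.Tx runs the closure in a single transaction; as noted on
+	// crdbForceRetry above, upper/db retries it automatically (five
+	// times by default) when a retryable error is returned.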
+ err = sess.Tx(func(tx db.Session) error { + var err error + + if err = tx.Save(&Account{Balance: 887}); err != nil { + return err + } + + if time.Now().Sub(startTime) < time.Second*1 { + // Will fail continuously for 2 seconds. + if err = crdbForceRetry(tx); err != nil { + return err + } + } + + if err = tx.Save(&Account{Balance: 342}); err != nil { + return err + } + + return nil + }) + if err != nil { + log.Fatal("Could not commit transaction: ", err) + } + + // Printing records + printRecords(sess) +} + +func printRecords(sess db.Session) { + accounts := []Account{} + err := Accounts(sess).Find().All(&accounts) + if err != nil { + log.Fatal("Find: ", err) + } + log.Printf("Balances:") + for i := range accounts { + fmt.Printf("\taccounts[%d]: %d\n", accounts[i].ID, accounts[i].Balance) + } +} diff --git a/src/current/_includes/v25.3/app/java-tls-note.md b/src/current/_includes/v25.3/app/java-tls-note.md new file mode 100644 index 00000000000..fd490d2b0a8 --- /dev/null +++ b/src/current/_includes/v25.3/app/java-tls-note.md @@ -0,0 +1,13 @@ +CockroachDB supports TLS 1.2 and 1.3, and uses 1.3 by default. + +{% include common/tls-bad-cipher-warning.md %} + +[A bug in the TLS 1.3 implementation](https://bugs.openjdk.java.net/browse/JDK-8236039) in Java 11 versions lower than 11.0.7 and Java 13 versions lower than 13.0.3 makes the versions incompatible with CockroachDB. + +If an incompatible version is used, the client may throw the following exception: + +`javax.net.ssl.SSLHandshakeException: extension (5) should not be presented in certificate_request` + +For applications running Java 11 or 13, make sure that you have version 11.0.7 or higher, or 13.0.3 or higher. + +If you cannot upgrade to a version higher than 11.0.7 or 13.0.3, you must configure the application to use TLS 1.2. For example, when starting your app, use: `$ java -Djdk.tls.client.protocols=TLSv1.2 appName` diff --git a/src/current/_includes/v25.3/app/java-version-note.md b/src/current/_includes/v25.3/app/java-version-note.md new file mode 100644 index 00000000000..3d559314262 --- /dev/null +++ b/src/current/_includes/v25.3/app/java-version-note.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_info}} +We recommend using Java versions 8+ with CockroachDB. 
+{{site.data.alerts.end}} \ No newline at end of file diff --git a/src/current/_includes/v25.3/app/jooq-basic-sample/Sample.java b/src/current/_includes/v25.3/app/jooq-basic-sample/Sample.java new file mode 100644 index 00000000000..fd71726603e --- /dev/null +++ b/src/current/_includes/v25.3/app/jooq-basic-sample/Sample.java @@ -0,0 +1,215 @@ +package com.cockroachlabs; + +import com.cockroachlabs.example.jooq.db.Tables; +import com.cockroachlabs.example.jooq.db.tables.records.AccountsRecord; +import org.jooq.DSLContext; +import org.jooq.SQLDialect; +import org.jooq.Source; +import org.jooq.conf.RenderQuotedNames; +import org.jooq.conf.Settings; +import org.jooq.exception.DataAccessException; +import org.jooq.impl.DSL; + +import java.io.InputStream; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.util.*; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Function; + +import static com.cockroachlabs.example.jooq.db.Tables.ACCOUNTS; + +public class Sample { + + private static final Random RAND = new Random(); + private static final boolean FORCE_RETRY = false; + private static final String RETRY_SQL_STATE = "40001"; + private static final int MAX_ATTEMPT_COUNT = 6; + + private static Function addAccounts() { + return ctx -> { + long rv = 0; + + ctx.delete(ACCOUNTS).execute(); + ctx.batchInsert( + new AccountsRecord(1L, 1000L), + new AccountsRecord(2L, 250L), + new AccountsRecord(3L, 314159L) + ).execute(); + + rv = 1; + System.out.printf("APP: addAccounts() --> %d\n", rv); + return rv; + }; + } + + private static Function transferFunds(long fromId, long toId, long amount) { + return ctx -> { + long rv = 0; + + AccountsRecord fromAccount = ctx.fetchSingle(ACCOUNTS, ACCOUNTS.ID.eq(fromId)); + AccountsRecord toAccount = ctx.fetchSingle(ACCOUNTS, ACCOUNTS.ID.eq(toId)); + + if (!(amount > fromAccount.getBalance())) { + fromAccount.setBalance(fromAccount.getBalance() - amount); + toAccount.setBalance(toAccount.getBalance() + amount); + + ctx.batchUpdate(fromAccount, toAccount).execute(); + rv = amount; + System.out.printf("APP: transferFunds(%d, %d, %d) --> %d\n", fromId, toId, amount, rv); + } + + return rv; + }; + } + + // Test our retry handling logic if FORCE_RETRY is true. This + // method is only used to test the retry logic. It is not + // intended for production code. + private static Function forceRetryLogic() { + return ctx -> { + long rv = -1; + try { + System.out.printf("APP: testRetryLogic: BEFORE EXCEPTION\n"); + ctx.execute("SELECT crdb_internal.force_retry('1s')"); + } catch (DataAccessException e) { + System.out.printf("APP: testRetryLogic: AFTER EXCEPTION\n"); + throw e; + } + return rv; + }; + } + + private static Function getAccountBalance(long id) { + return ctx -> { + AccountsRecord account = ctx.fetchSingle(ACCOUNTS, ACCOUNTS.ID.eq(id)); + long balance = account.getBalance(); + System.out.printf("APP: getAccountBalance(%d) --> %d\n", id, balance); + return balance; + }; + } + + // Run SQL code in a way that automatically handles the + // transaction retry logic so we do not have to duplicate it in + // various places. 
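+    // The method below disables auto-commit, applies fn, and commits on
+    // success; on SQL state 40001 it rolls back, backs off, and retries,
+    // up to MAX_ATTEMPT_COUNT attempts.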
+ private static long runTransaction(DSLContext session, Function fn) { + AtomicLong rv = new AtomicLong(0L); + AtomicInteger attemptCount = new AtomicInteger(0); + + while (attemptCount.get() < MAX_ATTEMPT_COUNT) { + attemptCount.incrementAndGet(); + + if (attemptCount.get() > 1) { + System.out.printf("APP: Entering retry loop again, iteration %d\n", attemptCount.get()); + } + + if (session.connectionResult(connection -> { + connection.setAutoCommit(false); + System.out.printf("APP: BEGIN;\n"); + + if (attemptCount.get() == MAX_ATTEMPT_COUNT) { + String err = String.format("hit max of %s attempts, aborting", MAX_ATTEMPT_COUNT); + throw new RuntimeException(err); + } + + // This block is only used to test the retry logic. + // It is not necessary in production code. See also + // the method 'testRetryLogic()'. + if (FORCE_RETRY) { + session.fetch("SELECT now()"); + } + + try { + rv.set(fn.apply(session)); + if (rv.get() != -1) { + connection.commit(); + System.out.printf("APP: COMMIT;\n"); + return true; + } + } catch (DataAccessException | SQLException e) { + String sqlState = e instanceof SQLException ? ((SQLException) e).getSQLState() : ((DataAccessException) e).sqlState(); + + if (RETRY_SQL_STATE.equals(sqlState)) { + // Since this is a transaction retry error, we + // roll back the transaction and sleep a little + // before trying again. Each time through the + // loop we sleep for a little longer than the last + // time (A.K.A. exponential backoff). + System.out.printf("APP: retryable exception occurred:\n sql state = [%s]\n message = [%s]\n retry counter = %s\n", sqlState, e.getMessage(), attemptCount.get()); + System.out.printf("APP: ROLLBACK;\n"); + connection.rollback(); + int sleepMillis = (int)(Math.pow(2, attemptCount.get()) * 100) + RAND.nextInt(100); + System.out.printf("APP: Hit 40001 transaction retry error, sleeping %s milliseconds\n", sleepMillis); + try { + Thread.sleep(sleepMillis); + } catch (InterruptedException ignored) { + // no-op + } + rv.set(-1L); + } else { + throw e; + } + } + + return false; + })) { + break; + } + } + + return rv.get(); + } + + public static void main(String[] args) throws Exception { + try (Connection connection = DriverManager.getConnection( + "jdbc:postgresql://localhost:26257/bank?ssl=true&sslmode=require&sslrootcert=certs/ca.crt&sslkey=certs/client.maxroach.key.pk8&sslcert=certs/client.maxroach.crt", + "maxroach", + "" + )) { + DSLContext ctx = DSL.using(connection, SQLDialect.COCKROACHDB, new Settings() + .withExecuteLogging(true) + .withRenderQuotedNames(RenderQuotedNames.NEVER)); + + // Initialise database with db.sql script + try (InputStream in = Sample.class.getResourceAsStream("/db.sql")) { + ctx.parser().parse(Source.of(in).readString()).executeBatch(); + } + + long fromAccountId = 1; + long toAccountId = 2; + long transferAmount = 100; + + if (FORCE_RETRY) { + System.out.printf("APP: About to test retry logic in 'runTransaction'\n"); + runTransaction(ctx, forceRetryLogic()); + } else { + + runTransaction(ctx, addAccounts()); + long fromBalance = runTransaction(ctx, getAccountBalance(fromAccountId)); + long toBalance = runTransaction(ctx, getAccountBalance(toAccountId)); + if (fromBalance != -1 && toBalance != -1) { + // Success! 
+ System.out.printf("APP: getAccountBalance(%d) --> %d\n", fromAccountId, fromBalance); + System.out.printf("APP: getAccountBalance(%d) --> %d\n", toAccountId, toBalance); + } + + // Transfer $100 from account 1 to account 2 + long transferResult = runTransaction(ctx, transferFunds(fromAccountId, toAccountId, transferAmount)); + if (transferResult != -1) { + // Success! + System.out.printf("APP: transferFunds(%d, %d, %d) --> %d \n", fromAccountId, toAccountId, transferAmount, transferResult); + + long fromBalanceAfter = runTransaction(ctx, getAccountBalance(fromAccountId)); + long toBalanceAfter = runTransaction(ctx, getAccountBalance(toAccountId)); + if (fromBalanceAfter != -1 && toBalanceAfter != -1) { + // Success! + System.out.printf("APP: getAccountBalance(%d) --> %d\n", fromAccountId, fromBalanceAfter); + System.out.printf("APP: getAccountBalance(%d) --> %d\n", toAccountId, toBalanceAfter); + } + } + } + } + } +} diff --git a/src/current/_includes/v25.3/app/jooq-basic-sample/jooq-basic-sample.zip b/src/current/_includes/v25.3/app/jooq-basic-sample/jooq-basic-sample.zip new file mode 100644 index 00000000000..859305478c0 Binary files /dev/null and b/src/current/_includes/v25.3/app/jooq-basic-sample/jooq-basic-sample.zip differ diff --git a/src/current/_includes/v25.3/app/pkcs8-gen.md b/src/current/_includes/v25.3/app/pkcs8-gen.md new file mode 100644 index 00000000000..3a750a5eea9 --- /dev/null +++ b/src/current/_includes/v25.3/app/pkcs8-gen.md @@ -0,0 +1,8 @@ +You can pass the [`--also-generate-pkcs8-key` flag]({% link {{ page.version.version }}/cockroach-cert.md %}#flag-pkcs8) to [`cockroach cert`]({% link {{ page.version.version }}/cockroach-cert.md %}) to generate a key in [PKCS#8 format](https://tools.ietf.org/html/rfc5208), which is the standard key encoding format in Java. For example, if you have the user `max`: + +{% include_cached copy-clipboard.html %} +~~~ shell +$ cockroach cert create-client max --certs-dir=certs --ca-key=my-safe-directory/ca.key --also-generate-pkcs8-key +~~~ + +The generated PKCS8 key will be named `client.max.key.pk8`. diff --git a/src/current/_includes/v25.3/app/python/sqlalchemy/sqlalchemy-large-txns.py b/src/current/_includes/v25.3/app/python/sqlalchemy/sqlalchemy-large-txns.py new file mode 100644 index 00000000000..7a6ef82c2e3 --- /dev/null +++ b/src/current/_includes/v25.3/app/python/sqlalchemy/sqlalchemy-large-txns.py @@ -0,0 +1,57 @@ +from sqlalchemy import create_engine, Column, Float, Integer +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.orm import sessionmaker +from cockroachdb.sqlalchemy import run_transaction +from random import random + +Base = declarative_base() + +# The code below assumes you have run the following SQL statements. + +# CREATE DATABASE pointstore; + +# USE pointstore; + +# CREATE TABLE points ( +# id INT PRIMARY KEY DEFAULT unique_rowid(), +# x FLOAT NOT NULL, +# y FLOAT NOT NULL, +# z FLOAT NOT NULL +# ); + +engine = create_engine( + # For cockroach demo: + 'cockroachdb://:@:/bank?sslmode=require', + echo=True # Log SQL queries to stdout +) + + +class Point(Base): + __tablename__ = 'points' + id = Column(Integer, primary_key=True) + x = Column(Float) + y = Column(Float) + z = Column(Float) + + +def add_points(num_points): + chunk_size = 1000 # Tune this based on object sizes. 
+ + def add_points_helper(sess, chunk, num_points): + points = [] + for i in range(chunk, min(chunk + chunk_size, num_points)): + points.append( + Point(x=random()*1024, y=random()*1024, z=random()*1024) + ) + sess.bulk_save_objects(points) + + for chunk in range(0, num_points, chunk_size): + run_transaction( + sessionmaker(bind=engine), + lambda s: add_points_helper( + s, chunk, min(chunk + chunk_size, num_points) + ) + ) + + +add_points(10000) diff --git a/src/current/_includes/v25.3/app/retry-errors.md b/src/current/_includes/v25.3/app/retry-errors.md new file mode 100644 index 00000000000..7333d53f6bc --- /dev/null +++ b/src/current/_includes/v25.3/app/retry-errors.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_info}} +When running under the default [`SERIALIZABLE`]({% link {{ page.version.version }}/demo-serializable.md %}) isolation level, your application should [use a retry loop to handle transaction retry errors]({% link {{ page.version.version }}/query-behavior-troubleshooting.md %}#transaction-retry-errors) that can occur under [contention]({{ link_prefix }}performance-best-practices-overview.html#understanding-and-avoiding-transaction-contention). +{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/app/see-also-links.md b/src/current/_includes/v25.3/app/see-also-links.md new file mode 100644 index 00000000000..805672736af --- /dev/null +++ b/src/current/_includes/v25.3/app/see-also-links.md @@ -0,0 +1,9 @@ +You might also be interested in the following pages: + +- [Client Connection Parameters]({% link {{ page.version.version }}/connection-parameters.md %}) +- [Connection Pooling]({% link {{ page.version.version }}/connection-pooling.md %}) +- [Data Replication]({% link {{ page.version.version }}/demo-replication-and-rebalancing.md %}) +- [CockroachDB Resilience]({% link {{ page.version.version }}/demo-cockroachdb-resilience.md %}) +- [Replication & Rebalancing]({% link {{ page.version.version }}/demo-replication-and-rebalancing.md %}) +- [Cross-Cloud Migration]({% link {{ page.version.version }}/demo-automatic-cloud-migration.md %}) +- [Automated Operations]({% link {{ page.version.version }}/orchestrate-a-local-cluster-with-kubernetes-insecure.md %}) diff --git a/src/current/_includes/v25.3/app/start-cockroachdb.md b/src/current/_includes/v25.3/app/start-cockroachdb.md new file mode 100644 index 00000000000..5aeab710338 --- /dev/null +++ b/src/current/_includes/v25.3/app/start-cockroachdb.md @@ -0,0 +1,58 @@ +Choose whether to run a temporary local cluster or a free CockroachDB cluster on CockroachDB {{ site.data.products.serverless }}. The instructions below will adjust accordingly. + +
+ + +
+ +
+ +### Create a free cluster + +{% include cockroachcloud/quickstart/create-a-free-cluster.md %} + +### Set up your cluster connection + +The **Connection info** dialog shows information about how to connect to your cluster. + +1. Click the **Choose your OS** dropdown, and select the operating system of your local machine. + +1. Click the **Connection string** tab in the **Connection info** dialog. + +1. Open a new terminal on your local machine, and run the command provided in step **1** to download the CA certificate. This certificate is required by some clients connecting to CockroachDB {{ site.data.products.cloud }}. + +1. Copy the connection string provided in step **2** to a secure location. + + {{site.data.alerts.callout_info}} + The connection string is pre-populated with your username, password, cluster name, and other details. Your password, in particular, will be provided *only once*. Save it in a secure place (Cockroach Labs recommends a password manager) to connect to your cluster in the future. If you forget your password, you can reset it by going to the **SQL Users** page for the cluster, found at `https://cockroachlabs.cloud/cluster//users`. + {{site.data.alerts.end}} + +
+ +
+ +1. If you haven't already, [download the CockroachDB binary]({% link {{ page.version.version }}/install-cockroachdb.md %}). +1. Run the [`cockroach demo`]({% link {{ page.version.version }}/cockroach-demo.md %}) command: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach demo \ + --no-example-database + ~~~ + + This starts a temporary, in-memory cluster and opens an interactive SQL shell to the cluster. Any changes to the database will not persist after the cluster is stopped. + + {{site.data.alerts.callout_info}} + If `cockroach demo` fails due to SSL authentication, make sure you have cleared any previously downloaded CA certificates from the directory `~/.postgresql`. + {{site.data.alerts.end}} + +1. Take note of the `(sql)` connection string in the SQL shell welcome text: + + ~~~ + # Connection parameters: + # (webui) http://127.0.0.1:8080/demologin?password=demo76950&username=demo + # (sql) postgres://demo:demo76950@127.0.0.1:26257?sslmode=require + # (sql/unix) postgres://demo:demo76950@?host=%2Fvar%2Ffolders%2Fc8%2Fb_q93vjj0ybfz0fz0z8vy9zc0000gp%2FT%2Fdemo070856957&port=26257 + ~~~ + +
diff --git a/src/current/_includes/v25.3/app/upperdb-basic-sample/main.go b/src/current/_includes/v25.3/app/upperdb-basic-sample/main.go new file mode 100644 index 00000000000..3e838fe43e2 --- /dev/null +++ b/src/current/_includes/v25.3/app/upperdb-basic-sample/main.go @@ -0,0 +1,187 @@ +package main + +import ( + "fmt" + "log" + "time" + + "github.com/upper/db/v4" + "github.com/upper/db/v4/adapter/cockroachdb" +) + +// The settings variable stores connection details. +var settings = cockroachdb.ConnectionURL{ + Host: "localhost", + Database: "bank", + User: "maxroach", + Options: map[string]string{ + // Secure node. + "sslrootcert": "certs/ca.crt", + "sslkey": "certs/client.maxroach.key", + "sslcert": "certs/client.maxroach.crt", + }, +} + +// Accounts is a handy way to represent a collection. +func Accounts(sess db.Session) db.Store { + return sess.Collection("accounts") +} + +// Account is used to represent a single record in the "accounts" table. +type Account struct { + ID uint64 `db:"id,omitempty"` + Balance int64 `db:"balance"` +} + +// Collection is required in order to create a relation between the Account +// struct and the "accounts" table. +func (a *Account) Store(sess db.Session) db.Store { + return Accounts(sess) +} + +// createTables creates all the tables that are neccessary to run this example. +func createTables(sess db.Session) error { + _, err := sess.SQL().Exec(` + CREATE TABLE IF NOT EXISTS accounts ( + ID SERIAL PRIMARY KEY, + balance INT + ) + `) + if err != nil { + return err + } + return nil +} + +// crdbForceRetry can be used to simulate a transaction error and +// demonstrate upper/db's ability to retry the transaction automatically. +// +// By default, upper/db will retry the transaction five times, if you want +// to modify this number use: sess.SetMaxTransactionRetries(n). +// +// This is only used for demonstration purposes and not intended +// for production code. +func crdbForceRetry(sess db.Session) error { + var err error + + // The first statement in a transaction can be retried transparently on the + // server, so we need to add a placeholder statement so that our + // force_retry() statement isn't the first one. + _, err = sess.SQL().Exec(`SELECT 1`) + if err != nil { + return err + } + + // If force_retry is called during the specified interval from the beginning + // of the transaction it returns a retryable error. If not, 0 is returned + // instead of an error. + _, err = sess.SQL().Exec(`SELECT crdb_internal.force_retry('1s'::INTERVAL)`) + if err != nil { + return err + } + + return nil +} + +func main() { + // Connect to the local CockroachDB node. + sess, err := cockroachdb.Open(settings) + if err != nil { + log.Fatal("cockroachdb.Open: ", err) + } + defer sess.Close() + + // Adjust this number to fit your specific needs (set to 5, by default) + // sess.SetMaxTransactionRetries(10) + + // Create the "accounts" table + createTables(sess) + + // Delete all the previous items in the "accounts" table. + err = Accounts(sess).Truncate() + if err != nil { + log.Fatal("Truncate: ", err) + } + + // Create a new account with a balance of 1000. + account1 := Account{Balance: 1000} + err = Accounts(sess).InsertReturning(&account1) + if err != nil { + log.Fatal("sess.Save: ", err) + } + + // Create a new account with a balance of 250. 
+ account2 := Account{Balance: 250} + err = Accounts(sess).InsertReturning(&account2) + if err != nil { + log.Fatal("sess.Save: ", err) + } + + // Printing records + printRecords(sess) + + // Change the balance of the first account. + account1.Balance = 500 + err = sess.Save(&account1) + if err != nil { + log.Fatal("sess.Save: ", err) + } + + // Change the balance of the second account. + account2.Balance = 999 + err = sess.Save(&account2) + if err != nil { + log.Fatal("sess.Save: ", err) + } + + // Printing records + printRecords(sess) + + // Delete the first record. + err = sess.Delete(&account1) + if err != nil { + log.Fatal("Delete: ", err) + } + + startTime := time.Now() + + // Add a couple of new records within a transaction. + err = sess.Tx(func(tx db.Session) error { + var err error + + if err = tx.Save(&Account{Balance: 887}); err != nil { + return err + } + + if time.Now().Sub(startTime) < time.Second*1 { + // Will fail continuously for 2 seconds. + if err = crdbForceRetry(tx); err != nil { + return err + } + } + + if err = tx.Save(&Account{Balance: 342}); err != nil { + return err + } + + return nil + }) + if err != nil { + log.Fatal("Could not commit transaction: ", err) + } + + // Printing records + printRecords(sess) +} + +func printRecords(sess db.Session) { + accounts := []Account{} + err := Accounts(sess).Find().All(&accounts) + if err != nil { + log.Fatal("Find: ", err) + } + log.Printf("Balances:") + for i := range accounts { + fmt.Printf("\taccounts[%d]: %d\n", accounts[i].ID, accounts[i].Balance) + } +} diff --git a/src/current/_includes/v25.3/backups/advanced-examples-list.md b/src/current/_includes/v25.3/backups/advanced-examples-list.md new file mode 100644 index 00000000000..2d6c9a5956d --- /dev/null +++ b/src/current/_includes/v25.3/backups/advanced-examples-list.md @@ -0,0 +1,11 @@ +For examples of advanced `BACKUP` and `RESTORE` use cases, see: + +- [Incremental backups with a specified destination]({% link {{ page.version.version }}/take-full-and-incremental-backups.md %}#incremental-backups-with-explicitly-specified-destinations) +- [Backup with revision history and point-in-time restore]({% link {{ page.version.version }}/take-backups-with-revision-history-and-restore-from-a-point-in-time.md %}) +- [Locality-aware backup and restore]({% link {{ page.version.version }}/take-and-restore-locality-aware-backups.md %}) +- [Encrypted backup and restore]({% link {{ page.version.version }}/take-and-restore-encrypted-backups.md %}) +- [Restore into a different database]({% link {{ page.version.version }}/restore.md %}#restore-tables-into-a-different-database) +- [Remove the foreign key before restore]({% link {{ page.version.version }}/restore.md %}#remove-the-foreign-key-before-restore) +- [Restoring users from `system.users` backup]({% link {{ page.version.version }}/restore.md %}#restoring-users-from-system-users-backup) +- [Show an incremental backup at a different location]({% link {{ page.version.version }}/show-backup.md %}#show-a-backup-taken-with-the-incremental-location-option) +- [Exclude a table's data from backups]({% link {{ page.version.version }}/take-full-and-incremental-backups.md %}#exclude-a-tables-data-from-backups) diff --git a/src/current/_includes/v25.3/backups/aws-auth-note.md b/src/current/_includes/v25.3/backups/aws-auth-note.md new file mode 100644 index 00000000000..b32ddde5c69 --- /dev/null +++ b/src/current/_includes/v25.3/backups/aws-auth-note.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_info}} +The examples in this section use the 
**default** `AUTH=specified` parameter. For more detail on how to use `implicit` authentication with Amazon S3 buckets, read [Use Cloud Storage for Bulk Operations — Authentication]({% link {{ page.version.version }}/cloud-storage-authentication.md %}). +{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/backups/azure-storage-tier-support.md b/src/current/_includes/v25.3/backups/azure-storage-tier-support.md new file mode 100644 index 00000000000..993b39ed3e7 --- /dev/null +++ b/src/current/_includes/v25.3/backups/azure-storage-tier-support.md @@ -0,0 +1 @@ +Cockroach Labs supports Azure's General Purpose v2 Standard storage account type. For more details, refer to the Azure [Storage Account documentation](https://learn.microsoft.com/azure/storage/blobs/storage-blobs-introduction#storage-accounts). \ No newline at end of file diff --git a/src/current/_includes/v25.3/backups/azure-url-encode.md b/src/current/_includes/v25.3/backups/azure-url-encode.md new file mode 100644 index 00000000000..7a16e20d2df --- /dev/null +++ b/src/current/_includes/v25.3/backups/azure-url-encode.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_info}} +Azure storage containers **require** a [url encoded](https://wikipedia.org/wiki/Percent-encoding) `ACCOUNT_KEY` since it is base64-encoded and may contain +, /, = characters. For more detail on how to pass your Azure Storage credentials with this parameter, read [Use Cloud Storage for Bulk Operations — Authentication]({% link {{ page.version.version }}/cloud-storage-authentication.md %}). +{{site.data.alerts.end}} \ No newline at end of file diff --git a/src/current/_includes/v25.3/backups/backup-options-for-schedules.md b/src/current/_includes/v25.3/backups/backup-options-for-schedules.md new file mode 100644 index 00000000000..391c4dcf968 --- /dev/null +++ b/src/current/_includes/v25.3/backups/backup-options-for-schedules.md @@ -0,0 +1,8 @@ + Option | Value | Description +-----------------------------------------------------------------+-------------------------+------------------------------ +`revision_history` | [`BOOL`]({% link {{ page.version.version }}/bool.md %}) / None | Create a backup with full [revision history]({% link {{ page.version.version }}/take-backups-with-revision-history-and-restore-from-a-point-in-time.md %}), which records every change made to the cluster within the garbage collection period leading up to and including the given timestamp.

You can specify a backup with revision history without any value e.g., `WITH revision_history`. Or, you can explicitly define `WITH revision_history = 'true' / 'false'`. The `revision_history` option defaults to `true` when used with [`BACKUP`]({% link {{ page.version.version }}/backup.md %}) or `CREATE SCHEDULE FOR BACKUP`. A value is **required** when using `ALTER BACKUP SCHEDULE` to {% if page.name == "alter-backup-schedule.md" %} [apply different options to scheduled backups](#apply-different-options-to-scheduled-backups). {% else %} [alter a backup schedule](alter-backup-schedule.html). {% endif %} +`encryption_passphrase` | [`STRING`]({% link {{ page.version.version }}/string.md %}) | The passphrase used to [encrypt the files]({% link {{ page.version.version }}/take-and-restore-encrypted-backups.md %}) (`BACKUP` manifest and data files) that the `BACKUP` statement generates. This same passphrase is needed to decrypt the file when it is used to [restore]({% link {{ page.version.version }}/take-and-restore-encrypted-backups.md %}) and to list the contents of the backup when using [`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}). There is no practical limit on the length of the passphrase. +`detached` | [`BOOL`]({% link {{ page.version.version }}/bool.md %}) / None | **Note:** Backups running on a schedule have the `detached` option applied implicitly. Therefore, you cannot modify this option for scheduled backups.

When a backup runs in `detached` mode, it will execute asynchronously. The job ID will be returned after the backup [job creation]({% link {{ page.version.version }}/backup-architecture.md %}#job-creation-phase) completes. Note that with `detached` specified, further job information and the job completion status will not be returned. For more on the differences between the returned job data, see the [example]({% link {{ page.version.version }}/backup.md %}#run-a-backup-asynchronously). To check on the job status, use the [`SHOW JOBS`](show-jobs.html) statement. +`EXECUTION LOCALITY` | Key-value pairs | Restricts the execution of the backup to nodes that match the defined locality filter requirements. For example, `WITH EXECUTION LOCALITY = 'region=us-west-1a,cloud=aws'`.

Refer to [Take Locality-restricted backups]({% link {{ page.version.version }}/take-locality-restricted-backups.md %}) for usage and reference detail. +`kms` | [`STRING`]({% link {{ page.version.version }}/string.md %}) | The URI of the cryptographic key stored in a key management service (KMS), or a comma-separated list of key URIs, used to [take and restore encrypted backups]({% link {{ page.version.version }}/take-and-restore-encrypted-backups.md %}#examples). Refer to [URI Formats]({% link {{ page.version.version }}/take-and-restore-encrypted-backups.md %}#uri-formats). The key or keys are used to encrypt the manifest and data files that the `BACKUP` statement generates and to decrypt them during a [restore]({% link {{ page.version.version }}/take-and-restore-encrypted-backups.md %}#examples) operation, and to list the contents of the backup when using [`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}).

AWS KMS, Google Cloud KMS, and Azure Key Vault are supported. +`incremental_location` | [`STRING`]({% link {{ page.version.version }}/string.md %}) | Create an incremental backup in a different location than the default incremental backup location.

`WITH incremental_location = 'explicit_incrementals_URI'`

See [Incremental backups with explicitly specified destinations]({% link {{ page.version.version }}/take-full-and-incremental-backups.md %}#incremental-backups-with-explicitly-specified-destinations) for usage. \ No newline at end of file diff --git a/src/current/_includes/v25.3/backups/backup-options.md b/src/current/_includes/v25.3/backups/backup-options.md new file mode 100644 index 00000000000..a4c3eeae595 --- /dev/null +++ b/src/current/_includes/v25.3/backups/backup-options.md @@ -0,0 +1,8 @@ + Option | Value | Description +-----------------------------------------------------------------+-------------------------+------------------------------ +`revision_history` | [`BOOL`]({% link {{ page.version.version }}/bool.md %}) / None | Create a backup with full [revision history]({% link {{ page.version.version }}/take-backups-with-revision-history-and-restore-from-a-point-in-time.md %}), which records every change made to the cluster within the garbage collection period leading up to and including the given timestamp.

You can specify a backup with revision history without any value, e.g., `WITH revision_history`. Or, you can explicitly define `WITH revision_history = 'true' / 'false'`. `revision_history` defaults to `true` when used with `BACKUP` or `CREATE SCHEDULE FOR BACKUP`. A value is **required** when using [`ALTER BACKUP SCHEDULE`]({% link {{ page.version.version }}/alter-backup-schedule.md %}). +`encryption_passphrase` | [`STRING`]({% link {{ page.version.version }}/string.md %}) | The passphrase used to [encrypt the files]({% link {{ page.version.version }}/take-and-restore-encrypted-backups.md %}) (`BACKUP` manifest and data files) that the `BACKUP` statement generates. This same passphrase is needed to decrypt the file when it is used to [restore]({% link {{ page.version.version }}/take-and-restore-encrypted-backups.md %}) and to list the contents of the backup when using [`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}). There is no practical limit on the length of the passphrase. +`detached` | [`BOOL`]({% link {{ page.version.version }}/bool.md %}) / None | When a backup runs in `detached` mode, it will execute asynchronously. The job ID will be returned after the backup [job creation]({% link {{ page.version.version }}/backup-architecture.md %}#job-creation-phase) completes. Note that with `detached` specified, further job information and the job completion status will not be returned. For more on the differences between the returned job data, see the [example]({% link {{ page.version.version }}/backup.md %}#run-a-backup-asynchronously). To check on the job status, use the [`SHOW JOBS`](show-jobs.html) statement. Backups running on a [schedule](create-schedule-for-backup.html) have the `detached` option applied implicitly.

To run a backup within a [transaction](transactions.html), use the `detached` option. +`EXECUTION LOCALITY` | Key-value pairs | Restricts the execution of the backup to nodes that match the defined locality filter requirements. For example, `WITH EXECUTION LOCALITY = 'region=us-west-1a,cloud=aws'`.

Refer to [Take Locality-restricted backups]({% link {{ page.version.version }}/take-locality-restricted-backups.md %}) for usage and reference detail. +`kms` | [`STRING`]({% link {{ page.version.version }}/string.md %}) | The URI of the cryptographic key stored in a key management service (KMS), or a comma-separated list of key URIs, used to [take and restore encrypted backups]({% link {{ page.version.version }}/take-and-restore-encrypted-backups.md %}#examples). Refer to [URI Formats]({% link {{ page.version.version }}/take-and-restore-encrypted-backups.md %}#uri-formats). The key or keys are used to encrypt the manifest and data files that the `BACKUP` statement generates and to decrypt them during a [restore]({% link {{ page.version.version }}/take-and-restore-encrypted-backups.md %}#examples) operation, and to list the contents of the backup when using [`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}).

AWS KMS, Google Cloud KMS, and Azure Key Vault are supported. +`incremental_location` | [`STRING`]({% link {{ page.version.version }}/string.md %}) | Create an incremental backup in a different location than the default incremental backup location.

`WITH incremental_location = 'explicit_incrementals_URI'`

See [Incremental backups with explicitly specified destinations]({% link {{ page.version.version }}/take-full-and-incremental-backups.md %}#incremental-backups-with-explicitly-specified-destinations) for usage. \ No newline at end of file diff --git a/src/current/_includes/v25.3/backups/backup-storage-collision.md b/src/current/_includes/v25.3/backups/backup-storage-collision.md new file mode 100644 index 00000000000..c52cc1524e5 --- /dev/null +++ b/src/current/_includes/v25.3/backups/backup-storage-collision.md @@ -0,0 +1 @@ +You will encounter an error if you run multiple [backup collections]({% link {{ page.version.version }}/take-full-and-incremental-backups.md %}#backup-collections) to the same storage URI. Each collection's URI must be unique. \ No newline at end of file diff --git a/src/current/_includes/v25.3/backups/bulk-auth-options.md b/src/current/_includes/v25.3/backups/bulk-auth-options.md new file mode 100644 index 00000000000..57bc67ea190 --- /dev/null +++ b/src/current/_includes/v25.3/backups/bulk-auth-options.md @@ -0,0 +1,6 @@ +The examples in this section use one of the following storage URIs: + +- External connections, which allow you to represent an external storage or sink URI. You can then specify the external connection's name in statements rather than the provider-specific URI. For detail on using external connections, see the [`CREATE EXTERNAL CONNECTION`]({% link {{ page.version.version }}/create-external-connection.md %}) page. +- Amazon S3 connection strings with the **default** `AUTH=specified` parameter. For guidance on using `AUTH=implicit` authentication with Amazon S3 buckets instead, read [Cloud Storage Authentication]({% link {{ page.version.version }}/cloud-storage-authentication.md %}). + +For guidance on connecting to other storage options or using other authentication parameters instead, read [Use Cloud Storage]({% link {{ page.version.version }}/use-cloud-storage.md %}#example-file-urls). \ No newline at end of file diff --git a/src/current/_includes/v25.3/backups/cap-parameter-ext-connection.md b/src/current/_includes/v25.3/backups/cap-parameter-ext-connection.md new file mode 100644 index 00000000000..2628b8527a1 --- /dev/null +++ b/src/current/_includes/v25.3/backups/cap-parameter-ext-connection.md @@ -0,0 +1 @@ +If you are creating an {% if page.name == "create-external-connection.md" %}external connection{% else %}[external connection]({% link {{ page.version.version }}/create-external-connection.md %}){% endif %} with [`BACKUP` query parameters]({% link {{ page.version.version }}/backup.md %}#query-parameters) or [authentication]({% link {{ page.version.version }}/cloud-storage-authentication.md %}) parameters, you must pass them in uppercase otherwise you will receive an `unknown query parameters` error. \ No newline at end of file diff --git a/src/current/_includes/v25.3/backups/check-files-validate.md b/src/current/_includes/v25.3/backups/check-files-validate.md new file mode 100644 index 00000000000..b54cf5ce9a6 --- /dev/null +++ b/src/current/_includes/v25.3/backups/check-files-validate.md @@ -0,0 +1,32 @@ +1. Use `SHOW BACKUP ... 
check_files` with a backup for validation: + + {% include_cached copy-clipboard.html %} + ~~~sql + SHOW BACKUP "2022/09/19-134123.64" IN "s3://bucket?AWS_ACCESS_KEY_ID={Access Key ID}&AWS_SECRET_ACCESS_KEY={Secret Access Key}" WITH check_files; + ~~~ + + This will return the following output after validating that the backup files are correct and present: + + ~~~ + database_name | parent_schema_name | object_name | object_type | backup_type | start_time | end_time | size_bytes | rows | is_full_cluster | file_bytes + ----------------+--------------------+----------------------------+-------------+-------------+------------+----------------------------+------------+-------+-----------------+------------- + NULL | NULL | movr | database | full | NULL | 2022-09-19 13:41:23.645189 | NULL | NULL | f | NULL + movr | NULL | public | schema | full | NULL | 2022-09-19 13:41:23.645189 | NULL | NULL | f | NULL + movr | public | users | table | full | NULL | 2022-09-19 13:41:23.645189 | 31155 | 340 | f | 16598 + movr | public | vehicles | table | full | NULL | 2022-09-19 13:41:23.645189 | 22282 | 113 | f | 12459 + movr | public | rides | table | full | NULL | 2022-09-19 13:41:23.645189 | 261950 | 902 | f | 135831 + movr | public | vehicle_location_histories | table | full | NULL | 2022-09-19 13:41:23.645189 | 742557 | 10850 | f | 318583 + movr | public | promo_codes | table | full | NULL | 2022-09-19 13:41:23.645189 | 228320 | 1034 | f | 118376 + movr | public | user_promo_codes | table | full | NULL | 2022-09-19 13:41:23.645189 | 9320 | 111 | f | 4832 + ~~~ + + The output will return `file_bytes` along with the columns you receive from `SHOW BACKUP` without `check_files`. The `file_bytes` column indicates the estimated bytes in external storage for a particular table object. For more detail on the output columns, see the `SHOW BACKUP` [Response]({% link {{ page.version.version }}/show-backup.md %}#response) table. + +1. If `SHOW BACKUP ... check_files` cannot read from a file, it will return an error message similar to the following: + + ~~~ + ERROR: The following files are missing from the backup: + s3:/bucket-name/2022/09/19-134123.64/data/797981063156727810.sst + ~~~ + + `SHOW BACKUP ... check_files` will return up to ten file paths for incorrect or missing files. \ No newline at end of file diff --git a/src/current/_includes/v25.3/backups/control-schedule-privileges.md b/src/current/_includes/v25.3/backups/control-schedule-privileges.md new file mode 100644 index 00000000000..13f0012ea7f --- /dev/null +++ b/src/current/_includes/v25.3/backups/control-schedule-privileges.md @@ -0,0 +1,3 @@ +- Members of the [`admin` role]({% link {{ page.version.version }}/security-reference/authorization.md %}#default-roles). By default, the `root` user belongs to the `admin` role. +- Owners of a backup schedule, i.e., the user that [created the backup schedule]({% link {{ page.version.version }}/create-schedule-for-backup.md %}). +- Owners of a changefeed schedule, i.e., the user that [created the changefeed schedule]({% link {{ page.version.version }}/create-schedule-for-changefeed.md %}). 
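For example, a user who meets one of these criteria can pause and resume a schedule. A minimal sketch, assuming a hypothetical schedule ID:

{% include_cached copy-clipboard.html %}
~~~ sql
-- Pause a backup schedule; the ID shown is a hypothetical placeholder.
PAUSE SCHEDULE 589963390487363585;

-- Resume the same schedule later.
RESUME SCHEDULE 589963390487363585;
~~~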
\ No newline at end of file diff --git a/src/current/_includes/v25.3/backups/destination-privileges.md b/src/current/_includes/v25.3/backups/destination-privileges.md new file mode 100644 index 00000000000..fd5d019e97d --- /dev/null +++ b/src/current/_includes/v25.3/backups/destination-privileges.md @@ -0,0 +1,14 @@ +You can grant a user the `EXTERNALIOIMPLICITACCESS` [system-level privilege]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges). + +Either the `EXTERNALIOIMPLICITACCESS` system-level privilege or the [`admin`]({% link {{ page.version.version }}/security-reference/authorization.md %}#admin-role) role is required for the following scenarios: + +- Interacting with a cloud storage resource using [`IMPLICIT` authentication]({% link {{ page.version.version }}/cloud-storage-authentication.md %}). +- Using a [custom endpoint](https://docs.aws.amazon.com/sdk-for-go/api/aws/endpoints/) on S3. +- Using the [`cockroach nodelocal upload`]({% link {{ page.version.version }}/cockroach-nodelocal-upload.md %}) command. + +No special privilege is required for: + +- Interacting with an Amazon S3 or Google Cloud Storage resource using `SPECIFIED` credentials. Azure Storage is always `SPECIFIED` by default. +- Using [Userfile]({% link {{ page.version.version }}/use-userfile-storage.md %}) storage. + +{% include {{ page.version.version }}/misc/bulk-permission-note.md %} \ No newline at end of file diff --git a/src/current/_includes/v25.3/backups/encrypted-backup-description.md b/src/current/_includes/v25.3/backups/encrypted-backup-description.md new file mode 100644 index 00000000000..a81f545aaf6 --- /dev/null +++ b/src/current/_includes/v25.3/backups/encrypted-backup-description.md @@ -0,0 +1,11 @@ +You can encrypt full or incremental backups with a passphrase by using the [`encryption_passphrase` option]({% link {{ page.version.version }}/backup.md %}#with-encryption-passphrase). Files written by the backup (including `BACKUP` manifests and data files) are encrypted using the specified passphrase to derive a key. To restore the encrypted backup, the same `encryption_passphrase` option (with the same passphrase) must be included in the [`RESTORE`]({% link {{ page.version.version }}/restore.md %}) statement. + +When used with [incremental backups]({% link {{ page.version.version }}/take-full-and-incremental-backups.md %}#incremental-backups), the `encryption_passphrase` option is applied to all the [backup file URLs]({% link {{ page.version.version }}/backup.md %}#backup-file-urls), which means the same passphrase must be used when appending another incremental backup to an existing backup. Similarly, when used with [locality-aware backups]({% link {{ page.version.version }}/take-and-restore-locality-aware-backups.md %}), the passphrase provided is applied to files in all localities. + +Encryption is done using [AES-256-GCM](https://wikipedia.org/wiki/Galois/Counter_Mode), and GCM is used to both encrypt and authenticate the files. A random [salt](https://wikipedia.org/wiki/Salt_(cryptography)) is used to derive a once-per-backup [AES](https://wikipedia.org/wiki/Advanced_Encryption_Standard) key from the specified passphrase, and then a random [initialization vector](https://wikipedia.org/wiki/Initialization_vector) is used per-file. CockroachDB uses [PBKDF2](https://wikipedia.org/wiki/PBKDF2) with 64,000 iterations for the key derivation.
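To illustrate the flow described above, a minimal sketch of taking and restoring a passphrase-encrypted backup (the storage URI, credentials, and passphrase are placeholders):

{% include_cached copy-clipboard.html %}
~~~ sql
-- Take a full backup encrypted with a passphrase.
BACKUP INTO 's3://{bucket name}?AWS_ACCESS_KEY_ID={placeholder}&AWS_SECRET_ACCESS_KEY={placeholder}'
    WITH encryption_passphrase = '{passphrase}';

-- The same passphrase must be supplied to restore from that backup.
RESTORE FROM LATEST IN 's3://{bucket name}?AWS_ACCESS_KEY_ID={placeholder}&AWS_SECRET_ACCESS_KEY={placeholder}'
    WITH encryption_passphrase = '{passphrase}';
~~~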
+ +{{site.data.alerts.callout_info}} +`BACKUP` and `RESTORE` will use more memory when using encryption, as both the plain-text and cipher-text of a given file are held in memory during encryption and decryption. +{{site.data.alerts.end}} + +For an example of an encrypted backup, see [Create an encrypted backup]({% link {{ page.version.version }}/take-and-restore-encrypted-backups.md %}#take-an-encrypted-backup-using-a-passphrase). diff --git a/src/current/_includes/v25.3/backups/existing-operation-service-account.md b/src/current/_includes/v25.3/backups/existing-operation-service-account.md new file mode 100644 index 00000000000..dc0f0996320 --- /dev/null +++ b/src/current/_includes/v25.3/backups/existing-operation-service-account.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_info}} +If you already have the service account that contains permissions for the operation, ensure that you give the identity service account access to this service account. Click on your service account and navigate to the **Permissions** tab. Then, use the process in [step 3](#step-3-give-the-identity-service-account-the-token-creator-role) to complete this. +{{site.data.alerts.end}} \ No newline at end of file diff --git a/src/current/_includes/v25.3/backups/external-io-implicit-flag.md b/src/current/_includes/v25.3/backups/external-io-implicit-flag.md new file mode 100644 index 00000000000..4abb21f78f6 --- /dev/null +++ b/src/current/_includes/v25.3/backups/external-io-implicit-flag.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_info}} +If the use of implicit credentials is disabled with the [`--external-io-disable-implicit-credentials` flag]({% link {{ page.version.version }}/cockroach-start.md %}#security), you will receive an error when you access external cloud storage services with `AUTH=implicit`. +{{site.data.alerts.end}} \ No newline at end of file diff --git a/src/current/_includes/v25.3/backups/file-size-setting.md b/src/current/_includes/v25.3/backups/file-size-setting.md new file mode 100644 index 00000000000..2ddef08efac --- /dev/null +++ b/src/current/_includes/v25.3/backups/file-size-setting.md @@ -0,0 +1,5 @@ +{{site.data.alerts.callout_info}} +To set a target for the amount of backup data written to each backup file, use the `bulkio.backup.file_size` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}). + +See the [`SET CLUSTER SETTING`]({% link {{ page.version.version }}/set-cluster-setting.md %}) page for more details on using cluster settings. +{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/backups/full-cluster-restore-validation.md b/src/current/_includes/v25.3/backups/full-cluster-restore-validation.md new file mode 100644 index 00000000000..c4196e6e198 --- /dev/null +++ b/src/current/_includes/v25.3/backups/full-cluster-restore-validation.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_info}} +Validation of full-cluster restores with `schema_only` must be run on an empty cluster in the same way as a complete [full-cluster restore]({% link {{ page.version.version }}/restore.md %}#full-cluster). Once you have successfully validated the restore, you can destroy the test cluster. 
+{{site.data.alerts.end}} \ No newline at end of file diff --git a/src/current/_includes/v25.3/backups/gcs-auth-note.md b/src/current/_includes/v25.3/backups/gcs-auth-note.md new file mode 100644 index 00000000000..4c52b8625b7 --- /dev/null +++ b/src/current/_includes/v25.3/backups/gcs-auth-note.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_info}} +The examples in this section use the `AUTH=specified` parameter, which will be the default behavior in v21.2 and beyond for connecting to Google Cloud Storage. For more detail on how to pass your Google Cloud Storage credentials with this parameter, or, how to use `implicit` authentication, read [Use Cloud Storage for Bulk Operations — Authentication]({% link {{ page.version.version }}/cloud-storage-authentication.md %}). +{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/backups/gcs-default-deprec.md b/src/current/_includes/v25.3/backups/gcs-default-deprec.md new file mode 100644 index 00000000000..008ad61f4f9 --- /dev/null +++ b/src/current/_includes/v25.3/backups/gcs-default-deprec.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_info}} +**Deprecation notice:** Currently, GCS connections default to the `cloudstorage.gs.default.key` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}). This default behavior will no longer be supported in v21.2. If you are relying on this default behavior, we recommend adjusting your queries and scripts to now specify the `AUTH` parameter you want to use. Similarly, if you are using the `cloudstorage.gs.default.key` cluster setting to authorize your GCS connection, we recommend switching to use `AUTH=specified` or `AUTH=implicit`. `AUTH=specified` will be the default behavior in v21.2 and beyond. +{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/backups/locality-aware-access.md b/src/current/_includes/v25.3/backups/locality-aware-access.md new file mode 100644 index 00000000000..d0a57842341 --- /dev/null +++ b/src/current/_includes/v25.3/backups/locality-aware-access.md @@ -0,0 +1 @@ +A successful locality-aware backup job requires that each node in the cluster has access to each storage location. This is because any node in the cluster can claim the job and become the [_coordinator_]({% link {{ page.version.version }}/backup-architecture.md %}#job-creation-phase) node. \ No newline at end of file diff --git a/src/current/_includes/v25.3/backups/locality-aware-backups.md b/src/current/_includes/v25.3/backups/locality-aware-backups.md new file mode 100644 index 00000000000..8ce87c53654 --- /dev/null +++ b/src/current/_includes/v25.3/backups/locality-aware-backups.md @@ -0,0 +1,39 @@ +{{site.data.alerts.callout_info}} +`SHOW BACKUP` is able to display metadata using `check_files` for locality-aware backups taken with the [`incremental_location`]({% link {{ page.version.version }}/show-backup.md %}#show-a-backup-taken-with-the-incremental-location-option) option. 
+{{site.data.alerts.end}} + +To view a list of [locality-aware backups]({% link {{ page.version.version }}/take-and-restore-locality-aware-backups.md %}), pass the endpoint [collection URI]({% link {{ page.version.version }}/backup.md %}#backup-file-urls) that is set as the `default` location with `COCKROACH_LOCALITY=default`: + +{% include_cached copy-clipboard.html %} +~~~ sql +> SHOW BACKUPS IN 's3://{default collection URI}/{path}?AWS_ACCESS_KEY_ID={placeholder}&AWS_SECRET_ACCESS_KEY={placeholder}'; +~~~ + +~~~ + path +------------------------- +/2023/02/23-150925.62 +/2023/03/08-192859.44 +(2 rows) +~~~ + +To view a [locality-aware backup]({% link {{ page.version.version }}/take-and-restore-locality-aware-backups.md %}), pass locality-aware backup URIs to `SHOW BACKUP`: + +{% include_cached copy-clipboard.html %} +~~~ sql +> SHOW BACKUP FROM LATEST IN ('s3://{bucket name}/locality?AWS_ACCESS_KEY_ID={placeholder}&AWS_SECRET_ACCESS_KEY={placeholder}&COCKROACH_LOCALITY=default', 's3://{bucket name}/locality?AWS_ACCESS_KEY_ID={placeholder}&AWS_SECRET_ACCESS_KEY={placeholder}&COCKROACH_LOCALITY=region%3Dus-west'); +~~~ + +~~~ + database_name | parent_schema_name | object_name | object_type | backup_type | start_time | end_time | size_bytes | rows | is_full_cluster +----------------+--------------------+----------------------------+-------------+-------------+------------+----------------------------+------------+------+------------------ + NULL | NULL | movr | database | full | NULL | 2023-02-23 15:09:25.625777 | NULL | NULL | f + movr | NULL | public | schema | full | NULL | 2023-02-23 15:09:25.625777 | NULL | NULL | f + movr | public | users | table | full | NULL | 2023-02-23 15:09:25.625777 | 5633 | 58 | f + movr | public | vehicles | table | full | NULL | 2023-02-23 15:09:25.625777 | 3617 | 17 | f + movr | public | rides | table | full | NULL | 2023-02-23 15:09:25.625777 | 159269 | 511 | f + movr | public | vehicle_location_histories | table | full | NULL | 2023-02-23 15:09:25.625777 | 79963 | 1092 | f + movr | public | promo_codes | table | full | NULL | 2023-02-23 15:09:25.625777 | 221763 | 1003 | f + movr | public | user_promo_codes | table | full | NULL | 2023-02-23 15:09:25.625777 | 927 | 11 | f +(8 rows) +~~~ diff --git a/src/current/_includes/v25.3/backups/locality-aware-multi-tenant.md b/src/current/_includes/v25.3/backups/locality-aware-multi-tenant.md new file mode 100644 index 00000000000..896d29db2d6 --- /dev/null +++ b/src/current/_includes/v25.3/backups/locality-aware-multi-tenant.md @@ -0,0 +1 @@ +Both CockroachDB {{ site.data.products.standard }} and CockroachDB {{ site.data.products.basic }} clusters operate with a different architecture compared to CockroachDB {{ site.data.products.core }}. These architectural differences have implications for how locality-aware backups can run. {{ site.data.products.standard }} and {{ site.data.products.basic }} clusters will scale resources depending on whether they are actively in use. This makes it less likely to have a SQL pod available in every locality. As a result, your cluster may not have a SQL pod in the locality where the data resides, which can lead to the cluster uploading that data to a storage bucket in a locality where you do have active SQL pods. You should consider this as you plan a backup strategy that must comply with [data domiciling]({% link {{ page.version.version }}/data-domiciling.md %}) requirements.
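For reference, a minimal sketch of the statement that takes a locality-aware backup like the one shown above (bucket names, credentials, and the region filter are placeholders):

{% include_cached copy-clipboard.html %}
~~~ sql
-- Nodes with the locality region=us-west back up to the second URI;
-- all other nodes back up to the default URI.
BACKUP INTO ('s3://{bucket name}/locality?AWS_ACCESS_KEY_ID={placeholder}&AWS_SECRET_ACCESS_KEY={placeholder}&COCKROACH_LOCALITY=default', 's3://{bucket name}/locality?AWS_ACCESS_KEY_ID={placeholder}&AWS_SECRET_ACCESS_KEY={placeholder}&COCKROACH_LOCALITY=region%3Dus-west');
~~~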
diff --git a/src/current/_includes/v25.3/backups/metrics-per-node.md b/src/current/_includes/v25.3/backups/metrics-per-node.md new file mode 100644 index 00000000000..a1ec6e0b350 --- /dev/null +++ b/src/current/_includes/v25.3/backups/metrics-per-node.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_info}} +Metrics are reported per node. Therefore, it is necessary to retrieve metrics from every node in the cluster. For example, if you are monitoring whether a backup fails, it is necessary to track `scheduled_backup_failed` on each node. +{{site.data.alerts.end}} \ No newline at end of file diff --git a/src/current/_includes/v25.3/backups/no-incremental-restore.md b/src/current/_includes/v25.3/backups/no-incremental-restore.md new file mode 100644 index 00000000000..6415ec7e034 --- /dev/null +++ b/src/current/_includes/v25.3/backups/no-incremental-restore.md @@ -0,0 +1 @@ +When you restore from an incremental backup, you're restoring the **entire** table, database, or cluster. CockroachDB uses both the latest (or a [specific]({% link {{ page.version.version }}/restore.md %}#restore-a-specific-full-or-incremental-backup)) incremental backup and the full backup during this process. You cannot restore an incremental backup without a full backup. Furthermore, it is not possible to restore over a [table]({% link {{ page.version.version }}/restore.md %}#tables), [database]({% link {{ page.version.version }}/restore.md %}#databases), or [cluster](restore.html#full-cluster) with existing data. Refer to [Restore types](restore.html#restore-types) for detail on the types of backups you can restore. diff --git a/src/current/_includes/v25.3/backups/object-dependency.md b/src/current/_includes/v25.3/backups/object-dependency.md new file mode 100644 index 00000000000..07bcbc698f3 --- /dev/null +++ b/src/current/_includes/v25.3/backups/object-dependency.md @@ -0,0 +1,13 @@ +Dependent objects must be {% if page.name == "restore.md" %} restored {% else %} backed up {% endif %} at the same time as the objects they depend on. When you back up a table, it will not include any dependent tables, [views]({% link {{ page.version.version }}/views.md %}), or [sequences]({% link {{ page.version.version }}/create-sequence.md %}). + +For example, if you back up [view]({% link {{ page.version.version }}/views.md %}) `v` that depends on table `t`, it will only back up `v`, not `t`. When you try to restore `v`, the restore will fail because the referenced table is not present in the backup. + +Alternatively, you can pass a `skip` option with {% if page.name == "restore.md" %} `RESTORE` {% else %} [`RESTORE`]({% link {{ page.version.version }}/restore.md %}) {% endif %} to skip the dependency instead: + +Dependent object | Depends on | Skip option +-------|------------+------------- +Table with [foreign key]({% link {{ page.version.version }}/foreign-key.md %}) constraints | The table it `REFERENCES`. | [`skip_missing_foreign_keys`]({% link {{ page.version.version }}/restore.md %}#skip_missing_foreign_keys) +Table with a [sequence]({% link {{ page.version.version }}/create-sequence.md %}) | The sequence. | [`skip_missing_sequences`]({% link {{ page.version.version }}/restore.md %}#skip-missing-sequences) +[Views]({% link {{ page.version.version }}/views.md %}) | The tables used in the view's `SELECT` statement. 
| [`skip_missing_views`]({% link {{ page.version.version }}/restore.md %}#skip-missing-views) + +We recommend treating tables that have [foreign keys]({% link {{ page.version.version }}/foreign-key.md %}), contribute to [views]({% link {{ page.version.version }}/views.md %}), or use sequences or user-defined types as a single unit together with their dependencies. While you can restore individual tables, you may find that backing up and restoring at the database level is more convenient. \ No newline at end of file diff --git a/src/current/_includes/v25.3/backups/old-syntax-removed.md b/src/current/_includes/v25.3/backups/old-syntax-removed.md new file mode 100644 index 00000000000..7052fe0d3af --- /dev/null +++ b/src/current/_includes/v25.3/backups/old-syntax-removed.md @@ -0,0 +1,5 @@ +{{site.data.alerts.callout_danger}} +The `BACKUP ... TO` and `RESTORE ... FROM {storage_uri}` syntax has been removed from CockroachDB v24.3 and later. + +For details on the syntax to run `BACKUP` and `RESTORE`, refer to the {% if page.name == "backup.md" %} [backup](#examples) {% else %} [backup]({% link {{ page.version.version }}/backup.md %}#examples) {% endif %} and {% if page.name == "restore.md" %} [restore](#examples) {% else %} [restore]({% link {{ page.version.version }}/restore.md %}#examples) {% endif %} examples. +{{site.data.alerts.end}} \ No newline at end of file diff --git a/src/current/_includes/v25.3/backups/protected-timestamps.md b/src/current/_includes/v25.3/backups/protected-timestamps.md new file mode 100644 index 00000000000..31b931f4cc4 --- /dev/null +++ b/src/current/_includes/v25.3/backups/protected-timestamps.md @@ -0,0 +1,5 @@ +Scheduled backups ensure that the data to be backed up is protected from garbage collection until it has been successfully backed up. This active management of [protected timestamps]({% link {{ page.version.version }}/architecture/storage-layer.md %}#protected-timestamps) means that you can run scheduled backups at a cadence independent from the [GC TTL]({% link {{ page.version.version }}/configure-replication-zones.md %}#gc-ttlseconds) of the data. This is unlike non-scheduled backups that are tightly coupled to the GC TTL. See [Garbage collection and backups]({% link {{ page.version.version }}/take-full-and-incremental-backups.md %}#garbage-collection-and-backups) for more detail. + +The data being backed up will not be eligible for garbage collection until a successful backup completes. At this point, the schedule will release the existing protected timestamp record and write a new one to protect data for the next backup that is scheduled to run. It is important to consider that when a scheduled backup fails, data will accumulate until the next successful backup. Resolving the backup failure or [dropping the backup schedule]({% link {{ page.version.version }}/drop-schedules.md %}) will make the data eligible for garbage collection once again. + +You can also use the `exclude_data_from_backup` option with a scheduled backup as a way to prevent protected timestamps from prolonging garbage collection on a table. See the example [Exclude a table's data from backups]({% link {{ page.version.version }}/take-full-and-incremental-backups.md %}#exclude-a-tables-data-from-backups) for usage information.
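As the linked example describes, `exclude_data_from_backup` is set as a table storage parameter. A minimal sketch, assuming a hypothetical table name:

{% include_cached copy-clipboard.html %}
~~~ sql
-- Exclude this table's row data from backups so that protected
-- timestamps no longer delay garbage collection for it.
ALTER TABLE db.high_churn_table SET (exclude_data_from_backup = true);
~~~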
\ No newline at end of file diff --git a/src/current/_includes/v25.3/backups/pts-schedules-incremental.md b/src/current/_includes/v25.3/backups/pts-schedules-incremental.md new file mode 100644 index 00000000000..b8cb26aff0b --- /dev/null +++ b/src/current/_includes/v25.3/backups/pts-schedules-incremental.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_info}} +If you are creating incremental backups as part of a [backup schedule]({% link {{ page.version.version }}/create-schedule-for-backup.md %}), [protected timestamps]({% link {{ page.version.version }}/architecture/storage-layer.md %}#protected-timestamps) will ensure the backup revision data is not garbage collected, which allows you to lower the GC TTL. See [Protected timestamps and scheduled backups]({% link {{ page.version.version }}/create-schedule-for-backup.md %}#protected-timestamps-and-scheduled-backups) for more detail. +{{site.data.alerts.end}} \ No newline at end of file diff --git a/src/current/_includes/v25.3/backups/recommend-backups-for-upgrade.md b/src/current/_includes/v25.3/backups/recommend-backups-for-upgrade.md new file mode 100644 index 00000000000..2ef075abf9c --- /dev/null +++ b/src/current/_includes/v25.3/backups/recommend-backups-for-upgrade.md @@ -0,0 +1,6 @@ +{% if page.path contains "cockroachcloud" %} +[Managed backups]({% link cockroachcloud/managed-backups.md %}) are automated backups of CockroachDB {{ site.data.products.cloud }} clusters that are stored by Cockroach Labs in cloud storage. By default, Cockroach Labs takes and retains managed backups in all Cloud clusters. + +When upgrading to a major release, you can optionally [take a self-managed backup]({% link cockroachcloud/take-and-restore-self-managed-backups.md %}) of your cluster to your own cloud storage, as an extra layer of protection in case the upgrade leads to issues. +{% else %} +CockroachDB is designed with high fault tolerance. However, taking regular backups of your data is an operational best practice for [disaster recovery]({% link {{ page.version.version }}/disaster-recovery-planning.md %}) planning.{% endif %} diff --git a/src/current/_includes/v25.3/backups/retry-failure.md b/src/current/_includes/v25.3/backups/retry-failure.md new file mode 100644 index 00000000000..e29f6a8d3a6 --- /dev/null +++ b/src/current/_includes/v25.3/backups/retry-failure.md @@ -0,0 +1 @@ +If a backup job encounters too many retryable errors, it will enter a [`failed` state]({% link {{ page.version.version }}/show-jobs.md %}#job-status) with the most recent error, which allows subsequent backups the chance to succeed. Refer to the [Backup and Restore Monitoring]({% link {{ page.version.version }}/backup-and-restore-monitoring.md %}) page for metrics to track backup failures. \ No newline at end of file diff --git a/src/current/_includes/v25.3/backups/schedule-options.md b/src/current/_includes/v25.3/backups/schedule-options.md new file mode 100644 index 00000000000..ff28741b521 --- /dev/null +++ b/src/current/_includes/v25.3/backups/schedule-options.md @@ -0,0 +1,7 @@ + Option | Value | Description +----------------------------+-----------------------------------------+------------------------------ +`first_run` | [`TIMESTAMPTZ`]({% link {{ page.version.version }}/timestamp.md %}) / `now` | Execute the schedule at the specified time in the future. If not specified, the default behavior is to execute the schedule based on its next `RECURRING` time. 
+`on_execution_failure` | `retry` / `reschedule` / `pause` | If an error occurs during the backup execution, do the following:
  • `retry`: Retry the backup right away.
  • `reschedule`: Retry the backup by rescheduling it based on the `RECURRING` expression.
  • `pause`: Pause the schedule. This requires manual intervention to [resume the schedule]({% link {{ page.version.version }}/resume-schedules.md %}).
**Default**: `reschedule` +`on_previous_running` | `start` / `skip` / `wait` | If the previous backup started by the schedule is still running, do the following:
  • `start`: Start the new backup anyway, even if the previous one is still running.
  • `skip`: Skip the new backup and run the next backup based on the `RECURRING` expression.
  • `wait`: Wait for the previous backup to complete.
**Default**: `wait`. The option affects backups started by the full backup schedule only. Incremental backups are always set to `wait`. +`ignore_existing_backups` | N/A | If backups were already created in the [destination]({% link {{ page.version.version }}/use-cloud-storage.md %}) that the new schedule references, this option must be passed to acknowledge that the new schedule may be backing up different objects. +`updates_cluster_last_backup_time_metric` | N/A | ([`admin` privileges]({% link {{ page.version.version }}/security-reference/authorization.md %}#admin-role) required) When set during backup schedule creation, this option updates the [`schedules_backup_last_completed_time`]({% link {{ page.version.version }}/backup-and-restore-monitoring.md %}#available-metrics) metric for the scheduled backup. diff --git a/src/current/_includes/v25.3/backups/scheduled-backups-tip.md b/src/current/_includes/v25.3/backups/scheduled-backups-tip.md new file mode 100644 index 00000000000..62ced81e069 --- /dev/null +++ b/src/current/_includes/v25.3/backups/scheduled-backups-tip.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_success}} +We recommend using scheduled backups to automate daily backups of your cluster. +{{site.data.alerts.end}} \ No newline at end of file diff --git a/src/current/_includes/v25.3/backups/show-backup-replace-diagram.html b/src/current/_includes/v25.3/backups/show-backup-replace-diagram.html new file mode 100644 index 00000000000..539b72b45da --- /dev/null +++ b/src/current/_includes/v25.3/backups/show-backup-replace-diagram.html @@ -0,0 +1,50 @@ +
+<!-- SVG railroad diagram (markup omitted): SHOW BACKUPS IN collectionURI | SHOW BACKUP [SCHEMAS] FROM subdirectory IN collectionURI [WITH kv_option_list | WITH OPTIONS ( kv_option_list )] -->
\ No newline at end of file diff --git a/src/current/_includes/v25.3/backups/storage-collision-examples.md b/src/current/_includes/v25.3/backups/storage-collision-examples.md new file mode 100644 index 00000000000..b6c3b85de05 --- /dev/null +++ b/src/current/_includes/v25.3/backups/storage-collision-examples.md @@ -0,0 +1 @@ +For example, if you have a backup schedule running backups for the database `users`, the full and incremental backups should use the same storage URI for both the full and incremental schedules. (`CREATE SCHEDULE FOR BACKUP` will automatically create two schedules for the full and incremental backup to the same storage URI.) If there is another backup schedule, for the database `accounts`, the full and incremental backups for `accounts` should have the same storage URI. However, the storage URI for the `accounts` backup collection should be different from the storage URI for the `users` backup collection. \ No newline at end of file diff --git a/src/current/_includes/v25.3/backups/support-products.md b/src/current/_includes/v25.3/backups/support-products.md new file mode 100644 index 00000000000..d4687c816d2 --- /dev/null +++ b/src/current/_includes/v25.3/backups/support-products.md @@ -0,0 +1,3 @@ +## Supported products + +The feature described on this page is available in **CockroachDB {{ site.data.products.basic }}**, **CockroachDB {{ site.data.products.standard }}**, **CockroachDB {{ site.data.products.advanced }}**, and **CockroachDB {{ site.data.products.core }}** clusters when you are running [self-managed backups]({% link cockroachcloud/take-and-restore-self-managed-backups.md %}). For a full list of features, refer to [Backup and restore product support]({% link {{ page.version.version }}/backup-and-restore-overview.md %}#backup-and-restore-support). diff --git a/src/current/_includes/v25.3/backups/updated-backup-privileges.md b/src/current/_includes/v25.3/backups/updated-backup-privileges.md new file mode 100644 index 00000000000..013fa0f4d8d --- /dev/null +++ b/src/current/_includes/v25.3/backups/updated-backup-privileges.md @@ -0,0 +1,36 @@ +{{site.data.alerts.callout_info}} +Starting in v22.2, CockroachDB introduces a new [system-level privilege model]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges) that provides finer control over a user's privilege to work with the database, including taking backups. + +There is continued support for the [legacy privilege model](#required-privileges-using-the-legacy-privilege-model) for backups in v22.2; however, it **will be removed** in a future release of CockroachDB. We recommend implementing the new privilege model that follows in this section for all new and existing backups. +{{site.data.alerts.end}} + +You can [grant]({% link {{ page.version.version }}/grant.md %}#grant-privileges-on-specific-tables-in-a-database) the `BACKUP` privilege to a user or role depending on the type of backup: + +Backup | Privilege +-------+----------- +Cluster | Grant a user the `BACKUP` [system-level privilege]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges). For example, `GRANT SYSTEM BACKUP TO user;`. +Database | Grant a user the `BACKUP` privilege on the target database. For example, `GRANT BACKUP ON DATABASE test_db TO user;`. +Table | Grant a user the `BACKUP` privilege at the table level. This gives the user the privilege to back up the schema and all user-defined types that are associated with the table.
For example, `GRANT BACKUP ON TABLE test_db.table TO user;`. + +The listed privileges do not cascade to objects lower in the schema tree. For example, if you are granted database-level `BACKUP` privileges, this does not give you the privilege to back up a table. If you need the `BACKUP` privilege on a database to apply to all newly created tables in that database, use [`DEFAULT PRIVILEGES`]({% link {{ page.version.version }}/security-reference/authorization.md %}#default-privileges). You can add `BACKUP` to the user or role's default privileges with [`ALTER DEFAULT PRIVILEGES`]({% link {{ page.version.version }}/alter-default-privileges.md %}#grant-default-privileges-to-a-specific-role). + +{{site.data.alerts.callout_info}} +You can grant the `BACKUP` privilege to a user or role **without** the `SELECT` privilege on a table. As a result, these users will be able to take backups, but they will not be able to run a `SELECT` query on that data directly. However, these users could still read this data indirectly, by restoring it from any backups they produce. +{{site.data.alerts.end}} + +Members of the [`admin` role]({% link {{ page.version.version }}/security-reference/authorization.md %}#admin-role) can run all three types of backups (cluster, database, and table) without the need to grant a specific `BACKUP` privilege. However, we recommend using the `BACKUP` privilege model to create users or roles and grant them `BACKUP` privileges as necessary for stronger access control. + +### Privileges for managing a backup job + +To manage a backup job with [`PAUSE JOB`]({% link {{ page.version.version }}/pause-job.md %}), [`RESUME JOB`]({% link {{ page.version.version }}/resume-job.md %}), or [`CANCEL JOB`]({% link {{ page.version.version }}/cancel-job.md %}), users must have at least one of the following: + +- Membership in the [`admin` role]({% link {{ page.version.version }}/security-reference/authorization.md %}#admin-role). +- The [`CONTROLJOB` role option]({% link {{ page.version.version }}/security-reference/authorization.md %}#role-options). + +To view a backup job with [`SHOW JOB`]({% link {{ page.version.version }}/show-jobs.md %}), users must have at least one of the following: + +- The [`VIEWJOB` privilege]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges), which allows you to view all jobs (including `admin`-owned jobs). +- Membership in the [`admin` role]({% link {{ page.version.version }}/security-reference/authorization.md %}#admin-role). +- The [`CONTROLJOB` role option]({% link {{ page.version.version }}/security-reference/authorization.md %}#role-options). + +See [`GRANT`]({% link {{ page.version.version }}/grant.md %}) for detail on granting privileges to a role or user. \ No newline at end of file diff --git a/src/current/_includes/v25.3/backups/view-scheduled-backups.md b/src/current/_includes/v25.3/backups/view-scheduled-backups.md new file mode 100644 index 00000000000..5f3fee97e56 --- /dev/null +++ b/src/current/_includes/v25.3/backups/view-scheduled-backups.md @@ -0,0 +1,7 @@ + When a [backup is created by a schedule]({% link {{ page.version.version }}/create-schedule-for-backup.md %}), it is stored within a collection of backups in the given location.
To view details for a backup created by a schedule, you can use the following: + +- `SHOW BACKUPS IN collectionURI` statement to [view a list of the full backup's subdirectories]({% link {{ page.version.version }}/show-backup.md %}#view-a-list-of-the-available-full-backup-subdirectories). +- `SHOW BACKUP FROM subdirectory IN collectionURI` statement to [view a list of the full and incremental backups that are stored in a specific full backup's subdirectory]({% link {{ page.version.version }}/show-backup.md %}#view-a-list-of-the-full-and-incremental-backups-in-a-specific-full-backup-subdirectory). +- Use the [Schedules page]({% link {{ page.version.version }}/ui-schedules-page.md %}) in the [DB Console]({% link {{ page.version.version }}/ui-overview.md %}) to view a list of created backup schedules and their individual details. + +For more details, see [`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}). \ No newline at end of file diff --git a/src/current/_includes/v25.3/backups/zone-configs-overwritten-during-restore.md b/src/current/_includes/v25.3/backups/zone-configs-overwritten-during-restore.md new file mode 100644 index 00000000000..0d6e9e29614 --- /dev/null +++ b/src/current/_includes/v25.3/backups/zone-configs-overwritten-during-restore.md @@ -0,0 +1 @@ +During a [cluster restore]({% link {{ page.version.version }}/restore.md %}#full-cluster), any [zone configurations]({% link {{ page.version.version }}/configure-replication-zones.md %}) present on the destination cluster are **overwritten** with the zone configurations from the [backed-up cluster]({% link {{ page.version.version }}/backup.md %}#back-up-a-cluster). If no customized zone configurations were on the cluster when the backup was taken, then after the restore the destination cluster will use the zone configuration from the [`RANGE DEFAULT` configuration]({% link {{ page.version.version }}/configure-replication-zones.md %}#view-the-default-replication-zone). diff --git a/src/current/_includes/v25.3/backward-incompatible/alpha.1.md b/src/current/_includes/v25.3/backward-incompatible/alpha.1.md new file mode 100644 index 00000000000..e9a2efb7fb8 --- /dev/null +++ b/src/current/_includes/v25.3/backward-incompatible/alpha.1.md @@ -0,0 +1,15 @@ +- CockroachDB no longer performs environment variable expansion in the parameter `--certs-dir`. Uses like `--certs-dir='$HOME/path'` (expansion by CockroachDB) can be replaced by `--certs-dir="$HOME/path"` (expansion by the Unix shell). [#81298][#81298] +- In the Cockroach CLI, [`BOOL` values](../v23.1/bool.html) are now formatted as `t` or `f` instead of `True` or `False`. [#81943][#81943] +- Removed the `cockroach quit` command. It has been deprecated since v20.1. To [shut down a node](../v23.1/node-shutdown.html) gracefully, send a `SIGTERM` signal to it. [#82988][#82988] +- Added a cluster version to allow the [Pebble storage engine](../v23.1/architecture/storage-layer.html#pebble) to recombine certain SSTables (specifically, user keys that are split across multiple files in a level of the [log-structured merge-tree](../v23.1/architecture/storage-layer.html#log-structured-merge-trees)). Recombining the split user keys is required for supporting the range keys feature. The migration to recombine the SSTables is expected to be short (split user keys are rare in practice), but will block subsequent migrations until all tables have been recombined. The `storage.marked-for-compaction-files` time series metric can show the progress of the migration. 
[#84887][#84887] +- Using a single TCP port listener for both RPC (node-node) and SQL client connections is now deprecated. This capability **will be removed** in the next version of CockroachDB. Instead, make one of the following configuration changes to your CockroachDB deployment: + - Preferred: keep port `26257` for SQL, and allocate a new port, e.g., `26357`, for node-node RPC connections. For example, you might configure a node with the flags `--listen-addr=:26357 --sql-addr=:26257`, where subsequent nodes seeking to join would then use the flag `--join=othernode:26357,othernode:26257`. This will become the default configuration in the next version of CockroachDB. When using this mode of operation, care should be taken to use a `--join` flag that includes both the previous and new port numbers for other nodes, so that no network partition occurs during the upgrade. + - Optional: keep port `26257` for RPC, and allocate a new port, e.g., `26357`, for SQL connections. For example, you might configure a node with the flags `--listen-addr=:26257 --sql-addr=:26357`. When using this mode of operation, the `--join` flags do not need to be modified. However, SQL client apps or the SQL load balancer configuration (when in use) must be updated to use the new SQL port number. [#85671][#85671] +- If no `nullif` option is specified while using [`IMPORT CSV`](../v23.1/import.html), then a zero-length string in the input is now treated as `NULL`. The quoted empty string in the input is treated as an empty string. Similarly, if `nullif` is specified, then an unquoted value is treated as `NULL`, and a quoted value is treated as that string. These changes were made to make `IMPORT CSV` behave more similarly to `COPY CSV`. If the previous behavior (i.e., treating either quoted or unquoted values that match the `nullif` setting as `NULL`) is desired, you can use the new `allow_quoted_null` option in the `IMPORT` statement. [#84487][#84487] +- [`COPY FROM`](../v23.1/copy.html) operations are now atomic by default instead of being segmented into 100-row transactions. Set the `copy_from_atomic_enabled` session setting to `false` for the previous behavior. [#85986][#85986] +- The `GRANT` privilege has been removed and replaced by the more granular [`WITH GRANT OPTION`]({% link v22.2/grant.md %}#grant-privileges-with-the-option-to-grant-to-others), which provides control over which privileges are allowed to be granted. [#81310][#81310] +- Removed the ability to cast `int`, `int2`, and `int8` to a `0`-length `BIT` or `VARBIT`. [#81266][#81266] +- Removed the deprecated `GRANT` privilege. [#81310][#81310] +- Removed the `ttl_automatic_column` storage parameter. The `crdb_internal_expiration` column is created when `ttl_expire_after` is set and removed when `ttl_expire_after` is reset. [#83134][#83134] +- Removed the byte string parameter in the `crdb_internal.schedule_sql_stats_compaction` function. [#82560][#82560] +- Changed the default value of the `enable_implicit_transaction_for_batch_statements` session variable to `true`. This means that a [batch of statements]({% link v22.2/transactions.md %}#batched-statements) sent in one string separated by semicolons is treated as an implicit transaction.
[#76834][#76834] diff --git a/src/current/_includes/v25.3/cdc/apache-pulsar-unsupported.md b/src/current/_includes/v25.3/cdc/apache-pulsar-unsupported.md new file mode 100644 index 00000000000..fecb7931784 --- /dev/null +++ b/src/current/_includes/v25.3/cdc/apache-pulsar-unsupported.md @@ -0,0 +1,8 @@ +Changefeeds emitting to an Apache Pulsar sink do **not** support: + +- [`format=avro`]({% link {{ page.version.version }}/create-changefeed.md %}#format) +- [`confluent_schema_registry`]({% link {{ page.version.version }}/create-changefeed.md %}#confluent-schema-registry) +- [`topic_prefix`]({% link {{ page.version.version }}/create-changefeed.md %}#topic-prefix) +- Any batching configuration +- [Authentication query parameters]({% link {{ page.version.version }}/create-changefeed.md %}#query-parameters) +- [External connections]({% link {{ page.version.version }}/create-external-connection.md %}) \ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/apache-pulsar-uri.md b/src/current/_includes/v25.3/cdc/apache-pulsar-uri.md new file mode 100644 index 00000000000..28e8f6438d8 --- /dev/null +++ b/src/current/_includes/v25.3/cdc/apache-pulsar-uri.md @@ -0,0 +1,6 @@ +{% include_cached copy-clipboard.html %} +~~~ +pulsar://{host IP}:6650 +~~~ + +By default, Apache Pulsar listens for client connections on port `:6650`. For more detail on configuration, refer to the [Apache Pulsar documentation](https://pulsar.apache.org/docs/2.10.x/reference-configuration). \ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/avro-limitations.md b/src/current/_includes/v25.3/cdc/avro-limitations.md new file mode 100644 index 00000000000..8580ac64cf7 --- /dev/null +++ b/src/current/_includes/v25.3/cdc/avro-limitations.md @@ -0,0 +1,30 @@ +- [Decimals]({% link {{ page.version.version }}/decimal.md %}) must have precision specified. +- [`BYTES`]({% link {{ page.version.version }}/bytes.md %}) (or its aliases `BYTEA` and `BLOB`) are often used to store machine-readable data. When you stream these types through a changefeed with [`format=avro`]({% link {{ page.version.version }}/create-changefeed.md %}#format), CockroachDB does not encode or change the data. However, Avro clients can often include escape sequences to present the data in a printable format, which can interfere with deserialization. A potential solution is to hex-encode `BYTES` values when initially inserting them into CockroachDB. This will ensure that Avro clients can consistently decode the hexadecimal. Note that hex-encoding values at insertion will increase record size. +- [`BIT`]({% link {{ page.version.version }}/bit.md %}) and [`VARBIT`]({% link {{ page.version.version }}/bit.md %}) types are encoded as arrays of 64-bit integers. + + For efficiency, CockroachDB encodes `BIT` and `VARBIT` bitfield types as arrays of 64-bit integers. That is, [base-2 (binary format)](https://wikipedia.org/wiki/Binary_number#Conversion_to_and_from_other_numeral_systems) `BIT` and `VARBIT` data types are converted to base 10 and stored in arrays. Encoding in CockroachDB is [big-endian](https://wikipedia.org/wiki/Endianness), therefore the last value may have many trailing zeroes. For this reason, the first value of each array is the number of bits that are used in the last value of the array. + + For instance, if the bitfield is 129 bits long, there will be 4 integers in the array. 
The first integer will be `1`, representing the number of bits in the last value; the second integer will be the first 64 bits; the third integer will be bits 65–128; and the last integer will either be `0` or `9223372036854775808` (i.e., the integer with only the first bit set, or `1000000000000000000000000000000000000000000000000000000000000000` when base 2). + + This example is base-10 encoded into an array as follows: + + ~~~ + {"array": [1, {first 64 bits}, {bits 65–128}, 0 or 9223372036854775808]} + ~~~ + + For downstream processing, it is necessary to base-2 encode every element in the array (except for the first element). The first number in the array gives you the number of bits to take from the last base-2 number — that is, the most significant bits. So, in the example above this would be `1`. Finally, all the base-2 numbers can be appended together, which will result in the original number of bits, 129. + + In a different example of this process where the bitfield is 136 bits long, the array would be similar to the following when base-10 encoded: + + ~~~ + {"array": [8, 18293058736425533439, 18446744073709551615, 13690942867206307840]} + ~~~ + + To then work with this data, you would convert each of the elements in the array to base-2 numbers, besides the first element. For the above array, this would convert to: + + ~~~ + [8, 1111110111011011111111111111111111111111111111111111111111111111, 1111111111111111111111111111111111111111111111111111111111111111, 1011111000000000000000000000000000000000000000000000000000000000] + ~~~ + + Next, you use the first element in the array to take the number of bits from the last base-2 element, `10111110`. Finally, you append each of the base-2 numbers together — in the above array, the second, third, and truncated last element. This results in 136 bits, the original number of bits. +- {% include {{page.version.version}}/cdc/avro-udt-composite.md %} diff --git a/src/current/_includes/v25.3/cdc/avro-udt-composite.md b/src/current/_includes/v25.3/cdc/avro-udt-composite.md new file mode 100644 index 00000000000..7a34fbd3253 --- /dev/null +++ b/src/current/_includes/v25.3/cdc/avro-udt-composite.md @@ -0,0 +1 @@ +A changefeed in [Avro format]({% link {{ page.version.version }}/changefeed-messages.md %}#avro) will not be able to serialize [user-defined composite (tuple) types](create-type.html).
[#102903](https://github.com/cockroachdb/cockroach/issues/102903) \ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/azure-event-hubs-uri.md b/src/current/_includes/v25.3/cdc/azure-event-hubs-uri.md new file mode 100644 index 00000000000..bb356aca3ec --- /dev/null +++ b/src/current/_includes/v25.3/cdc/azure-event-hubs-uri.md @@ -0,0 +1,9 @@ +~~~ +'azure-event-hub://{event-hubs-namespace}.servicebus.windows.net:9093?shared_access_key_name={policy-name}&shared_access_key={url-encoded key}' +~~~ + +You can also use a `kafka://` scheme in the URI: + +~~~ +'kafka://{event-hubs-namespace}.servicebus.windows.net:9093?shared_access_key_name={policy-name}&shared_access_key={url-encoded key}' +~~~ \ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/bare-envelope-cdc-queries.md b/src/current/_includes/v25.3/cdc/bare-envelope-cdc-queries.md new file mode 100644 index 00000000000..6813c9b7f3d --- /dev/null +++ b/src/current/_includes/v25.3/cdc/bare-envelope-cdc-queries.md @@ -0,0 +1 @@ +{% if page.name == "cdc-queries.md" %}CDC queries{% else %}[CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}){% endif %} use the [`envelope=bare`]({% link {{ page.version.version }}/create-changefeed.md %}#envelope) message format by default. The `bare` message envelope places the output of the `SELECT` clause at the top level of the message instead of under an `"after"` key. When there is additional information that the changefeed is sending, such as [`updated`]({% link {{ page.version.version }}/create-changefeed.md %}#updated) or [`resolved`]({% link {{ page.version.version }}/create-changefeed.md %}#resolved) timestamps, the messages will include a `crdb` field containing this information. \ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/cap-parameter-ext-connection.md b/src/current/_includes/v25.3/cdc/cap-parameter-ext-connection.md new file mode 100644 index 00000000000..8970ddaf2b0 --- /dev/null +++ b/src/current/_includes/v25.3/cdc/cap-parameter-ext-connection.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_info}} +If you are creating an {% if page.name == "create-external-connection.md" %}external connection{% else %}[external connection]({% link {{ page.version.version }}/create-external-connection.md %}){% endif %} with [`CREATE CHANGEFEED` query parameters]({% link {{ page.version.version }}/create-changefeed.md %}#query-parameters), you must pass them in lowercase; otherwise, you will receive an `unknown query parameters` error. +{{site.data.alerts.end}} \ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/cdc-cloud-rangefeed.md b/src/current/_includes/v25.3/cdc/cdc-cloud-rangefeed.md new file mode 100644 index 00000000000..9def4728ec2 --- /dev/null +++ b/src/current/_includes/v25.3/cdc/cdc-cloud-rangefeed.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_info}} +If you are working on a CockroachDB {{ site.data.products.standard }} or {{ site.data.products.basic }} cluster, the `kv.rangefeed.enabled` cluster setting is enabled by default.
+{{site.data.alerts.end}} \ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/cdc-schema-locked-example.md b/src/current/_includes/v25.3/cdc/cdc-schema-locked-example.md new file mode 100644 index 00000000000..0908749d4de --- /dev/null +++ b/src/current/_includes/v25.3/cdc/cdc-schema-locked-example.md @@ -0,0 +1,15 @@ +Use the `schema_locked` [storage parameter]({% link {{ page.version.version }}/with-storage-parameter.md %}) to disallow [schema changes]({% link {{ page.version.version }}/online-schema-changes.md %}) on a watched table, which allows the changefeed to take a fast path that avoids checking if there are schema changes that could require synchronization between [changefeed aggregators]({% link {{ page.version.version }}/how-does-an-enterprise-changefeed-work.md %}). This helps to decrease the latency between a write committing to a table and that change emitting to the [changefeed's sink]({% link {{ page.version.version }}/changefeed-sinks.md %}). + +Enable `schema_locked` on the watched table with the [`ALTER TABLE`]({% link {{ page.version.version }}/alter-table.md %}) statement: + +{% include_cached copy-clipboard.html %} +~~~ sql +ALTER TABLE watched_table SET (schema_locked = true); +~~~ + +While `schema_locked` is enabled on a table, attempted schema changes on the table will be rejected and an error returned. If you need to run a schema change on the locked table, unlock the table with `schema_locked = false`, complete the schema change, and then lock the table again with `schema_locked = true`. The changefeed will run as normal while `schema_locked = false`, but it will not benefit from the performance optimization. + +{% include_cached copy-clipboard.html %} +~~~ sql +ALTER TABLE watched_table SET (schema_locked = false); +~~~ \ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/changefeed-number-limit.md b/src/current/_includes/v25.3/cdc/changefeed-number-limit.md new file mode 100644 index 00000000000..85ad543bd82 --- /dev/null +++ b/src/current/_includes/v25.3/cdc/changefeed-number-limit.md @@ -0,0 +1 @@ +We recommend limiting the number of changefeeds per cluster to 80. \ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/client-key-encryption.md b/src/current/_includes/v25.3/cdc/client-key-encryption.md new file mode 100644 index 00000000000..c7c7be4c38c --- /dev/null +++ b/src/current/_includes/v25.3/cdc/client-key-encryption.md @@ -0,0 +1 @@ +**Note:** Client keys are often encrypted. You will receive an error if you pass an encrypted client key in your changefeed statement. To decrypt the client key, run: `openssl rsa -in key.pem -out key.decrypt.pem -passin pass:{PASSWORD}`. Once decrypted, be sure to update your changefeed statement to use the new `key.decrypt.pem` file instead. \ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/cluster-iam-role-step.md b/src/current/_includes/v25.3/cdc/cluster-iam-role-step.md new file mode 100644 index 00000000000..64980c725bc --- /dev/null +++ b/src/current/_includes/v25.3/cdc/cluster-iam-role-step.md @@ -0,0 +1,41 @@ +1. Navigate to the [IAM console](https://console.aws.amazon.com/iam/), select **Roles** from the navigation, and then **Create role**. +1. Select **AWS service** for the **Trusted entity type**. For **Use case**, select **EC2** from the dropdown. Click **Next**. +1. On the **Add permissions** page, click **Next**. +1. Name the role (for example, `ec2-role`) and click **Create role**. +1.
Once the role has been created, copy the ARN in the **Summary** section. Click on the **Trust relationships** tab. You'll find a **Trusted entities** policy: + + ~~~json + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": "ec2.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] + } + ~~~ + +1. Navigate to the [IAM console](https://console.aws.amazon.com/iam/) and search for the role (`msk-role`) you created in Step 2 that contains the MSK policy. Select the role, which will take you to its summary page. +1. Click on the **Trust relationships** tab, and click **Edit trust policy**. Add the ARN of the EC2 IAM role (`ec2-role`) to the JSON policy: + + ~~~json + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": "ec2.amazonaws.com", + "AWS": "arn:aws:iam::{account ID}:role/{ec2-role}" + }, + "Action": "sts:AssumeRole" + } + ] + } + ~~~ + + Once you've updated the policy, click **Update policy**. \ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/composite-key-delete-insert.md b/src/current/_includes/v25.3/cdc/composite-key-delete-insert.md new file mode 100644 index 00000000000..38c44f28d64 --- /dev/null +++ b/src/current/_includes/v25.3/cdc/composite-key-delete-insert.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_info}} +When changes happen to a column that is part of a composite [key]({% link {{ page.version.version }}/primary-key.md %}), the changefeed will produce a {% if page.name == "cdc-queries.md" %}[delete message](#filter-delete-messages) {% else %}[delete message]({% link {{ page.version.version }}/changefeed-messages.md %}#delete-messages) {% endif %}and then an insert message. +{{site.data.alerts.end}} \ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/configure-all-changefeed.md b/src/current/_includes/v25.3/cdc/configure-all-changefeed.md new file mode 100644 index 00000000000..6ac9e6b9b4c --- /dev/null +++ b/src/current/_includes/v25.3/cdc/configure-all-changefeed.md @@ -0,0 +1,19 @@ +Pausing all running changefeeds is useful during troubleshooting and testing, or when you need to reduce CPU load. + +To pause all running changefeeds: + +{% include_cached copy-clipboard.html %} +~~~sql +PAUSE JOBS (WITH x AS (SHOW CHANGEFEED JOBS) SELECT job_id FROM x WHERE status = ('running')); +~~~ + +This will change the status for each of the running changefeeds to `paused`, which can be verified with [`SHOW CHANGEFEED JOBS`]({% link {{ page.version.version }}/show-jobs.md %}#show-changefeed-jobs). + +To resume all paused changefeeds: + +{% include_cached copy-clipboard.html %} +~~~sql +RESUME JOBS (WITH x AS (SHOW CHANGEFEED JOBS) SELECT job_id FROM x WHERE status = ('paused')); +~~~ + +This will resume the changefeeds and update the status for each of the changefeeds to `running`. diff --git a/src/current/_includes/v25.3/cdc/confluent-cloud-sr-url.md b/src/current/_includes/v25.3/cdc/confluent-cloud-sr-url.md new file mode 100644 index 00000000000..4796c1abf75 --- /dev/null +++ b/src/current/_includes/v25.3/cdc/confluent-cloud-sr-url.md @@ -0,0 +1 @@ +To connect to Confluent Cloud, use the following URL structure: `'https://{API_KEY_ID}:{API_SECRET_URL_ENCODED}@{CONFLUENT_REGISTRY_URL}:443'`. See the [Stream a Changefeed to a Confluent Cloud Kafka Cluster]({% link {{ page.version.version }}/stream-a-changefeed-to-a-confluent-cloud-kafka-cluster.md %}#step-8-create-a-changefeed) tutorial for further detail.
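+ +As a minimal sketch, a changefeed statement using this URL structure might look like the following (the `movr.users` table and the Kafka broker placeholder are hypothetical): + +{% include_cached copy-clipboard.html %} +~~~ sql +CREATE CHANGEFEED FOR TABLE movr.users + INTO 'kafka://{BROKER_ENDPOINT}:9093' + WITH format = avro, confluent_schema_registry = 'https://{API_KEY_ID}:{API_SECRET_URL_ENCODED}@{CONFLUENT_REGISTRY_URL}:443'; +~~~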
\ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/core-csv.md b/src/current/_includes/v25.3/cdc/core-csv.md new file mode 100644 index 00000000000..0901eed2def --- /dev/null +++ b/src/current/_includes/v25.3/cdc/core-csv.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_info}} +To determine how wide the columns need to be, the default `table` display format in `cockroach sql` buffers the results it receives from the server before printing them to the console. When consuming basic changefeed data using `cockroach sql`, it's important to use a display format like `csv` that does not buffer its results. To set the display format, use the [`--format=csv` flag]({% link {{ page.version.version }}/cockroach-sql.md %}#sql-flag-format) when starting the [built-in SQL client]({% link {{ page.version.version }}/cockroach-sql.md %}), or set the [`\set display_format=csv` option]({% link {{ page.version.version }}/cockroach-sql.md %}#client-side-options) once the SQL client is open. +{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/cdc/core-url.md b/src/current/_includes/v25.3/cdc/core-url.md new file mode 100644 index 00000000000..029e0ac40b7 --- /dev/null +++ b/src/current/_includes/v25.3/cdc/core-url.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_info}} +Because basic changefeeds return results differently than other SQL statements, they require a dedicated database connection with specific settings around result buffering. In normal operation, CockroachDB improves performance by buffering results server-side before returning them to a client; however, result buffering is automatically turned off for basic changefeeds. Basic changefeeds also have different cancellation behavior than other queries: they can only be canceled by closing the underlying connection or issuing a [`CANCEL QUERY`]({% link {{ page.version.version }}/cancel-query.md %}) statement on a separate connection. Combined, these attributes of changefeeds mean that applications should explicitly create dedicated connections to consume changefeed data, instead of using a connection pool as most client drivers do by default. +{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/cdc/create-core-changefeed-avro.md b/src/current/_includes/v25.3/cdc/create-core-changefeed-avro.md new file mode 100644 index 00000000000..53dab65cff2 --- /dev/null +++ b/src/current/_includes/v25.3/cdc/create-core-changefeed-avro.md @@ -0,0 +1,122 @@ +In this example, you'll set up a basic changefeed for a single-node cluster that emits Avro records. CockroachDB's Avro binary encoding convention uses the [Confluent Schema Registry](https://docs.confluent.io/current/schema-registry/docs/serializer-formatter.html) to store Avro schemas. + +1. Use the [`cockroach start-single-node`]({% link {{ page.version.version }}/cockroach-start-single-node.md %}) command to start a single-node cluster: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach start-single-node \ + --insecure \ + --listen-addr=localhost \ + --background + ~~~ + +1. Download and extract the [Confluent Open Source platform](https://www.confluent.io/download/). + +1. Move into the extracted `confluent-` directory and start Confluent: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ ./bin/confluent local services start + ~~~ + + Only `zookeeper`, `kafka`, and `schema-registry` are needed.
To troubleshoot Confluent, see [their docs](https://docs.confluent.io/current/installation/installing_cp.html#zip-and-tar-archives) and the [Quick Start Guide](https://docs.confluent.io/platform/current/quickstart/ce-quickstart.html#ce-quickstart). + +1. As the `root` user, open the [built-in SQL client]({% link {{ page.version.version }}/cockroach-sql.md %}): + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --url="postgresql://root@127.0.0.1:26257?sslmode=disable" --format=csv + ~~~ + + {% include {{ page.version.version }}/cdc/core-url.md %} + + {% include {{ page.version.version }}/cdc/core-csv.md %} + +1. Enable the `kv.rangefeed.enabled` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}): + + {% include_cached copy-clipboard.html %} + ~~~ sql + > SET CLUSTER SETTING kv.rangefeed.enabled = true; + ~~~ + +1. Create table `bar`: + + {% include_cached copy-clipboard.html %} + ~~~ sql + > CREATE TABLE bar (a INT PRIMARY KEY); + ~~~ + +1. Insert a row into the table: + + {% include_cached copy-clipboard.html %} + ~~~ sql + > INSERT INTO bar VALUES (0); + ~~~ + +1. Start the basic changefeed: + + {% include_cached copy-clipboard.html %} + ~~~ sql + > EXPERIMENTAL CHANGEFEED FOR bar WITH format = avro, confluent_schema_registry = 'http://localhost:8081'; + ~~~ + + ~~~ + table,key,value + bar,\000\000\000\000\001\002\000,\000\000\000\000\002\002\002\000 + ~~~ + +1. In a new terminal, add another row: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --insecure -e "INSERT INTO bar VALUES (1)" + ~~~ + +1. Back in the terminal where the basic changefeed is streaming, the output will appear: + + ~~~ + bar,\000\000\000\000\001\002\002,\000\000\000\000\002\002\002\002 + ~~~ + + Note that records may take a couple of seconds to display in the basic changefeed. + +1. To stop streaming the changefeed, enter **CTRL+C** into the terminal where the changefeed is running. + +1. To stop `cockroach`: + + Get the process ID of the node: + + {% include_cached copy-clipboard.html %} + ~~~ shell + ps -ef | grep cockroach | grep -v grep + ~~~ + + ~~~ + 501 21766 1 0 6:21PM ttys001 0:00.89 cockroach start-single-node --insecure --listen-addr=localhost + ~~~ + + Gracefully shut down the node, specifying its process ID: + + {% include_cached copy-clipboard.html %} + ~~~ shell + kill -TERM 21766 + ~~~ + + ~~~ + initiating graceful shutdown of server + server drained and shutdown completed + ~~~ + +1. To stop Confluent, move into the extracted `confluent-` directory and stop Confluent: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ ./bin/confluent local services stop + ~~~ + + To terminate all Confluent processes, use: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ ./bin/confluent local destroy + ~~~ diff --git a/src/current/_includes/v25.3/cdc/create-core-changefeed.md b/src/current/_includes/v25.3/cdc/create-core-changefeed.md new file mode 100644 index 00000000000..df2264501a0 --- /dev/null +++ b/src/current/_includes/v25.3/cdc/create-core-changefeed.md @@ -0,0 +1,98 @@ +In this example, you'll set up a basic changefeed for a single-node cluster. + +1. In a terminal window, start `cockroach`: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach start-single-node \ + --insecure \ + --listen-addr=localhost \ + --background + ~~~ + +1. 
As the `root` user, open the [built-in SQL client]({% link {{ page.version.version }}/cockroach-sql.md %}): + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql \ + --url="postgresql://root@127.0.0.1:26257?sslmode=disable" \ + --format=csv + ~~~ + + {% include {{ page.version.version }}/cdc/core-url.md %} + + {% include {{ page.version.version }}/cdc/core-csv.md %} + +1. Enable the `kv.rangefeed.enabled` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}): + + {% include_cached copy-clipboard.html %} + ~~~ sql + > SET CLUSTER SETTING kv.rangefeed.enabled = true; + ~~~ + +1. Create table `foo`: + + {% include_cached copy-clipboard.html %} + ~~~ sql + > CREATE TABLE foo (a INT PRIMARY KEY); + ~~~ + +1. Insert a row into the table: + + {% include_cached copy-clipboard.html %} + ~~~ sql + > INSERT INTO foo VALUES (0); + ~~~ + +1. Start the basic changefeed: + + {% include_cached copy-clipboard.html %} + ~~~ sql + > EXPERIMENTAL CHANGEFEED FOR foo; + ~~~ + ~~~ + table,key,value + foo,[0],"{""after"": {""a"": 0}}" + ~~~ + +1. In a new terminal, add another row: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --insecure -e "INSERT INTO foo VALUES (1)" + ~~~ + +1. Back in the terminal where the basic changefeed is streaming, the following output will appear: + + ~~~ + foo,[1],"{""after"": {""a"": 1}}" + ~~~ + + Note that records may take a couple of seconds to display in the basic changefeed. + +1. To stop streaming the changefeed, enter **CTRL+C** into the terminal where the changefeed is running. + +1. To stop `cockroach`: + + Get the process ID of the node: + + {% include_cached copy-clipboard.html %} + ~~~ shell + ps -ef | grep cockroach | grep -v grep + ~~~ + + ~~~ + 501 21766 1 0 6:21PM ttys001 0:00.89 cockroach start-single-node --insecure --listen-addr=localhost + ~~~ + + Gracefully shut down the node, specifying its process ID: + + {% include_cached copy-clipboard.html %} + ~~~ shell + kill -TERM 21766 + ~~~ + + ~~~ + initiating graceful shutdown of server + server drained and shutdown completed + ~~~ diff --git a/src/current/_includes/v25.3/cdc/create-example-db-cdc.md b/src/current/_includes/v25.3/cdc/create-example-db-cdc.md new file mode 100644 index 00000000000..17902b10eac --- /dev/null +++ b/src/current/_includes/v25.3/cdc/create-example-db-cdc.md @@ -0,0 +1,50 @@ +1. Create a database called `cdc_demo`: + + {% include_cached copy-clipboard.html %} + ~~~ sql + > CREATE DATABASE cdc_demo; + ~~~ + +1. Set the database as the default: + + {% include_cached copy-clipboard.html %} + ~~~ sql + > SET DATABASE = cdc_demo; + ~~~ + +1. Create a table and add data: + + {% include_cached copy-clipboard.html %} + ~~~ sql + > CREATE TABLE office_dogs ( + id INT PRIMARY KEY, + name STRING); + ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ sql + > INSERT INTO office_dogs VALUES + (1, 'Petee'), + (2, 'Carl'); + ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ sql + > UPDATE office_dogs SET name = 'Petee H' WHERE id = 1; + ~~~ + +1.
Create another table and add data: + + {% include_cached copy-clipboard.html %} + ~~~ sql + > CREATE TABLE employees ( + dog_id INT REFERENCES office_dogs (id), + employee_name STRING); + ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ sql + > INSERT INTO employees VALUES + (1, 'Lauren'), + (2, 'Spencer'); + ~~~ diff --git a/src/current/_includes/v25.3/cdc/csv-changefeed-format.md b/src/current/_includes/v25.3/cdc/csv-changefeed-format.md new file mode 100644 index 00000000000..eb04b0f97c4 --- /dev/null +++ b/src/current/_includes/v25.3/cdc/csv-changefeed-format.md @@ -0,0 +1 @@ +Changefeeds emit the same CSV format as [`EXPORT`]({% link {{ page.version.version }}/export.md %}). In v22.1, changefeeds emitted CSV data that wrapped some values in single quotes, which were not wrapped when exporting data with the `EXPORT` statement. \ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/csv-udt-composite.md b/src/current/_includes/v25.3/cdc/csv-udt-composite.md new file mode 100644 index 00000000000..834bddd8366 --- /dev/null +++ b/src/current/_includes/v25.3/cdc/csv-udt-composite.md @@ -0,0 +1 @@ +A changefeed emitting [CSV]({% link {{ page.version.version }}/changefeed-messages.md %}#csv) will include `AS` labels in the message format when the changefeed serializes a [user-defined composite type]({% link {{ page.version.version }}/create-type.md %}). [#102905](https://github.com/cockroachdb/cockroach/issues/102905) \ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/disable-replication-ttl.md b/src/current/_includes/v25.3/cdc/disable-replication-ttl.md new file mode 100644 index 00000000000..c846d1a7a7f --- /dev/null +++ b/src/current/_includes/v25.3/cdc/disable-replication-ttl.md @@ -0,0 +1,26 @@ +Use the `ttl_disable_changefeed_replication` table storage parameter to prevent changefeeds from sending `DELETE` messages issued by row-level TTL jobs for a table. Include the storage parameter when you create or alter the table. For example: + +{% include_cached copy-clipboard.html %} +~~~ sql +CREATE TABLE tbl ( + id UUID PRIMARY KEY default gen_random_uuid(), + value TEXT +) WITH (ttl_expire_after = '3 weeks', ttl_job_cron = '@daily', ttl_disable_changefeed_replication = 'true'); +~~~ + +{% include_cached copy-clipboard.html %} +~~~ sql +ALTER TABLE events SET (ttl_expire_after = '1 year', ttl_disable_changefeed_replication = 'true'); +~~~ + +You can also widen the scope to the cluster by setting the `sql.ttl.changefeed_replication.disabled` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) to `true`. This will prevent changefeeds from emitting deletes issued by all TTL jobs on a cluster. + +If you want to have a changefeed ignore the storage parameter or cluster setting that disables changefeed replication, you can set the changefeed option `ignore_disable_changefeed_replication` to `true`: + +{% include_cached copy-clipboard.html %} +~~~ sql +CREATE CHANGEFEED FOR TABLE table_name INTO 'external://changefeed-sink' + WITH resolved, ignore_disable_changefeed_replication = true; +~~~ + +This is useful when you have multiple use cases for different changefeeds on the same table. For example, you might have one changefeed streaming changes to another database for analytics workflows, in which you do not want to reflect row-level TTL deletes, and a second changefeed on the same table for audit-logging purposes, for which you need to persist every change.
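+ +A minimal sketch of this two-changefeed pattern, assuming hypothetical external connections `analytics-sink` and `audit-sink`: + +{% include_cached copy-clipboard.html %} +~~~ sql +-- Analytics changefeed: the storage parameter applies, so row-level TTL deletes are not emitted. +CREATE CHANGEFEED FOR TABLE events INTO 'external://analytics-sink' WITH resolved; + +-- Audit changefeed: overrides the storage parameter so that every change, including TTL deletes, is emitted. +CREATE CHANGEFEED FOR TABLE events INTO 'external://audit-sink' WITH resolved, ignore_disable_changefeed_replication = true; +~~~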
\ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/elastic-cpu-performance.md b/src/current/_includes/v25.3/cdc/elastic-cpu-performance.md new file mode 100644 index 00000000000..6eb9336cd89 --- /dev/null +++ b/src/current/_includes/v25.3/cdc/elastic-cpu-performance.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_info}} +By default, changefeeds are integrated with elastic CPU, which helps to prevent changefeeds from affecting foreground traffic. This may affect changefeed latency. For more detail on monitoring, refer to the [Changefeed performance]({% link {{ page.version.version }}/advanced-changefeed-configurations.md %}#changefeed-performance) section. +{{site.data.alerts.end}} \ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/examples-license-workload.md b/src/current/_includes/v25.3/cdc/examples-license-workload.md new file mode 100644 index 00000000000..32d395aaed8 --- /dev/null +++ b/src/current/_includes/v25.3/cdc/examples-license-workload.md @@ -0,0 +1,24 @@ +1. If you do not already have one, [request a trial {{ site.data.products.enterprise }} license]({% link {{ page.version.version }}/licensing-faqs.md %}#obtain-a-license). + +1. Use the [`cockroach start-single-node`]({% link {{ page.version.version }}/cockroach-start-single-node.md %}) command to start a single-node cluster: + + {% include_cached copy-clipboard.html %} + ~~~ shell + cockroach start-single-node --insecure --listen-addr=localhost + ~~~ + +1. In this example, you'll run CockroachDB's [Movr]({% link {{ page.version.version }}/movr.md %}) application workload to set up some data for your changefeed. + + In a new terminal, first create the schema for the workload: + + {% include_cached copy-clipboard.html %} + ~~~shell + cockroach workload init movr "postgresql://root@127.0.0.1:26257?sslmode=disable" + ~~~ + + Then run the workload: + + {% include_cached copy-clipboard.html %} + ~~~shell + cockroach workload run movr --duration=1m "postgresql://root@127.0.0.1:26257?sslmode=disable" + ~~~ diff --git a/src/current/_includes/v25.3/cdc/ext-conn-cluster-setting.md b/src/current/_includes/v25.3/cdc/ext-conn-cluster-setting.md new file mode 100644 index 00000000000..82d266ce59d --- /dev/null +++ b/src/current/_includes/v25.3/cdc/ext-conn-cluster-setting.md @@ -0,0 +1 @@ +To restrict a user's access to changefeed data and sink credentials, enable the `changefeed.permissions.require_external_connection_sink.enabled` cluster setting. When you enable this setting, users with the [`CHANGEFEED` privilege]({% link {{ page.version.version }}/create-changefeed.md %}#required-privileges) on a set of tables can only create changefeeds into [external connections]({% link {{ page.version.version }}/create-external-connection.md %}). 
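+ +For example, to enable the setting: + +{% include_cached copy-clipboard.html %} +~~~ sql +SET CLUSTER SETTING changefeed.permissions.require_external_connection_sink.enabled = true; +~~~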
diff --git a/src/current/_includes/v25.3/cdc/external-urls.md b/src/current/_includes/v25.3/cdc/external-urls.md new file mode 100644 index 00000000000..d87cb7538d1 --- /dev/null +++ b/src/current/_includes/v25.3/cdc/external-urls.md @@ -0,0 +1,48 @@ +~~~ +[scheme]://[host]/[path]?[parameters] +~~~ + +Location | Scheme | Host | Parameters | +|-------------------------------------------------------------+-------------+--------------------------------------------------+---------------------------------------------------------------------------- +Amazon | `s3` | Bucket name | `AUTH` [1](#considerations) (optional; can be `implicit` or `specified`), `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `AWS_SESSION_TOKEN` +Azure | `azure` | N/A (see [Example file URLs](#example-file-urls)) | `AZURE_ACCOUNT_KEY`, `AZURE_ACCOUNT_NAME` +Google Cloud [2](#considerations) | `gs` | Bucket name | `AUTH` (optional; can be `default`, `implicit`, or `specified`), `CREDENTIALS` +HTTP [3](#considerations) | `http` | Remote host | N/A +NFS/Local [4](#considerations) | `nodelocal` | `nodeID` or `self` [5](#considerations) (see [Example file URLs](#example-file-urls)) | N/A +S3-compatible services [6](#considerations) | `s3` | Bucket name | `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `AWS_SESSION_TOKEN`, `AWS_REGION` [7](#considerations) (optional), `AWS_ENDPOINT` + +{{site.data.alerts.callout_info}} +The location parameters often contain special characters that need to be URI-encoded. Use JavaScript's [`encodeURIComponent`](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/encodeURIComponent) function or Go's [`url.QueryEscape`](https://golang.org/pkg/net/url/#QueryEscape) function to URI-encode the parameters. Other languages provide similar functions to URI-encode special characters. +{{site.data.alerts.end}} + +{{site.data.alerts.callout_info}} +If your environment requires an HTTP or HTTPS proxy server for outgoing connections, you can set the standard `HTTP_PROXY` and `HTTPS_PROXY` environment variables when starting CockroachDB. + + If you cannot run a full proxy, you can disable external HTTP(S) access (as well as custom HTTP(S) endpoints) when performing bulk operations (e.g., [`BACKUP`]({% link {{ page.version.version }}/backup.md %}), [`RESTORE`]({% link {{ page.version.version }}/restore.md %}), etc.) by using the [`--external-io-disable-http` flag]({% link {{ page.version.version }}/cockroach-start.md %}#security). You can also disable the use of implicit credentials when accessing external cloud storage services for various bulk operations by using the [`--external-io-disable-implicit-credentials` flag]({% link {{ page.version.version }}/cockroach-start.md %}#security). +{{site.data.alerts.end}} + + + +- 1 If the `AUTH` parameter is not provided, AWS connections default to `specified` and the access keys must be provided in the URI parameters. If the `AUTH` parameter is `implicit`, the access keys can be omitted and [the credentials will be loaded from the environment](https://docs.aws.amazon.com/sdk-for-go/api/aws/session/). + +- 2 If the `AUTH` parameter is not specified, the `cloudstorage.gs.default.key` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) will be used if it is non-empty; otherwise, the `implicit` behavior is used. If the `AUTH` parameter is `implicit`, all GCS connections use Google's [default authentication strategy](https://cloud.google.com/docs/authentication/production#providing_credentials_to_your_application).
If the `AUTH` parameter is `default`, the `cloudstorage.gs.default.key` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) must be set to the contents of a [service account file](https://cloud.google.com/docs/authentication/production#obtaining_and_providing_service_account_credentials_manually), which will be used during authentication. If the `AUTH` parameter is `specified`, GCS connections are authenticated on a per-statement basis, which allows the JSON key object to be sent in the `CREDENTIALS` parameter. The JSON key object should be Base64-encoded (using the standard encoding in [RFC 4648](https://tools.ietf.org/html/rfc4648)). + +- 3 You can create your own HTTP server with [Caddy or nginx]({% link {{ page.version.version }}/use-a-local-file-server.md %}). A custom root CA can be appended to the system's default CAs by setting the `cloudstorage.http.custom_ca` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}), which will be used when verifying certificates from HTTPS URLs. + +- 4 The file system backup location on the NFS drive is relative to the path specified by the `--external-io-dir` flag set while [starting the node]({% link {{ page.version.version }}/cockroach-start.md %}). If the flag is set to `disabled`, then imports from local directories and NFS drives are disabled. + +- 5 Using a `nodeID` is required and the data files will be in the `extern` directory of the specified node. In most cases (including single-node clusters), using `nodelocal://1/` is sufficient. Use `self` if you do not want to specify a `nodeID`, and the individual data files will be in the `extern` directories of arbitrary nodes; however, to work correctly, each node must have the [`--external-io-dir` flag]({% link {{ page.version.version }}/cockroach-start.md %}#general) point to the same NFS mount or other network-backed, shared storage. + +- 6 A custom root CA can be appended to the system's default CAs by setting the `cloudstorage.http.custom_ca` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}), which will be used when verifying certificates from an S3-compatible service. + +- 7 The `AWS_REGION` parameter is optional; specify it only if your S3-compatible service requires it.
+ +#### Example file URLs + +Location | Example +-------------+---------------------------------------------------------------------------------- +Amazon S3 | `s3://acme-co/employees?AWS_ACCESS_KEY_ID=123&AWS_SECRET_ACCESS_KEY=456` +Azure | `azure-blob://employees?AZURE_ACCOUNT_KEY=123&AZURE_ACCOUNT_NAME=acme-co` +Google Cloud | `gs://acme-co` +HTTP | `http://localhost:8080/employees` +NFS/Local | `nodelocal://1/path/employees`, `nodelocal://self/nfsmount/backups/employees` [5](#considerations) diff --git a/src/current/_includes/v25.3/cdc/filter-show-changefeed-jobs-columns.md b/src/current/_includes/v25.3/cdc/filter-show-changefeed-jobs-columns.md new file mode 100644 index 00000000000..39471ac538d --- /dev/null +++ b/src/current/_includes/v25.3/cdc/filter-show-changefeed-jobs-columns.md @@ -0,0 +1,11 @@ +You can filter the columns that `SHOW CHANGEFEED JOBS` displays using a `SELECT` statement: + +{% include_cached copy-clipboard.html %} +~~~ sql +SELECT job_id, sink_uri, status, format FROM [SHOW CHANGEFEED JOBS] WHERE job_id = 997306743028908033; +~~~ +~~~ + job_id | sink_uri | status | format +---------------------+------------------+----------+--------- + 997306743028908033 | external://kafka | running | json +~~~ \ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/kafka-acks.md b/src/current/_includes/v25.3/cdc/kafka-acks.md new file mode 100644 index 00000000000..dec1ba68392 --- /dev/null +++ b/src/current/_includes/v25.3/cdc/kafka-acks.md @@ -0,0 +1 @@ +You must also set `acks` to `ALL` in your [server-side Kafka configuration](https://kafka.apache.org/documentation/#producerconfigs_acks) for this to provide high-durability delivery. \ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/kafka-vpc-limitation.md b/src/current/_includes/v25.3/cdc/kafka-vpc-limitation.md new file mode 100644 index 00000000000..4f27e1b4777 --- /dev/null +++ b/src/current/_includes/v25.3/cdc/kafka-vpc-limitation.md @@ -0,0 +1 @@ +[VPC Peering]({% link cockroachcloud/network-authorization.md %}#vpc-peering) and [AWS PrivateLink]({% link cockroachcloud/network-authorization.md %}#aws-privatelink) in CockroachDB {{ site.data.products.advanced }} clusters do **not** support connecting to a [Kafka]({% link {{ page.version.version }}/changefeed-sinks.md %}#kafka) sink's internal IP addresses for [changefeeds]({% link {{ page.version.version }}/change-data-capture-overview.md %}). To connect to a Kafka sink from CockroachDB {{ site.data.products.advanced }}, it is necessary to expose the Kafka cluster's external IP address and open ports with firewall rules to allow access from a CockroachDB {{ site.data.products.advanced }} cluster. diff --git a/src/current/_includes/v25.3/cdc/lagging-ranges.md b/src/current/_includes/v25.3/cdc/lagging-ranges.md new file mode 100644 index 00000000000..8316c347dda --- /dev/null +++ b/src/current/_includes/v25.3/cdc/lagging-ranges.md @@ -0,0 +1,12 @@ +Use the `changefeed.lagging_ranges` metric to track the number of [ranges]({% link {{ page.version.version }}/architecture/overview.md %}#range) that are behind in a changefeed. This is calculated based on the [changefeed options]({% link {{ page.version.version }}/create-changefeed.md %}#options): + +- `lagging_ranges_threshold` sets a duration from the present that determines the length of time a range is considered to be lagging behind, which is then tracked in the [`lagging_ranges`]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}#lagging-ranges-metric) metric.
Note that ranges undergoing an [initial scan]({% link {{ page.version.version }}/create-changefeed.md %}#initial-scan) for longer than the threshold duration are considered to be lagging. Starting a changefeed with an initial scan on a large table will likely increment the metric for each range in the table. As ranges complete the initial scan, the number of ranges lagging behind will decrease. + - **Default:** `3m` +- `lagging_ranges_polling_interval` sets the interval at which lagging ranges are checked and the `lagging_ranges` metric is updated. Polling adds latency to updates of the `lagging_ranges` metric. For example, if a range falls behind by 3 minutes, the metric may not update until an additional minute afterward. + - **Default:** `1m` + +Use the `changefeed.total_ranges` metric to monitor the number of ranges that are watched by [aggregator processors]({% link {{ page.version.version }}/how-does-an-enterprise-changefeed-work.md %}) participating in the changefeed job. If you're experiencing lagging ranges, `changefeed.total_ranges` may indicate that the number of ranges watched by aggregator processors in the job is unbalanced. You may want to try [pausing]({% link {{ page.version.version }}/pause-job.md %}) the changefeed and then [resuming]({% link {{ page.version.version }}/resume-job.md %}) it, so that the changefeed replans the work in the cluster. `changefeed.total_ranges` shares the same polling interval as the `changefeed.lagging_ranges` metric, which is controlled by the `lagging_ranges_polling_interval` option. + +{{site.data.alerts.callout_success}} +You can use the [`metrics_label`]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}#using-changefeed-metrics-labels) option to track the `lagging_ranges` and `total_ranges` metrics per changefeed. +{{site.data.alerts.end}} \ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/list-cloud-changefeed-uris.md b/src/current/_includes/v25.3/cdc/list-cloud-changefeed-uris.md new file mode 100644 index 00000000000..6c9849b111e --- /dev/null +++ b/src/current/_includes/v25.3/cdc/list-cloud-changefeed-uris.md @@ -0,0 +1,6 @@ +Location | Example +-------------+---------------------------------------------------------------------------------- +Amazon S3 | `'s3://{BUCKET NAME}/{PATH}?AWS_ACCESS_KEY_ID={KEY ID}&AWS_SECRET_ACCESS_KEY={SECRET ACCESS KEY}'` +Azure Blob Storage | `'azure://{CONTAINER NAME}/{PATH}?AZURE_ACCOUNT_NAME={ACCOUNT NAME}&AZURE_ACCOUNT_KEY={URL-ENCODED KEY}'` +Google Cloud | `'gs://{BUCKET NAME}/{PATH}?AUTH=specified&CREDENTIALS={ENCODED KEY}'` +HTTP | `'file-http(s)://localhost:8080/{PATH}'` or `'http(s)://localhost:8080/{PATH}'`

**Note:** Using `http(s)` without the `file-` prefix is deprecated as a [changefeed sink]({% link {{ page.version.version }}/changefeed-sinks.md %}) scheme. There is continued support for `http(s)`, but it will be removed in a future release. We recommend implementing the `file-http(s)` scheme for changefeed messages. \ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/message-format-list.md b/src/current/_includes/v25.3/cdc/message-format-list.md new file mode 100644 index 00000000000..7f77dbba575 --- /dev/null +++ b/src/current/_includes/v25.3/cdc/message-format-list.md @@ -0,0 +1,6 @@ +By default, changefeeds emit messages in JSON format. You can use a different format by [creating a changefeed]({% link {{ page.version.version }}/create-changefeed.md %}) with the [`format`]({% link {{ page.version.version }}/create-changefeed.md %}#format) option and specifying one of the following: + +- `json` +- `csv` +- `avro` +- `parquet` \ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/metrics-labels.md b/src/current/_includes/v25.3/cdc/metrics-labels.md new file mode 100644 index 00000000000..6f97eaffcdd --- /dev/null +++ b/src/current/_includes/v25.3/cdc/metrics-labels.md @@ -0,0 +1,8 @@ +To measure metrics per changefeed, you can define a "metrics label" for one or more changefeeds. Each labeled changefeed will increment each [changefeed metric]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}#metrics). Metrics label information is sent with time-series metrics to `http://{host}:{http-port}/_status/vars`, viewable via the [Prometheus endpoint]({% link {{ page.version.version }}/monitoring-and-alerting.md %}#prometheus-endpoint). An aggregated metric of all changefeeds is also measured. + +Consider the following when applying metrics labels to changefeeds: + +- The `server.child_metrics.enabled` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) must be set to `true` before using the `metrics_label` option. `server.child_metrics.enabled` is enabled by default in {{ site.data.products.standard }} and {{ site.data.products.basic }}. +- Metrics label information is sent to the `_status/vars` endpoint, but will **not** show up in [`debug.zip`]({% link {{ page.version.version }}/cockroach-debug-zip.md %}) or the [DB Console]({% link {{ page.version.version }}/ui-overview.md %}). +- Introducing labels to isolate a changefeed's metrics can increase cardinality significantly. To prevent cardinality explosion, there is a limit of 1024 unique labels. Each labeled changefeed produces its own series for every changefeed metric, and that metric-series data grows over time, which can have an impact on performance. +- The maximum length of a metrics label is 128 bytes.
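+ +A minimal sketch of applying a metrics label (the table and external connection names are hypothetical): + +{% include_cached copy-clipboard.html %} +~~~ sql +SET CLUSTER SETTING server.child_metrics.enabled = true; + +CREATE CHANGEFEED FOR TABLE movr.vehicles INTO 'external://kafka-sink' WITH metrics_label = 'vehicles_feed'; +~~~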
diff --git a/src/current/_includes/v25.3/cdc/modify-changefeed.md b/src/current/_includes/v25.3/cdc/modify-changefeed.md new file mode 100644 index 00000000000..fde29d8687e --- /dev/null +++ b/src/current/_includes/v25.3/cdc/modify-changefeed.md @@ -0,0 +1,9 @@ +To modify an {{ site.data.products.enterprise }} changefeed, [pause]({% link {{ page.version.version }}/create-and-configure-changefeeds.md %}#pause) the job and then use: + +~~~ sql +ALTER CHANGEFEED job_id {ADD table | DROP table | SET option | UNSET option}; +~~~ + +You can add new table targets, remove them, set new [changefeed options]({% link {{ page.version.version }}/create-changefeed.md %}#options), and unset them. + +For more information, see [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}). diff --git a/src/current/_includes/v25.3/cdc/msk-dedicated-support.md b/src/current/_includes/v25.3/cdc/msk-dedicated-support.md new file mode 100644 index 00000000000..4e5ed34b941 --- /dev/null +++ b/src/current/_includes/v25.3/cdc/msk-dedicated-support.md @@ -0,0 +1 @@ +You can stream a changefeed to a public IP MSK endpoint from any CockroachDB cluster. If you would like to connect a changefeed running on a CockroachDB {{ site.data.products.advanced }} cluster to an Amazon MSK Serverless cluster over AWS PrivateLink, contact your Cockroach Labs account team. \ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/msk-iam-policy-role-step.md b/src/current/_includes/v25.3/cdc/msk-iam-policy-role-step.md new file mode 100644 index 00000000000..0758d426419 --- /dev/null +++ b/src/current/_includes/v25.3/cdc/msk-iam-policy-role-step.md @@ -0,0 +1,51 @@ +1. In the AWS Management Console, go to the [IAM console](https://console.aws.amazon.com/iam/), select **Policies** from the navigation, and then **Create Policy**. +1. Using the **JSON** tab option, update the policy with the following JSON. These permissions will allow you to connect to the cluster, manage topics, and consume messages. You may want to adjust the permissions to suit your permission model. For more details on the available permissions, refer to the AWS documentation on [IAM Access Control](https://docs.aws.amazon.com/msk/latest/developerguide/iam-access-control.html#kafka-actions) for MSK. + + Replace the instances of `arn:aws:kafka:{region}:{account ID}:cluster/{msk-cluster-name}` with the MSK ARN from your cluster's summary page and add `/*` to the end, like the following: + + {% include_cached copy-clipboard.html %} + ~~~json + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "kafka-cluster:Connect", + "kafka-cluster:AlterCluster", + "kafka-cluster:DescribeCluster" + ], + "Resource": [ + "arn:aws:kafka:{region}:{account ID}:cluster/{msk-cluster-name}/*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "kafka-cluster:*Topic", + "kafka-cluster:WriteData", + "kafka-cluster:ReadData" + ], + "Resource": [ + "arn:aws:kafka:{region}:{account ID}:cluster/{msk-cluster-name}/*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "kafka-cluster:AlterGroup", + "kafka-cluster:DescribeGroup" + ], + "Resource": [ + "arn:aws:kafka:{region}:{account ID}:cluster/{msk-cluster-name}/*" + ] + } + ] + } + ~~~ + +1. Once you have added your policy, add a policy name (for example, `msk-policy`), click **Next**, and **Create policy**. +1. Return to the [IAM console](https://console.aws.amazon.com/iam/), select **Roles** from the navigation, and then **Create role**. +1.
Select **AWS service** for the **Trusted entity type**. For **Use case**, select **EC2** from the dropdown. Click **Next**. +1. On the **Add permissions** page, search for the IAM policy (`msk-policy`) you just created. Click **Next**. +1. Name the role (for example, `msk-role`) and click **Create role**. \ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/msk-tutorial-crdb-setup.md b/src/current/_includes/v25.3/cdc/msk-tutorial-crdb-setup.md new file mode 100644 index 00000000000..40de46f2af7 --- /dev/null +++ b/src/current/_includes/v25.3/cdc/msk-tutorial-crdb-setup.md @@ -0,0 +1,33 @@ +1. (Optional) On the EC2 instance running CockroachDB, run the [Movr]({% link {{ page.version.version }}/movr.md %}) application workload to set up some data for your changefeed. + + Create the schema for the workload: + + {% include_cached copy-clipboard.html %} + ~~~shell + cockroach workload init movr + ~~~ + + Then run the workload: + + {% include_cached copy-clipboard.html %} + ~~~shell + cockroach workload run movr --duration=1m + ~~~ + +1. Start a SQL session. For details on the available flags, refer to the [`cockroach sql`]({% link {{ page.version.version }}/cockroach-sql.md %}) page. + + {% include_cached copy-clipboard.html %} + ~~~ shell + cockroach sql --insecure + ~~~ + + {{site.data.alerts.callout_info}} + To set your {{ site.data.products.enterprise }} license, refer to the [Licensing FAQs]({% link {{ page.version.version }}/licensing-faqs.md %}#set-a-license) page. + {{site.data.alerts.end}} + +1. Enable the `kv.rangefeed.enabled` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}): + + {% include_cached copy-clipboard.html %} + ~~~ sql + SET CLUSTER SETTING kv.rangefeed.enabled = true; + ~~~ \ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/mux-rangefeed.md b/src/current/_includes/v25.3/cdc/mux-rangefeed.md new file mode 100644 index 00000000000..964918545df --- /dev/null +++ b/src/current/_includes/v25.3/cdc/mux-rangefeed.md @@ -0,0 +1,3 @@ +`MuxRangefeed` is enabled by default. + +`MuxRangefeed` is a subsystem that improves the performance of rangefeeds at scale. It significantly reduces the overhead of running rangefeeds. Without `MuxRangefeed`, the number of RPC streams is proportional to the number of ranges in a table. For example, a large table could have tens of thousands of ranges. With `MuxRangefeed`, the number of RPC streams instead scales with the number of nodes in a cluster. \ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/note-changefeed-message-page.md b/src/current/_includes/v25.3/cdc/note-changefeed-message-page.md new file mode 100644 index 00000000000..4570458c8be --- /dev/null +++ b/src/current/_includes/v25.3/cdc/note-changefeed-message-page.md @@ -0,0 +1 @@ +For an overview of the messages emitted from changefeeds, see the [Changefeed Messages]({% link {{ page.version.version }}/changefeed-messages.md %}) page.
\ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/oauth-description.md b/src/current/_includes/v25.3/cdc/oauth-description.md new file mode 100644 index 00000000000..a12ea818927 --- /dev/null +++ b/src/current/_includes/v25.3/cdc/oauth-description.md @@ -0,0 +1 @@ +[OAuth 2.0](https://oauth.net/2/) authentication uses credentials managed by a third-party provider (IdP) to authenticate with Kafka instead of requiring you to provide your Kafka cluster credentials directly in a [`CREATE CHANGEFEED`]({% link {{ page.version.version }}/create-changefeed.md %}) statement. Your provider's authentication server will issue a temporary token, giving you flexibility to apply access rules on the credentials that your IdP provides. \ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/options-table-note.md b/src/current/_includes/v25.3/cdc/options-table-note.md new file mode 100644 index 00000000000..7ae80055e08 --- /dev/null +++ b/src/current/_includes/v25.3/cdc/options-table-note.md @@ -0,0 +1 @@ +This table shows the parameters for changefeeds to a specific sink. The `CREATE CHANGEFEED` page provides a list of all the available [options]({% link {{ page.version.version }}/create-changefeed.md %}#options). diff --git a/src/current/_includes/v25.3/cdc/print-key.md b/src/current/_includes/v25.3/cdc/print-key.md new file mode 100644 index 00000000000..ab0b0924d30 --- /dev/null +++ b/src/current/_includes/v25.3/cdc/print-key.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_info}} +This example only prints the value. To print both the key and value of each message in the changefeed (e.g., to observe what happens with `DELETE`s), use the `--property print.key=true` flag. +{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/cdc/privilege-model.md b/src/current/_includes/v25.3/cdc/privilege-model.md new file mode 100644 index 00000000000..0b38ce782f4 --- /dev/null +++ b/src/current/_includes/v25.3/cdc/privilege-model.md @@ -0,0 +1,66 @@ +{{site.data.alerts.callout_danger}} +As of v25.1, **viewing and managing** a changefeed job by users with the [`CHANGEFEED` privilege](#changefeed-privilege) is **deprecated**. This functionality of the `CHANGEFEED` privilege will be removed in a future release. + +We recommend transitioning users that need to view and manage running changefeed jobs to [roles]({% link {{ page.version.version }}/create-role.md %}) that own the [jobs]({% link {{ page.version.version }}/show-jobs.md %}) or [granting]({% link {{ page.version.version }}/grant.md %}) them the `VIEWJOB` or `CONTROLJOB` privilege. For more details, refer to [View and manage changefeed jobs](#view-and-manage-changefeed-jobs). +{{site.data.alerts.end}} + +### Privilege model + +{{site.data.alerts.callout_success}} +For fine-grained access control, we recommend using the system-level privileges [`CHANGEFEED`](#changefeed-privilege) and [`CONTROLJOB` / `VIEWJOB`](#view-and-manage-changefeed-jobs). +{{site.data.alerts.end}} + +The following summarizes the operations users can run depending on whether the assigned privileges are at the job or table level: + +Granted privileges | Usage +-------------------+------- +`CHANGEFEED` | Create changefeeds on tables. For details, refer to [`CHANGEFEED` privilege](#changefeed-privilege).
**Deprecated**: View and manage changefeed jobs on tables. Instead, transition users that need to view and manage running changefeed jobs to [roles]({% link {{ page.version.version }}/create-role.md %}) that own the [jobs]({% link {{ page.version.version }}/show-jobs.md %}) or [grant]({% link {{ page.version.version }}/grant.md %}) them the `VIEWJOB` or `CONTROLJOB` privilege. For more details, refer to [View and manage changefeed jobs](#view-and-manage-changefeed-jobs). +`CHANGEFEED` + [`USAGE`]({% link {{ page.version.version }}/create-external-connection.md %}#required-privileges) on external connection | Create changefeeds on tables to an external connection URI. For details, refer to [`CHANGEFEED` privilege](#changefeed-privilege).<br>
**Deprecated**: View and manage changefeed jobs on tables. Instead, transition users that need to view and manage running changefeed jobs to [roles]({% link {{ page.version.version }}/create-role.md %}) that own the [jobs]({% link {{ page.version.version }}/show-jobs.md %}) or [grant]({% link {{ page.version.version }}/grant.md %}) them the `VIEWJOB` or `CONTROLJOB` privilege. For more details, refer to [View and manage changefeed jobs](#view-and-manage-changefeed-jobs).<br>

**Note:** If you need to manage access to changefeed sink URIs, set the `changefeed.permissions.require_external_connection_sink.enabled` cluster setting to `true`. With this setting enabled, users with these privileges can **only** create changefeeds on external connections. +Job ownership | [View]({% link {{ page.version.version }}/show-jobs.md %}#show-changefeed-jobs) and manage changefeed jobs ([pause]({% link {{ page.version.version }}/pause-job.md %}), [resume]({% link {{ page.version.version }}/resume-job.md %}), and [cancel]({% link {{ page.version.version }}/cancel-job.md %})). For details, refer to [View and manage changefeed jobs](#view-and-manage-changefeed-jobs). +`CONTROLJOB` | Manage changefeed jobs ([pause]({% link {{ page.version.version }}/pause-job.md %}), [resume]({% link {{ page.version.version }}/resume-job.md %}), and [cancel]({% link {{ page.version.version }}/cancel-job.md %})). For details, refer to [View and manage changefeed jobs](#view-and-manage-changefeed-jobs). +`VIEWJOB` | [View]({% link {{ page.version.version }}/show-jobs.md %}#show-changefeed-jobs) changefeed jobs. For details, refer to [View and manage changefeed jobs](#view-and-manage-changefeed-jobs). +`SELECT` | Create a sinkless changefeed that emits messages to a SQL client. +**Deprecated** `CONTROLCHANGEFEED` role option + `SELECT` | Create changefeeds on tables. Users with the `CONTROLCHANGEFEED` role option must have `SELECT` on each table, even if they are also granted the `CHANGEFEED` privilege.<br>

The `CONTROLCHANGEFEED` role option will be removed in a future release. We recommend using the system-level privileges [`CHANGEFEED`](#changefeed-privilege) and [`CONTROLJOB` / `VIEWJOB`](#view-and-manage-changefeed-jobs) for fine-grained access control. +`admin` | Create, view, and manage changefeed jobs. + +#### `CHANGEFEED` privilege + +{{site.data.alerts.callout_info}} +Viewing and managing changefeed jobs with the `CHANGEFEED` privilege is **deprecated** as of v25.1. Instead, transition users that need to view and manage running changefeed jobs to [roles]({% link {{ page.version.version }}/create-role.md %}) that own the [jobs]({% link {{ page.version.version }}/show-jobs.md %}) or [grant]({% link {{ page.version.version }}/grant.md %}) them the `VIEWJOB` or `CONTROLJOB` privilege. For more details, refer to [View and manage changefeed jobs](#view-and-manage-changefeed-jobs). +{{site.data.alerts.end}} + +You can [grant]({% link {{ page.version.version }}/grant.md %}#grant-privileges-on-specific-tables-in-a-database) a user the `CHANGEFEED` privilege to allow them to create changefeeds on a specific table: + +{% include_cached copy-clipboard.html %} +~~~sql +GRANT CHANGEFEED ON TABLE example_table TO user; +~~~ + +When you grant a user the `CHANGEFEED` privilege on a set of tables, they can create changefeeds on the target tables even if the user does **not** have the [`CONTROLCHANGEFEED` role option]({% link {{ page.version.version }}/alter-role.md %}#role-options) or the `SELECT` privilege on the tables. + +These users will be able to create changefeeds, but they will not be able to run a `SELECT` query on that data directly. However, they could still read this data indirectly if they have read access to the [sink]({% link {{ page.version.version }}/changefeed-sinks.md %}). + +You can add `CHANGEFEED` to the user or role's [default privileges]({% link {{ page.version.version }}/security-reference/authorization.md %}#default-privileges) with [`ALTER DEFAULT PRIVILEGES`]({% link {{ page.version.version }}/alter-default-privileges.md %}#grant-default-privileges-to-a-specific-role): + +{% include_cached copy-clipboard.html %} +~~~sql +ALTER DEFAULT PRIVILEGES GRANT CHANGEFEED ON TABLES TO user; +~~~ + +{% include {{ page.version.version }}/cdc/ext-conn-cluster-setting.md %} + +#### View and manage changefeed jobs + +Users can [view]({% link {{ page.version.version }}/show-jobs.md %}#show-changefeed-jobs) and manage changefeed jobs when one of the following is met: + +- **Job ownership**: They own the job, or are a member of a role that owns a job. +- **Global privileges**: They are assigned [`CONTROLJOB` or `VIEWJOB`]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges). + +To give a set of users access to a specific job, or set of jobs, assign them to a [role]({% link {{ page.version.version }}/security-reference/authorization.md %}#users-and-roles) that owns the job(s).
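+ +For example, a sketch of this pattern with hypothetical role and user names: + +{% include_cached copy-clipboard.html %} +~~~sql +CREATE ROLE cdc_jobs_owner; +GRANT cdc_jobs_owner TO maria, sam; +~~~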
+ +You can transfer ownership of a job to a role or user using the [`ALTER JOB`]({% link {{ page.version.version }}/alter-job.md %}) statement: + +{% include_cached copy-clipboard.html %} +~~~sql +ALTER JOB {job_id} OWNER TO {role_name}; +~~~ \ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/pts-gc-monitoring.md b/src/current/_includes/v25.3/cdc/pts-gc-monitoring.md new file mode 100644 index 00000000000..11a2c4d1fd0 --- /dev/null +++ b/src/current/_includes/v25.3/cdc/pts-gc-monitoring.md @@ -0,0 +1,6 @@ +You can monitor changefeed jobs for [protected timestamp]({% link {{ page.version.version }}/architecture/storage-layer.md %}#protected-timestamps) usage. We recommend setting up {% if page.name == "monitor-and-debug-changefeeds.md" %} monitoring {% else %} [monitoring]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}) {% endif %}for the following metrics: + +- `jobs.changefeed.protected_age_sec`: Tracks the age of the oldest [protected timestamp]({% link {{ page.version.version }}/architecture/storage-layer.md %}#protected-timestamps) record protected by changefeed jobs. We recommend monitoring whether `protected_age_sec` exceeds [`gc.ttlseconds`]({% link {{ page.version.version }}/configure-replication-zones.md %}#gc-ttlseconds). As `protected_age_sec` increases, garbage accumulation increases. [Garbage collection]({% link {{ page.version.version }}/architecture/storage-layer.md %}#garbage-collection) will not progress on a table, database, or cluster if the protected timestamp record is present. +- `jobs.changefeed.currently_paused`: Tracks the number of changefeed jobs currently considered [paused]({% link {{ page.version.version }}/pause-job.md %}). Since paused changefeed jobs can accumulate garbage, it is important to [monitor the number of paused changefeeds]({% link {{ page.version.version }}/pause-job.md %}#monitoring-paused-jobs). +- `jobs.changefeed.expired_pts_records`: Tracks the number of expired [protected timestamp]({% link {{ page.version.version }}/architecture/storage-layer.md %}#protected-timestamps) records owned by changefeed jobs. You can monitor this metric in conjunction with the [`gc_protect_expires_after` option]({% link {{ page.version.version }}/create-changefeed.md %}#gc-protect-expires-after). +- `jobs.changefeed.protected_record_count`: Tracks the number of [protected timestamp]({% link {{ page.version.version }}/architecture/storage-layer.md %}#protected-timestamps) records held by changefeed jobs. \ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/recommendation-monitoring-pts.md b/src/current/_includes/v25.3/cdc/recommendation-monitoring-pts.md new file mode 100644 index 00000000000..c802caf5cd3 --- /dev/null +++ b/src/current/_includes/v25.3/cdc/recommendation-monitoring-pts.md @@ -0,0 +1 @@ +Cockroach Labs recommends monitoring your changefeeds to track [retryable errors]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}#changefeed-retry-errors) and [protected timestamp]({% link {{ page.version.version }}/architecture/storage-layer.md %}#protected-timestamps) usage. Refer to the [Monitor and Debug Changefeeds]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}) page for more information.
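To bound how long a changefeed job can hold a protected timestamp (and thus bound garbage accumulation), you can set the `gc_protect_expires_after` option when creating the changefeed. A minimal sketch, assuming a hypothetical `orders` table and an existing `kafka-conn` external connection:

{% include_cached copy-clipboard.html %}
~~~sql
CREATE CHANGEFEED FOR TABLE orders
  INTO 'external://kafka-conn'
  WITH gc_protect_expires_after = '24h';
~~~

With this option set, a protected timestamp record older than 24 hours expires so garbage collection can proceed, which can cause the changefeed to fail if it falls that far behind; the `jobs.changefeed.expired_pts_records` metric tracks how often this happens.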
\ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/schedule-query-example.md b/src/current/_includes/v25.3/cdc/schedule-query-example.md new file mode 100644 index 00000000000..7a692f21eb4 --- /dev/null +++ b/src/current/_includes/v25.3/cdc/schedule-query-example.md @@ -0,0 +1,14 @@ +This example creates a nightly export of some filtered table data with a [scheduled changefeed]({% link {{ page.version.version }}/create-schedule-for-changefeed.md %}) that will run just after midnight every night. The changefeed uses [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}) to query the table and filter the data it will send to the sink: + +{% include_cached copy-clipboard.html %} +~~~ sql +CREATE SCHEDULE sf_skateboard FOR CHANGEFEED INTO 'external://cloud-sink' WITH format=csv + AS SELECT current_location, id, type, status FROM vehicles + WHERE city = 'san francisco' AND type = 'skateboard' + RECURRING '1 0 * * *' WITH SCHEDULE OPTIONS on_execution_failure=retry, on_previous_running=start; +~~~ + +The [schedule options]({% link {{ page.version.version }}/create-schedule-for-changefeed.md %}#schedule-options) control the schedule's behavior: + +- If it runs into a failure, `on_execution_failure=retry` will ensure that the schedule retries the changefeed immediately. +- If the previous scheduled changefeed is still running, `on_previous_running=start` will start a new changefeed at the defined cadence. \ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/schema-registry-metric.md b/src/current/_includes/v25.3/cdc/schema-registry-metric.md new file mode 100644 index 00000000000..b9482feafdc --- /dev/null +++ b/src/current/_includes/v25.3/cdc/schema-registry-metric.md @@ -0,0 +1 @@ +Use the `changefeed.schema_registry.retry_count` metric to measure the number of request retries performed when sending requests to the schema registry. For more detail on monitoring changefeeds, refer to [Monitor and Debug Changefeeds]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}). \ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/schema-registry-timeout.md b/src/current/_includes/v25.3/cdc/schema-registry-timeout.md new file mode 100644 index 00000000000..eec8371f282 --- /dev/null +++ b/src/current/_includes/v25.3/cdc/schema-registry-timeout.md @@ -0,0 +1 @@ +Use the {% if page.name == "create-changefeed.md" %} `timeout={duration}` query parameter {% else %} [`timeout={duration}` query parameter]({% link {{ page.version.version }}/create-changefeed.md %}#confluent-schema-registry) {% endif %}([duration string](https://pkg.go.dev/time#ParseDuration)) in your Confluent Schema Registry URI to change the default timeout for contacting the schema registry. By default, the timeout is 30 seconds. \ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/show-changefeed-job-retention.md b/src/current/_includes/v25.3/cdc/show-changefeed-job-retention.md new file mode 100644 index 00000000000..4103aede6b1 --- /dev/null +++ b/src/current/_includes/v25.3/cdc/show-changefeed-job-retention.md @@ -0,0 +1 @@ +`SHOW CHANGEFEED JOBS` will return all changefeed jobs from the last 12 hours. For more information on the retention of job details, refer to the {% if page.name == "show-jobs.md" %} [Response](#response) {% else %} [Response]({% link {{ page.version.version }}/show-jobs.md %}#response) {% endif %} section. 
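As a sketch of the `timeout` parameter described above (the table name, Kafka address, and registry address here are placeholders), the duration is passed as a query parameter in the `confluent_schema_registry` URI:

{% include_cached copy-clipboard.html %}
~~~ sql
CREATE CHANGEFEED FOR TABLE orders
  INTO 'kafka://localhost:9092'
  WITH format = avro,
       confluent_schema_registry = 'http://localhost:8081?timeout=60s';
~~~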
\ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/show-changefeed-job.md b/src/current/_includes/v25.3/cdc/show-changefeed-job.md new file mode 100644 index 00000000000..02893bc7766 --- /dev/null +++ b/src/current/_includes/v25.3/cdc/show-changefeed-job.md @@ -0,0 +1,24 @@ +{% include_cached copy-clipboard.html %} +~~~ sql +SHOW CHANGEFEED JOBS; +~~~ +~~~ + job_id | description | ... ++----------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------+ ... + 685724608744325121 | CREATE CHANGEFEED FOR TABLE mytable INTO 'kafka://localhost:9092' WITH confluent_schema_registry = 'http://localhost:8081', format = 'avro', resolved, updated | ... + 685723987509116929 | CREATE CHANGEFEED FOR TABLE mytable INTO 'kafka://localhost:9092' WITH confluent_schema_registry = 'http://localhost:8081', format = 'avro', resolved, updated | ... +(2 rows) +~~~ + +To show an individual {{ site.data.products.enterprise }} changefeed: + +{% include_cached copy-clipboard.html %} +~~~ sql +SHOW CHANGEFEED JOB {job_id}; +~~~ +~~~ + job_id | description | user_name | status | running_status | created | started | finished | modified | high_water_timestamp | readable_high_water_timestamptz | error | sink_uri | full_table_names | topics | format +----------------------+---------------------------------------------------------------------------------------------------------+-----------+---------+------------------------------------------+-------------------------------+-------------------------------+----------+-------------------------------+--------------------------------+---------------------------------+-------+------------------------------------------------------------+----------------------------------+--------+--------- + 1053639803034894337 | CREATE CHANGEFEED FOR TABLE customers INTO 'gs://bucket-name?AUTH=specified&CREDENTIALS=redacted' | root | running | running: resolved=1741616141.951323000,0 | 2025-03-10 14:09:10.047524+00 | 2025-03-10 14:09:10.047524+00 | NULL | 2025-03-10 14:15:44.955653+00 | 1741616141951323000.0000000000 | 2025-03-10 14:15:41.951323+00 | | gs://bucket-name?AUTH=specified&CREDENTIALS=redacted | {online_retail.public.customers} | NULL | json +(1 row) +~~~ \ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/sink-URI-external-connection.md b/src/current/_includes/v25.3/cdc/sink-URI-external-connection.md new file mode 100644 index 00000000000..90ab96f315f --- /dev/null +++ b/src/current/_includes/v25.3/cdc/sink-URI-external-connection.md @@ -0,0 +1 @@ +You can create an external connection to represent a changefeed sink URI. This allows you to specify the external connection's name in statements rather than the provider-specific URI. For detail on using external connections, see the [`CREATE EXTERNAL CONNECTION`]({% link {{ page.version.version }}/create-external-connection.md %}) page. \ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/sink-configuration-detail.md b/src/current/_includes/v25.3/cdc/sink-configuration-detail.md new file mode 100644 index 00000000000..ed96a0ead52 --- /dev/null +++ b/src/current/_includes/v25.3/cdc/sink-configuration-detail.md @@ -0,0 +1,28 @@ +{{site.data.alerts.callout_danger}} +Setting either `Messages` or `Bytes` with a non-zero value without setting `Frequency` will cause the sink to assume `Frequency` has an infinity value. 
This configuration is invalid and will cause an error, since messages could sit in a batch indefinitely if the other conditions do not trigger. If either `Messages` or `Bytes` has a non-zero value, then a non-zero value for `Frequency` **must** also be provided. +{{site.data.alerts.end}} + +Some complexities to consider when setting `Flush` fields for batching: + +- When all batching parameters are zero (`"Messages"`, `"Bytes"`, and `"Frequency"`), the sink will interpret this configuration as "send a batch every time a message is available." This would be the same as not providing any configuration at all: + + ~~~ + { + "Flush": { + "Messages": 0, + "Bytes": 0, + "Frequency": "0s" + } + } + ~~~ + +- If one or more fields are set to non-zero values, the sink will interpret any fields with a zero value as infinity. For example, in the following configuration, the sink will send a batch whenever the size reaches 100 messages, **or** when 5 seconds has passed since the batch was populated with its first message. `Bytes` is unset, so the batch size is unlimited. No flush will be triggered due to batch size: + + ~~~ + { + "Flush": { + "Messages": 100, + "Frequency": "5s" + } + } + ~~~ \ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/sink-list.md b/src/current/_includes/v25.3/cdc/sink-list.md new file mode 100644 index 00000000000..6468b3d317c --- /dev/null +++ b/src/current/_includes/v25.3/cdc/sink-list.md @@ -0,0 +1,8 @@ +- {% if page.name == "changefeed-sinks.md" %} [Amazon MSK](#amazon-msk) {% else %} [Amazon MSK]({% link {{ page.version.version }}/changefeed-sinks.md %}#amazon-msk) {% endif %} +- {% if page.name == "changefeed-sinks.md" %} [Apache Pulsar](#apache-pulsar) (in Preview) {% else %} [Apache Pulsar]({% link {{ page.version.version }}/changefeed-sinks.md %}#apache-pulsar) (in Preview) {% endif %} +- {% if page.name == "changefeed-sinks.md" %} [Azure Event Hubs](#azure-event-hubs) {% else %} [Azure Event Hubs]({% link {{ page.version.version }}/changefeed-sinks.md %}#azure-event-hubs) {% endif %} +- {% if page.name == "changefeed-sinks.md" %} [Cloud Storage](#cloud-storage-sink) / HTTP {% else %} [Cloud Storage]({% link {{ page.version.version }}/changefeed-sinks.md %}#cloud-storage-sink) / HTTP {% endif %} +- {% if page.name == "changefeed-sinks.md" %} [Confluent Cloud](#confluent-cloud) {% else %} [Confluent Cloud]({% link {{ page.version.version }}/changefeed-sinks.md %}#confluent-cloud) {% endif %} +- {% if page.name == "changefeed-sinks.md" %} [Google Cloud Pub/Sub](#google-cloud-pub-sub) {% else %} [Google Cloud Pub/Sub]({% link {{ page.version.version }}/changefeed-sinks.md %}#google-cloud-pub-sub) {% endif %} +- {% if page.name == "changefeed-sinks.md" %} [Kafka](#kafka) {% else %} [Kafka]({% link {{ page.version.version }}/changefeed-sinks.md %}#kafka) {% endif %} +- {% if page.name == "changefeed-sinks.md" %} [Webhook](#webhook-sink) {% else %} [Webhook]({% link {{ page.version.version }}/changefeed-sinks.md %}#webhook-sink) {% endif %} \ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/sql-cluster-settings-example.md b/src/current/_includes/v25.3/cdc/sql-cluster-settings-example.md new file mode 100644 index 00000000000..fa2887967a1 --- /dev/null +++ b/src/current/_includes/v25.3/cdc/sql-cluster-settings-example.md @@ -0,0 +1,27 @@ +1. 
As the `root` user, open the [built-in SQL client]({% link {{ page.version.version }}/cockroach-sql.md %}): + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --insecure + ~~~ + +1. Set your organization name and [{{ site.data.products.enterprise }} license]({% link {{ page.version.version }}/licensing-faqs.md %}#types-of-licenses) key: + + {% include_cached copy-clipboard.html %} + ~~~ sql + > SET CLUSTER SETTING cluster.organization = ''; + ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ sql + > SET CLUSTER SETTING enterprise.license = ''; + ~~~ + +1. Enable the `kv.rangefeed.enabled` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}): + + {% include_cached copy-clipboard.html %} + ~~~ sql + > SET CLUSTER SETTING kv.rangefeed.enabled = true; + ~~~ + + {% include {{ page.version.version }}/cdc/cdc-cloud-rangefeed.md %} diff --git a/src/current/_includes/v25.3/cdc/tutorial-privilege-check.md b/src/current/_includes/v25.3/cdc/tutorial-privilege-check.md new file mode 100644 index 00000000000..4ad9801b37a --- /dev/null +++ b/src/current/_includes/v25.3/cdc/tutorial-privilege-check.md @@ -0,0 +1 @@ +You must have the `CHANGEFEED` privilege in order to create and manage changefeed jobs. Refer to [Required privileges]({% link {{ page.version.version }}/create-changefeed.md %}#required-privileges) for more details. \ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/types-udt-composite-general.md b/src/current/_includes/v25.3/cdc/types-udt-composite-general.md new file mode 100644 index 00000000000..f702cfec5cb --- /dev/null +++ b/src/current/_includes/v25.3/cdc/types-udt-composite-general.md @@ -0,0 +1 @@ +Changefeeds are not fully integrated with [user-defined composite types]({% link {{ page.version.version }}/create-type.md %}). Running changefeeds with user-defined composite types is in [Preview]({% link {{ page.version.version }}/cockroachdb-feature-availability.md %}#feature-availability-phases). Certain changefeed types do not support user-defined composite types. Refer to the change data capture [Known Limitations]({% link {{ page.version.version }}/create-and-configure-changefeeds.md %}#known-limitations) for more detail. \ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/url-encoding.md b/src/current/_includes/v25.3/cdc/url-encoding.md new file mode 100644 index 00000000000..eb12a94bbe0 --- /dev/null +++ b/src/current/_includes/v25.3/cdc/url-encoding.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_info}} +Parameters should always be URI-encoded before they are included in the changefeed's URI, as they often contain special characters. Use JavaScript's [encodeURIComponent](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/encodeURIComponent) function or Go's [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape) function to URI-encode the parameters. Other languages provide similar functions to URI-encode special characters. +{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/cdc/virtual-computed-column-cdc.md b/src/current/_includes/v25.3/cdc/virtual-computed-column-cdc.md new file mode 100644 index 00000000000..60910bb817a --- /dev/null +++ b/src/current/_includes/v25.3/cdc/virtual-computed-column-cdc.md @@ -0,0 +1 @@ +As of v22.1, changefeeds filter out [`VIRTUAL` computed columns]({% link {{ page.version.version }}/computed-columns.md %}) from events by default.
This is a [backward-incompatible change]({% link releases/v22.1.md %}#v22-1-0-backward-incompatible-changes). To maintain the changefeed behavior in previous versions where [`NULL`]({% link {{ page.version.version }}/null-handling.md %}) values are emitted for virtual computed columns, see the [`virtual_columns`]({% link {{ page.version.version }}/create-changefeed.md %}#virtual-columns) option for more detail. diff --git a/src/current/_includes/v25.3/cdc/webhook-beta.md b/src/current/_includes/v25.3/cdc/webhook-beta.md new file mode 100644 index 00000000000..5d27a27585e --- /dev/null +++ b/src/current/_includes/v25.3/cdc/webhook-beta.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_info}} +The webhook sink is currently in **beta** — see [usage considerations]({% link {{ page.version.version }}/changefeed-sinks.md %}#webhook-sink), available [parameters]({% link {{ page.version.version }}/create-changefeed.md %}#parameters), and [options]({% link {{ page.version.version }}/create-changefeed.md %}#options) for more information. +{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/client-transaction-retry.md b/src/current/_includes/v25.3/client-transaction-retry.md new file mode 100644 index 00000000000..4c65eebea7f --- /dev/null +++ b/src/current/_includes/v25.3/client-transaction-retry.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_info}} +With the default `SERIALIZABLE` [isolation level]({% link {{ page.version.version }}/transactions.md %}#isolation-levels), CockroachDB may require the client to [retry a transaction]({% link {{ page.version.version }}/transactions.md %}#transaction-retries) in case of read/write [contention]({{ link_prefix }}performance-best-practices-overview.html#understanding-and-avoiding-transaction-contention). CockroachDB provides a [generic retry function](transaction-retry-error-reference.html#client-side-retry-handling) that runs inside a transaction and retries it as needed. The code sample below shows how it is used. +{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/computed-columns/add-computed-column.md b/src/current/_includes/v25.3/computed-columns/add-computed-column.md new file mode 100644 index 00000000000..5eff580e575 --- /dev/null +++ b/src/current/_includes/v25.3/computed-columns/add-computed-column.md @@ -0,0 +1,55 @@ +In this example, create a table: + +{% include_cached copy-clipboard.html %} +~~~ sql +> CREATE TABLE x ( + a INT NULL, + b INT NULL AS (a * 2) STORED, + c INT NULL AS (a + 4) STORED, + FAMILY "primary" (a, b, rowid, c) + ); +~~~ + +Then, insert a row of data: + +{% include_cached copy-clipboard.html %} +~~~ sql +> INSERT INTO x VALUES (6); +~~~ + +{% include_cached copy-clipboard.html %} +~~~ sql +> SELECT * FROM x; +~~~ + +~~~ ++---+----+----+ +| a | b | c | ++---+----+----+ +| 6 | 12 | 10 | ++---+----+----+ +(1 row) +~~~ + +Now add another computed column to the table, this time a virtual one: + +{% include_cached copy-clipboard.html %} +~~~ sql +> ALTER TABLE x ADD COLUMN d INT AS (a // 2) VIRTUAL; +~~~ + +The `d` column is added to the table and computed as the value of the `a` column divided by 2. 
+ +{% include_cached copy-clipboard.html %} +~~~ sql +> SELECT * FROM x; +~~~ + +~~~ ++---+----+----+---+ +| a | b | c | d | ++---+----+----+---+ +| 6 | 12 | 10 | 3 | ++---+----+----+---+ +(1 row) +~~~ diff --git a/src/current/_includes/v25.3/computed-columns/alter-computed-column.md b/src/current/_includes/v25.3/computed-columns/alter-computed-column.md new file mode 100644 index 00000000000..d51d64a1df4 --- /dev/null +++ b/src/current/_includes/v25.3/computed-columns/alter-computed-column.md @@ -0,0 +1,76 @@ +To alter the formula for a computed column, you must [`DROP`]({% link {{ page.version.version }}/alter-table.md %}#drop-column) and [`ADD`]({% link {{ page.version.version }}/alter-table.md %}#add-column) the column back with the new definition. Take the following table for instance: + +{% include_cached copy-clipboard.html %} +~~~sql +> CREATE TABLE x ( +a INT NULL, +b INT NULL AS (a * 2) STORED, +c INT NULL AS (a + 4) STORED, +FAMILY "primary" (a, b, rowid, c) +); +~~~ +~~~ +CREATE TABLE + + +Time: 4ms total (execution 4ms / network 0ms) +~~~ + +Add a computed column `d`: + +{% include_cached copy-clipboard.html %} +~~~sql +> ALTER TABLE x ADD COLUMN d INT AS (a // 2) STORED; +~~~ +~~~ +ALTER TABLE + + +Time: 199ms total (execution 199ms / network 0ms) +~~~ + +If you try to alter it, you'll get an error: + +{% include_cached copy-clipboard.html %} +~~~sql +> ALTER TABLE x ALTER COLUMN d INT AS (a // 3) STORED; +~~~ +~~~ +invalid syntax: statement ignored: at or near "int": syntax error +SQLSTATE: 42601 +DETAIL: source SQL: +ALTER TABLE x ALTER COLUMN d INT AS (a // 3) STORED + ^ +HINT: try \h ALTER TABLE +~~~ + +However, you can drop it and then add it with the new definition: + +{% include_cached copy-clipboard.html %} +~~~sql +> SET sql_safe_updates = false; +> ALTER TABLE x DROP COLUMN d; +> ALTER TABLE x ADD COLUMN d INT AS (a // 3) STORED; +> SET sql_safe_updates = true; +~~~ +~~~ +SET + + +Time: 1ms total (execution 0ms / network 0ms) + +ALTER TABLE + + +Time: 195ms total (execution 195ms / network 0ms) + +ALTER TABLE + + +Time: 186ms total (execution 185ms / network 0ms) + +SET + + +Time: 0ms total (execution 0ms / network 0ms) +~~~ diff --git a/src/current/_includes/v25.3/computed-columns/convert-computed-column.md b/src/current/_includes/v25.3/computed-columns/convert-computed-column.md new file mode 100644 index 00000000000..2be9bf72587 --- /dev/null +++ b/src/current/_includes/v25.3/computed-columns/convert-computed-column.md @@ -0,0 +1,108 @@ +You can convert a stored, computed column into a regular column by using `ALTER TABLE`. 
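The general form of the conversion is a single `ALTER TABLE ... DROP STORED` statement; the table and column names below are placeholders:

{% include_cached copy-clipboard.html %}
~~~ sql
ALTER TABLE {table_name} ALTER COLUMN {column_name} DROP STORED;
~~~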
+ +In this example, create a simple table with a computed column: + +{% include_cached copy-clipboard.html %} +~~~ sql +> CREATE TABLE office_dogs ( + id INT PRIMARY KEY, + first_name STRING, + last_name STRING, + full_name STRING AS (CONCAT(first_name, ' ', last_name)) STORED + ); +~~~ + +Then, insert a few rows of data: + +{% include_cached copy-clipboard.html %} +~~~ sql +> INSERT INTO office_dogs (id, first_name, last_name) VALUES + (1, 'Petee', 'Hirata'), + (2, 'Carl', 'Kimball'), + (3, 'Ernie', 'Narayan'); +~~~ + +{% include_cached copy-clipboard.html %} +~~~ sql +> SELECT * FROM office_dogs; +~~~ + +~~~ ++----+------------+-----------+---------------+ +| id | first_name | last_name | full_name | ++----+------------+-----------+---------------+ +| 1 | Petee | Hirata | Petee Hirata | +| 2 | Carl | Kimball | Carl Kimball | +| 3 | Ernie | Narayan | Ernie Narayan | ++----+------------+-----------+---------------+ +(3 rows) +~~~ + +The `full_name` column is computed from the `first_name` and `last_name` columns without the need to define a [view]({% link {{ page.version.version }}/views.md %}). You can view the column details with the [`SHOW COLUMNS`]({% link {{ page.version.version }}/show-columns.md %}) statement: + +{% include_cached copy-clipboard.html %} +~~~ sql +> SHOW COLUMNS FROM office_dogs; +~~~ + +~~~ ++-------------+-----------+-------------+----------------+------------------------------------+-------------+ +| column_name | data_type | is_nullable | column_default | generation_expression | indices | ++-------------+-----------+-------------+----------------+------------------------------------+-------------+ +| id | INT | false | NULL | | {"primary"} | +| first_name | STRING | true | NULL | | {} | +| last_name | STRING | true | NULL | | {} | +| full_name | STRING | true | NULL | concat(first_name, ' ', last_name) | {} | ++-------------+-----------+-------------+----------------+------------------------------------+-------------+ +(4 rows) +~~~ + +Now, convert the computed column (`full_name`) to a regular column: + +{% include_cached copy-clipboard.html %} +~~~ sql +> ALTER TABLE office_dogs ALTER COLUMN full_name DROP STORED; +~~~ + +Check that the computed column was converted: + +{% include_cached copy-clipboard.html %} +~~~ sql +> SHOW COLUMNS FROM office_dogs; +~~~ + +~~~ ++-------------+-----------+-------------+----------------+-----------------------+-------------+ +| column_name | data_type | is_nullable | column_default | generation_expression | indices | ++-------------+-----------+-------------+----------------+-----------------------+-------------+ +| id | INT | false | NULL | | {"primary"} | +| first_name | STRING | true | NULL | | {} | +| last_name | STRING | true | NULL | | {} | +| full_name | STRING | true | NULL | | {} | ++-------------+-----------+-------------+----------------+-----------------------+-------------+ +(4 rows) +~~~ + +The computed column is now a regular column and can be updated as such: + +{% include_cached copy-clipboard.html %} +~~~ sql +> INSERT INTO office_dogs (id, first_name, last_name, full_name) VALUES (4, 'Lola', 'McDog', 'This is not computed'); +~~~ + +{% include_cached copy-clipboard.html %} +~~~ sql +> SELECT * FROM office_dogs; +~~~ + +~~~ ++----+------------+-----------+----------------------+ +| id | first_name | last_name | full_name | ++----+------------+-----------+----------------------+ +| 1 | Petee | Hirata | Petee Hirata | +| 2 | Carl | Kimball | Carl Kimball | +| 3 | Ernie | Narayan | Ernie Narayan | +| 4 | Lola | 
McDog | This is not computed | ++----+------------+-----------+----------------------+ +(4 rows) +~~~ diff --git a/src/current/_includes/v25.3/computed-columns/jsonb.md b/src/current/_includes/v25.3/computed-columns/jsonb.md new file mode 100644 index 00000000000..3851d463245 --- /dev/null +++ b/src/current/_includes/v25.3/computed-columns/jsonb.md @@ -0,0 +1,70 @@ +In this example, create a table with a `JSONB` column and a stored computed column: + +{% include_cached copy-clipboard.html %} +~~~ sql +> CREATE TABLE student_profiles ( + id STRING PRIMARY KEY AS (profile->>'id') STORED, + profile JSONB +); +~~~ + +Create a computed column after you create the table: + +{% include_cached copy-clipboard.html %} +~~~ sql +> ALTER TABLE student_profiles ADD COLUMN age INT AS ((profile->>'age')::INT) STORED; +~~~ + +Then, insert a few rows of data: + +{% include_cached copy-clipboard.html %} +~~~ sql +> INSERT INTO student_profiles (profile) VALUES + ('{"id": "d78236", "name": "Arthur Read", "age": "16", "school": "PVPHS", "credits": 120, "sports": "none"}'), + ('{"name": "Buster Bunny", "age": "15", "id": "f98112", "school": "THS", "credits": 67, "clubs": "MUN"}'), + ('{"name": "Ernie Narayan", "school" : "Brooklyn Tech", "id": "t63512", "sports": "Track and Field", "clubs": "Chess"}'); +~~~ + +{% include_cached copy-clipboard.html %} +~~~ sql +> SELECT * FROM student_profiles; +~~~ +~~~ ++--------+---------------------------------------------------------------------------------------------------------------------+------+ +| id | profile | age | ++--------+---------------------------------------------------------------------------------------------------------------------+------+ +| d78236 | {"age": "16", "credits": 120, "id": "d78236", "name": "Arthur Read", "school": "PVPHS", "sports": "none"} | 16 | +| f98112 | {"age": "15", "clubs": "MUN", "credits": 67, "id": "f98112", "name": "Buster Bunny", "school": "THS"} | 15 | +| t63512 | {"clubs": "Chess", "id": "t63512", "name": "Ernie Narayan", "school": "Brooklyn Tech", "sports": "Track and Field"} | NULL | ++--------+---------------------------------------------------------------------------------------------------------------------+------+ +~~~ + +The primary key `id` is computed as a field from the `profile` column. Additionally, the `age` column is computed from the `profile` column data. 
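Because `age` is a stored computed column, it can be indexed and filtered like any other column, which is a common reason to materialize a `JSONB` field. A brief sketch (this index is illustrative and not part of the original example):

{% include_cached copy-clipboard.html %}
~~~ sql
CREATE INDEX ON student_profiles (age);
SELECT id, age FROM student_profiles WHERE age >= 16;
~~~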
This example shows how to add a stored computed column with a [coerced type]({% link {{ page.version.version }}/scalar-expressions.md %}#explicit-type-coercions): + +{% include_cached copy-clipboard.html %} +~~~ sql +CREATE TABLE json_data ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + json_info JSONB +); +INSERT INTO json_data (json_info) VALUES ('{"amount": "123.45"}'); +~~~ + +{% include_cached copy-clipboard.html %} +~~~ sql +ALTER TABLE json_data ADD COLUMN amount DECIMAL AS ((json_info->>'amount')::DECIMAL) STORED; +~~~ + +{% include_cached copy-clipboard.html %} +~~~ sql +SELECT * FROM json_data; +~~~ + +~~~ + id | json_info | amount +---------------------------------------+----------------------+--------- + e7c3d706-1367-4d77-bfb4-386dfdeb10f9 | {"amount": "123.45"} | 123.45 +(1 row) +~~~ diff --git a/src/current/_includes/v25.3/computed-columns/secondary-index.md b/src/current/_includes/v25.3/computed-columns/secondary-index.md new file mode 100644 index 00000000000..8b78325e695 --- /dev/null +++ b/src/current/_includes/v25.3/computed-columns/secondary-index.md @@ -0,0 +1,63 @@ +In this example, create a table with a virtual computed column and an index on that column: + +{% include_cached copy-clipboard.html %} +~~~ sql +> CREATE TABLE gymnastics ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + athlete STRING, + vault DECIMAL, + bars DECIMAL, + beam DECIMAL, + floor DECIMAL, + combined_score DECIMAL AS (vault + bars + beam + floor) VIRTUAL, + INDEX total (combined_score DESC) + ); +~~~ + +Then, insert a few rows of data: + +{% include_cached copy-clipboard.html %} +~~~ sql +> INSERT INTO gymnastics (athlete, vault, bars, beam, floor) VALUES + ('Simone Biles', 15.933, 14.800, 15.300, 15.800), + ('Gabby Douglas', 0, 15.766, 0, 0), + ('Laurie Hernandez', 15.100, 0, 15.233, 14.833), + ('Madison Kocian', 0, 15.933, 0, 0), + ('Aly Raisman', 15.833, 0, 15.000, 15.366); +~~~ + +{% include_cached copy-clipboard.html %} +~~~ sql +> SELECT * FROM gymnastics; +~~~ +~~~ ++--------------------------------------+------------------+--------+--------+--------+--------+----------------+ +| id | athlete | vault | bars | beam | floor | combined_score | ++--------------------------------------+------------------+--------+--------+--------+--------+----------------+ +| 3fe11371-6a6a-49de-bbef-a8dd16560fac | Aly Raisman | 15.833 | 0 | 15.000 | 15.366 | 46.199 | +| 56055a70-b4c7-4522-909b-8f3674b705e5 | Madison Kocian | 0 | 15.933 | 0 | 0 | 15.933 | +| 69f73fd1-da34-48bf-aff8-71296ce4c2c7 | Gabby Douglas | 0 | 15.766 | 0 | 0 | 15.766 | +| 8a7b730b-668d-4845-8d25-48bda25114d6 | Laurie Hernandez | 15.100 | 0 | 15.233 | 14.833 | 45.166 | +| b2c5ca80-21c2-4853-9178-b96ce220ea4d | Simone Biles | 15.933 | 14.800 | 15.300 | 15.800 | 61.833 | ++--------------------------------------+------------------+--------+--------+--------+--------+----------------+ +~~~ + +Now, run a query using the secondary index: + +{% include_cached copy-clipboard.html %} +~~~ sql +> SELECT athlete, combined_score FROM gymnastics ORDER BY combined_score DESC; +~~~ +~~~ ++------------------+----------------+ +| athlete | combined_score | ++------------------+----------------+ +| Simone Biles | 61.833 | +| Aly Raisman | 46.199 | +| Laurie Hernandez | 45.166 | +| Madison Kocian | 15.933 | +| Gabby Douglas | 15.766 | ++------------------+----------------+ +~~~ + +The athlete with the highest combined score of 61.833 is Simone Biles. 
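To confirm that the `ORDER BY` is served by the `total` index rather than a sort over a full scan, you can inspect the statement plan with `EXPLAIN` (the exact output varies by version):

{% include_cached copy-clipboard.html %}
~~~ sql
EXPLAIN SELECT athlete, combined_score FROM gymnastics ORDER BY combined_score DESC;
~~~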
diff --git a/src/current/_includes/v25.3/computed-columns/simple.md b/src/current/_includes/v25.3/computed-columns/simple.md new file mode 100644 index 00000000000..3538a44f0d1 --- /dev/null +++ b/src/current/_includes/v25.3/computed-columns/simple.md @@ -0,0 +1,40 @@ +In this example, let's create a simple table with a computed column: + +{% include_cached copy-clipboard.html %} +~~~ sql +> CREATE TABLE users ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + city STRING, + first_name STRING, + last_name STRING, + full_name STRING AS (CONCAT(first_name, ' ', last_name)) STORED, + address STRING, + credit_card STRING, + dl STRING UNIQUE CHECK (LENGTH(dl) < 8) +); +~~~ + +Then, insert a few rows of data: + +{% include_cached copy-clipboard.html %} +~~~ sql +> INSERT INTO users (first_name, last_name) VALUES + ('Lola', 'McDog'), + ('Carl', 'Kimball'), + ('Ernie', 'Narayan'); +~~~ + +{% include_cached copy-clipboard.html %} +~~~ sql +> SELECT * FROM users; +~~~ +~~~ + id | city | first_name | last_name | full_name | address | credit_card | dl ++--------------------------------------+------+------------+-----------+---------------+---------+-------------+------+ + 5740da29-cc0c-47af-921c-b275d21d4c76 | NULL | Ernie | Narayan | Ernie Narayan | NULL | NULL | NULL + e7e0b748-9194-4d71-9343-cd65218848f0 | NULL | Lola | McDog | Lola McDog | NULL | NULL | NULL + f00e4715-8ca7-4d5a-8de5-ef1d5d8092f3 | NULL | Carl | Kimball | Carl Kimball | NULL | NULL | NULL +(3 rows) +~~~ + +The `full_name` column is computed from the `first_name` and `last_name` columns without the need to define a [view]({% link {{ page.version.version }}/views.md %}). diff --git a/src/current/_includes/v25.3/computed-columns/virtual.md b/src/current/_includes/v25.3/computed-columns/virtual.md new file mode 100644 index 00000000000..4c6718d7552 --- /dev/null +++ b/src/current/_includes/v25.3/computed-columns/virtual.md @@ -0,0 +1,41 @@ +In this example, create a table with a `JSONB` column and virtual computed columns: + +{% include_cached copy-clipboard.html %} +~~~ sql +> CREATE TABLE student_profiles ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + profile JSONB, + full_name STRING AS (concat_ws(' ',profile->>'firstName', profile->>'lastName')) VIRTUAL, + birthday TIMESTAMP AS (parse_timestamp(profile->>'birthdate')) VIRTUAL +); +~~~ + +Then, insert a few rows of data: + +{% include_cached copy-clipboard.html %} +~~~ sql +> INSERT INTO student_profiles (profile) VALUES + ('{"id": "d78236", "firstName": "Arthur", "lastName": "Read", "birthdate": "2010-01-25", "school": "PVPHS", "credits": 120, "sports": "none"}'), + ('{"firstName": "Buster", "lastName": "Bunny", "birthdate": "2011-11-07", "id": "f98112", "school": "THS", "credits": 67, "clubs": "MUN"}'), + ('{"firstName": "Ernie", "lastName": "Narayan", "school" : "Brooklyn Tech", "id": "t63512", "sports": "Track and Field", "clubs": "Chess"}'); +~~~ + +{% include_cached copy-clipboard.html %} +~~~ sql +> SELECT * FROM student_profiles; +~~~ +~~~ + id | profile | full_name | birthday +---------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------+---------------+---------------------- + 0e420282-105d-473b-83e2-3b082e7033e4 | {"birthdate": "2011-11-07", "clubs": "MUN", "credits": 67, "firstName": "Buster", "id": "f98112", "lastName": "Bunny", "school": "THS"} | Buster Bunny | 2011-11-07 00:00:00 + 6e9b77cd-ec67-41ae-b346-7b3d89902c72 | {"birthdate": 
"2010-01-25", "credits": 120, "firstName": "Arthur", "id": "d78236", "lastName": "Read", "school": "PVPHS", "sports": "none"} | Arthur Read | 2010-01-25 00:00:00 + f74b21e3-dc1e-49b7-a648-3c9b9024a70f | {"clubs": "Chess", "firstName": "Ernie", "id": "t63512", "lastName": "Narayan", "school": "Brooklyn Tech", "sports": "Track and Field"} | Ernie Narayan | NULL +(3 rows) + + +Time: 2ms total (execution 2ms / network 0ms) +~~~ + +The virtual column `full_name` is computed as a field from the `profile` column's data. The first name and last name are concatenated and separated by a single whitespace character using the [`concat_ws` string function]({% link {{ page.version.version }}/functions-and-operators.md %}#string-and-byte-functions). + +The virtual column `birthday` is parsed as a `TIMESTAMP` value from the `profile` column's `birthdate` string value. The [`parse_timestamp` function]({% link {{ page.version.version }}/functions-and-operators.md %}) is used to parse strings in `TIMESTAMP` format. diff --git a/src/current/_includes/v25.3/connect/cockroach-workload-parameters.md b/src/current/_includes/v25.3/connect/cockroach-workload-parameters.md new file mode 100644 index 00000000000..68e11059b9e --- /dev/null +++ b/src/current/_includes/v25.3/connect/cockroach-workload-parameters.md @@ -0,0 +1 @@ +The `cockroach workload` command does not support connection or security flags like other [`cockroach` commands]({% link {{ page.version.version }}/cockroach-commands.md %}). Instead, you must use a [connection string]({% link {{ page.version.version }}/connection-parameters.md %}) at the end of the command. \ No newline at end of file diff --git a/src/current/_includes/v25.3/connect/connection-url.md b/src/current/_includes/v25.3/connect/connection-url.md new file mode 100644 index 00000000000..ae994bb3047 --- /dev/null +++ b/src/current/_includes/v25.3/connect/connection-url.md @@ -0,0 +1,19 @@ +
+Set a `DATABASE_URL` environment variable to your connection string. + +{% include_cached copy-clipboard.html %} +~~~ shell +export DATABASE_URL="{connection string}" +~~~ + +
+ +
+Set a `DATABASE_URL` environment variable to your connection string. + +{% include_cached copy-clipboard.html %} +~~~ shell +$env:DATABASE_URL = "{connection string}" +~~~ + +
\ No newline at end of file diff --git a/src/current/_includes/v25.3/connect/core-note.md b/src/current/_includes/v25.3/connect/core-note.md new file mode 100644 index 00000000000..7b701cafb80 --- /dev/null +++ b/src/current/_includes/v25.3/connect/core-note.md @@ -0,0 +1,7 @@ +{{site.data.alerts.callout_info}} +The connection information shown on this page uses [client certificate and key authentication]({% link {{ page.version.version }}/authentication.md %}#client-authentication) to connect to a secure, CockroachDB {{ site.data.products.core }} cluster. + +To connect to a CockroachDB {{ site.data.products.core }} cluster with client certificate and key authentication, you must first [generate server and client certificates]({% link {{ page.version.version }}/authentication.md %}#using-digital-certificates-with-cockroachdb). + +For instructions on starting a secure cluster, see [Start a Local Cluster (Secure)]({% link {{ page.version.version }}/secure-a-cluster.md %}). +{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/connect/jdbc-connection-url.md b/src/current/_includes/v25.3/connect/jdbc-connection-url.md new file mode 100644 index 00000000000..c055a390b4e --- /dev/null +++ b/src/current/_includes/v25.3/connect/jdbc-connection-url.md @@ -0,0 +1,19 @@ +Set a `JDBC_DATABASE_URL` environment variable to your JDBC connection string. + +
+ +{% include_cached copy-clipboard.html %} +~~~ shell +export JDBC_DATABASE_URL="{connection string}" +~~~ + +
+ +
+ +{% include_cached copy-clipboard.html %} +~~~ shell +$env:JDBC_DATABASE_URL = "{connection string}" +~~~ + +
diff --git a/src/current/_includes/v25.3/crdb-internal-cluster-locks-warning.md b/src/current/_includes/v25.3/crdb-internal-cluster-locks-warning.md new file mode 100644 index 00000000000..d8bc82936cc --- /dev/null +++ b/src/current/_includes/v25.3/crdb-internal-cluster-locks-warning.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_danger}} +Querying the `crdb_internal.cluster_locks` table triggers an RPC fan-out to all nodes in the cluster, which can make it a relatively expensive operation. +{{site.data.alerts.end}} \ No newline at end of file diff --git a/src/current/_includes/v25.3/crdb-internal-transaction-contention-events-warning.md b/src/current/_includes/v25.3/crdb-internal-transaction-contention-events-warning.md new file mode 100644 index 00000000000..3f67e853b7d --- /dev/null +++ b/src/current/_includes/v25.3/crdb-internal-transaction-contention-events-warning.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_danger}} +Querying the `crdb_internal.transaction_contention_events` table triggers an expensive RPC fan-out to all nodes, making it a resource-intensive operation. Avoid frequent polling and do not use this table for continuous monitoring. +{{site.data.alerts.end}} \ No newline at end of file diff --git a/src/current/_includes/v25.3/dedicated-pci-compliance.md b/src/current/_includes/v25.3/dedicated-pci-compliance.md new file mode 100644 index 00000000000..59dd7790daf --- /dev/null +++ b/src/current/_includes/v25.3/dedicated-pci-compliance.md @@ -0,0 +1,7 @@ +{{site.data.alerts.callout_info}} +CockroachDB {{ site.data.products.dedicated }} clusters comply with the Payment Card Industry Data Security Standard (PCI DSS). Compliance is certified by a PCI Qualified Security Assessor (QSA). + +To achieve compliance with PCI DSS on a CockroachDB {{ site.data.products.dedicated }} cluster, you must enable all required features in your CockroachDB {{ site.data.products.cloud }} organization and your cluster, and you must take additional steps to ensure that your organization's applications and procedures comply with PCI DSS. For details, refer to [PCI DSS Compliance in CockroachDB {{ site.data.products.dedicated }} advanced](https://cockroachlabs.com/docs/cockroachcloud/pci-dss.html). + +To learn more about achieving PCI DSS compliance with CockroachDB {{ site.data.products.dedicated }}, contact your Cockroach Labs account team. +{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/demo_movr.md b/src/current/_includes/v25.3/demo_movr.md new file mode 100644 index 00000000000..5a8a431193a --- /dev/null +++ b/src/current/_includes/v25.3/demo_movr.md @@ -0,0 +1,6 @@ +Start the [MovR database]({% link {{ page.version.version }}/movr.md %}) on a 3-node CockroachDB demo cluster with a larger data set. + +{% include_cached copy-clipboard.html %} +~~~ shell +cockroach demo movr --num-histories 250000 --num-promo-codes 250000 --num-rides 125000 --num-users 12500 --num-vehicles 3750 --nodes 3 +~~~ \ No newline at end of file diff --git a/src/current/_includes/v25.3/essential-alerts.md b/src/current/_includes/v25.3/essential-alerts.md new file mode 100644 index 00000000000..dd311c760c6 --- /dev/null +++ b/src/current/_includes/v25.3/essential-alerts.md @@ -0,0 +1,533 @@ +{% if include.deployment == 'self-hosted' %} +## Platform + +### High CPU + +A node with a high CPU utilization, an *overloaded* node, has a limited ability to process the user workload and increases the risks of cluster instability. + +**Metric** +
[`sys.cpu.combined.percent-normalized`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#sys-cpu-combined-percent-normalized) +
[`sys.cpu.host.combined.percent-normalized`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#sys-cpu-host-combined-percent-normalized) + +**Rule** +
Set alerts for each node for each of the listed metrics: +
WARNING: Metric greater than `0.80` for `4 hours` +
CRITICAL: Metric greater than `0.90` for `1 hour` + +**Action** + +- Refer to [CPU Usage]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#cpu-usage) and [Workload Concurrency]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#workload-concurrency). + +- In the DB Console, navigate to **Metrics**, [**Hardware** dashboard]({% link {{ page.version.version }}/ui-hardware-dashboard.md %}) for the cluster and check for high values on the [**CPU Percent** graph]({% link {{ page.version.version }}/ui-hardware-dashboard.md %}#cpu-percent) and the [**Host CPU Percent** graph]({% link {{ page.version.version }}/ui-hardware-dashboard.md %}#host-cpu-percent). + +- In the DB Console, navigate to **Metrics**, [**SQL** dashboard]({% link {{ page.version.version }}/ui-sql-dashboard.md %}) for the cluster and check for high values on the [**Active SQL Statements** graph]({% link {{ page.version.version }}/ui-sql-dashboard.md %}#active-sql-statements). This graph shows the true concurrency of the workload, which may exceed the cluster capacity planning guidance of no more than 4 active statements per vCPU or core. + +- A persistently high CPU utilization of all nodes in a CockroachDB cluster suggests the current compute resources may be insufficient to support the user workload's concurrency requirements. If confirmed, the number of processors (vCPUs or cores) in the CockroachDB cluster needs to be adjusted to sustain the required level of workload concurrency. For a prompt resolution, either add cluster nodes or throttle the workload concurrency, for example, by reducing the number of concurrent connections to not exceed 4 active statements per vCPU or core. + +### Hot node (hotspot) + +Unbalanced utilization of CockroachDB nodes in a cluster may negatively affect the cluster's performance and stability, with some nodes getting overloaded while others remain relatively underutilized. + +**Metric** +
[`sys.cpu.combined.percent-normalized`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#sys-cpu-combined-percent-normalized) +
[`sys.cpu.host.combined.percent-normalized`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#sys-cpu-host-combined-percent-normalized) + +**Rule** +
Set alerts for each of the listed metrics: +
WARNING: The max CPU utilization across all nodes exceeds the cluster's median CPU utilization by `30%` for `2 hours` + +**Action** + +- Refer to [Understand hotspots]({% link {{ page.version.version }}/understand-hotspots.md %}). + +### Node memory utilization + +One node with high memory utilization is a cluster stability risk. High memory utilization is a prelude to a node's [out-of-memory (OOM) crash]({% link {{ page.version.version }}/cluster-setup-troubleshooting.md %}#out-of-memory-oom-crash) — the process is terminated by the OS when the system is critically low on memory. An OOM condition is not expected to occur if a CockroachDB cluster is provisioned and sized per [Cockroach Labs guidance]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#memory-planning). + +**Metric**
[`sys.rss`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#sys-rss) + +**Rule** +
Set alerts for each node: +
WARNING: `sys.rss` as a fraction of total system memory greater than `0.80` for `4 hours` +
CRITICAL: `sys.rss` as a fraction of total system memory greater than `0.90` for `1 hour` + +**Action** + +- Provision all CockroachDB VMs or machines with [sufficient RAM]({% link {{ page.version.version }}/recommended-production-settings.md %}#memory). + +### Node storage performance + +Under-configured or under-provisioned disk storage is a common root cause of inconsistent CockroachDB cluster performance and could also lead to cluster instability. Refer to [Disk IOPS]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#disk-iops). + +**Metric**
[`sys.host.disk.iopsinprogress`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#sys-host-disk-iopsinprogress) + +**Rule** +
WARNING: `sys.host.disk.iopsinprogress` greater than `10` for `10 seconds` +
CRITICAL: `sys.host.disk.iopsinprogress` greater than `20` for `10 seconds` + +**Action** + +- Provision enough storage capacity for CockroachDB data, and configure your volumes to maximize disk I/O. Refer to [Storage and disk I/O]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#storage-and-disk-i-o). + +### Version mismatch + +All CockroachDB cluster nodes should be running exactly the same executable (with an identical build label). This warning guards against an operational error where some nodes were not upgraded. + +**Metric**
`build.timestamp` + +**Rule** +
Set alerts for each node: +
WARNING: `build.timestamp` not the same across cluster nodes for more than `4 hours` + +**Action** + +- Ensure all cluster nodes are running exactly the same CockroachDB version, including the patch release version number. + +### High open file descriptor count + +Send an alert when a cluster is getting close to the open file descriptor limit. + +**Metric** +
`sys.fd.open` +
`sys.fd.softlimit` + +**Rule** +
Set alerts for each node: +
WARNING: `sys_fd_open` / `sys_fd_softlimit` greater than `0.8` for `10 minutes` + +**Action** + +- Refer to [File descriptors limit]({% link {{ page.version.version }}/recommended-production-settings.md %}#file-descriptors-limit). +{% endif %} + +## Storage + +### Node storage capacity + +A CockroachDB node will not be able to operate if there is no free disk space on a CockroachDB [store]({% link {{ page.version.version }}/cockroach-start.md %}#store) volume. + +**Metric**
[`capacity`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#capacity) +
[`capacity.available`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#capacity-available) + +**Rule** +
Set alerts for each node: +
WARNING: `capacity.available`/`capacity` is less than `0.30` for `24 hours` +
CRITICAL: `capacity.available`/`capacity` is less than `0.10` for `1 hour` + +**Action** + +- Refer to [Storage Capacity]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#storage-capacity). +- Increase the CockroachDB node storage capacity. CockroachDB storage volumes should not be more than 60% utilized (at least 40% free space). +- In a "disk full" situation, you may be able to get a node "unstuck" by removing the [automatically created emergency ballast file]({% link {{ page.version.version }}/cluster-setup-troubleshooting.md %}#automatic-ballast-files). + +{% if include.deployment == 'self-hosted' %} +### Write stalls + +A high `write-stalls` value means CockroachDB is unable to write to disk within an acceptable time; the node is experiencing disk latency and is not responding to writes. + +**Metric**
[`storage.write-stalls`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#storage-write-stalls) + +**Rule** +
Set alerts for each node: +
WARNING: the per-minute count of `storage.write-stalls` is greater than or equal to `1` +
CRITICAL: the per-second count of `storage.write-stalls` is greater than or equal to `1` + +**Action** + +- Refer to [Disk stalls]({% link {{ page.version.version }}/cluster-setup-troubleshooting.md %}#disk-stalls). +{% endif %} + +{% if include.deployment == 'self-hosted' %} +## Health + +### Node restarting too frequently + +Send an alert if a node has restarted more than once in the last 10 minutes. Calculate this using the number of times the `sys.uptime` metric was reset to zero. + +**Metric**
[`sys.uptime`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#sys-uptime) + +**Rule** +
Set alerts for each node: +
WARNING: `sys.uptime` resets greater than `1` in the last `10 minutes` + +**Action** + +- Refer to [Node process restarts]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#node-process-restarts). + +### Node LSM storage health + +CockroachDB uses the [Pebble]({% link {{ page.version.version }}/architecture/storage-layer.md %}#pebble) storage engine, which relies on a [Log-structured Merge-tree (LSM tree)]({% link {{ page.version.version }}/architecture/storage-layer.md %}#log-structured-merge-trees) to manage data storage. The health of an LSM tree can be measured by its [*read amplification*]({% link {{ page.version.version }}/architecture/storage-layer.md %}#inverted-lsms), which is the average number of [SST files]({% link {{ page.version.version }}/architecture/storage-layer.md %}#log-structured-merge-trees) being checked per read operation. A value in the single digits is characteristic of a healthy LSM tree; a value in the double, triple, or quadruple digits suggests an [inverted LSM]({% link {{ page.version.version }}/architecture/storage-layer.md %}#inverted-lsms). A node reporting high read amplification indicates a problem on that node that is likely to affect the workload. + +**Metric**
`rocksdb.read-amplification` + +**Rule** +
Set alerts for each node: +
WARNING: `rocksdb.read-amplification` greater than `50` for `1 hour` +
CRITICAL: `rocksdb.read-amplification` greater than `150` for `15 minutes` + +**Action** + +- Refer to [LSM Health]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#lsm-health). + +## Expiration of license and certificates + +### Enterprise license expiration + +Prevent [license]({% link {{ page.version.version }}/licensing-faqs.md %}#types-of-licenses) expiration to avoid any disruption to feature access. + +**Metric**
[`seconds.until.enterprise.license.expiry`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#seconds-until-enterprise-license-expiry) + +**Rule** +
WARNING: `seconds.until.enterprise.license.expiry` is greater than `0` and less than `1814400` seconds (3 weeks) +
CRITICAL: `seconds.until.enterprise.license.expiry` is greater than `0` and less than `259200` seconds (3 days) + +**Action** + +[Renew the enterprise license]({% link {{ page.version.version }}/licensing-faqs.md %}#renew-an-expired-license). + +### Security certificate expiration + +Avoid [security certificate]({% link {{ page.version.version }}/cockroach-cert.md %}) expiration. + +**Metric** +
[`security.certificate.expiration.ca`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#security-certificate-expiration-ca) +
[`security.certificate.expiration.client-ca`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#security-certificate-expiration-client-ca) +
[`security.certificate.expiration.ui`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#security-certificate-expiration-ui) +
[`security.certificate.expiration.ui-ca`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#security-certificate-expiration-ui-ca) +
[`security.certificate.expiration.node`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#security-certificate-expiration-node) +
[`security.certificate.expiration.node-client`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#security-certificate-expiration-node-client) + +**Rule** +
Set alerts for each of the listed metrics: +
WARNING: Metric is greater than `0` and less than `1814400` seconds (3 weeks) until certificate expiration +
CRITICAL: Metric is greater than `0` and less than `259200` seconds (3 days) until certificate expiration + +**Action** + +[Rotate the expiring certificates]({% link {{ page.version.version }}/rotate-certificates.md %}). +{% endif %} + +{% if include.deployment == 'self-hosted' %} +## KV distributed + +{{site.data.alerts.callout_info}} +During [rolling maintenance]({% link {{ page.version.version }}/upgrade-cockroach-version.md %}) or planned cluster resizing, the nodes' state and count will be changing. **Mute the KV distributed alerts described in the following sections during routine maintenance procedures** to avoid unnecessary distractions. +{{site.data.alerts.end}} + +### Heartbeat latency + +Monitor cluster health for early signs of instability: a heartbeat latency above 1 second is a sign of instability. + +**Metric**
[`liveness.heartbeatlatency`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#liveness-heartbeatlatency) + +**Rule** +
WARNING: `liveness.heartbeatlatency` greater than `0.5s` +
CRITICAL: `liveness.heartbeatlatency` greater than `3s` + +**Action** + +- Refer to [Node liveness issues]({% link {{ page.version.version }}/cluster-setup-troubleshooting.md %}#node-liveness-issues). + +### Live node count change + +Send an alert when the liveness checks reported by a node are inconsistent with the rest of the cluster. This critical metric tracks the number of live nodes in the cluster as seen by each node (the value is `0` if the node is not itself live). + +**Metric**
[`liveness.livenodes`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#liveness-livenodes) + +**Rule** +
Set alerts for each node: +
WARNING: max(`liveness.livenodes`) for the cluster - min(`liveness.livenodes`) for a node > `0` for `2 minutes` +
CRITICAL: max(`liveness.livenodes`) for the cluster - min(`liveness.livenodes`) for a node > `0` for `5 minutes` + +**Action** + +- Refer to [Node liveness issues]({% link {{ page.version.version }}/cluster-setup-troubleshooting.md %}#node-liveness-issues). + +### Intent buildup + +Send an alert when very large transactions are [locking]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#write-intents) millions of keys (rows). A common example is a transaction with a [`DELETE`]({% link {{ page.version.version }}/delete.md %}) that affects a large number of rows. Transactions with an excessively large scope are often inadvertent, perhaps due to a non-selective filter and a specific data distribution that was not anticipated by an application developer. + +Transactions that create a large number of [write intents]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#write-intents) could have a negative effect on the workload's performance. These transactions may create locking contention, thus limiting concurrency. This would reduce throughput and, in extreme cases, lead to stalled workloads. + +**Metric** +
`intentcount` + +**Rule** +
WARNING: `intentcount` greater than `10,000,000` for `2 minutes` +
CRITICAL: `intentcount` greater than `10,000,000` for `5 minutes` +
For tighter scrutiny of transaction scope, lower the `intentcount` threshold that triggers an alert. + +**Action** + +- Identify the large-scope transactions that acquire a lot of locks. Consider reducing the scope of large transactions, implementing them as several smaller-scope transactions. For example, if the alert is triggered by a large-scope `DELETE`, consider "paging" `DELETE`s that target thousands of records instead of millions. This is often the most effective resolution; however, it generally means an application-level [refactoring]({% link {{ page.version.version }}/bulk-update-data.md %}). +- After reviewing the workload, you may conclude that the possible performance impact of allowing transactions to take a large number of intents is not a concern. For example, a large delete of obsolete, not-in-use data may create no concurrency implications, and the elapsed time to execute that transaction may not be impactful. In that case, no response could be a valid way to handle this alert. +
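A minimal sketch of the paging pattern, assuming a hypothetical `events` table with `id` and `ts` columns (the batch size and filter are illustrative; rerun the statement in a loop until it deletes zero rows): + + {% include_cached copy-clipboard.html %} + ```sql + -- Each iteration locks at most 10,000 rows instead of millions. + DELETE FROM events WHERE id IN (SELECT id FROM events WHERE ts < '2024-01-01' LIMIT 10000); + ``` +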
{% endif %} + +{% if include.deployment == 'self-hosted' %} +## KV replication + +### Unavailable ranges + +Send an alert when the number of ranges with fewer live replicas than needed for quorum is non-zero for too long. + +**Metric** +
[`ranges.unavailable`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#ranges-unavailable) + +**Rule** +
WARNING: `ranges.unavailable` greater than `0` for `10 minutes` + +**Action** + +- Refer to [Replication issues]({% link {{ page.version.version }}/cluster-setup-troubleshooting.md %}#replication-issues). + +### Tripped replica circuit breakers + +Send an alert when a replica stops serving traffic due to other replicas being offline for too long. + +**Metric** +
`kv.replica_circuit_breaker.num_tripped_replicas` + +**Rule** +
WARNING: `kv.replica_circuit_breaker.num_tripped_replicas` greater than `0` for `10 minutes` + +**Action** + +- Refer to [Per-replica circuit breakers]({% link {{ page.version.version }}/architecture/replication-layer.md %}#per-replica-circuit-breakers) and [Replication issues]({% link {{ page.version.version }}/cluster-setup-troubleshooting.md %}#replication-issues). + +### Under-replicated ranges + +Send an alert when the number of ranges with replication below the replication factor is non-zero for too long. + +**Metric** +
[`ranges.underreplicated`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#ranges-underreplicated) + +**Rule** +
WARNING: `ranges.underreplicated` greater than `0` for `1 hour` + +**Action** + +- Refer to [Replication issues]({% link {{ page.version.version }}/cluster-setup-troubleshooting.md %}#replication-issues). +
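To map an under-replicated range to the database object it belongs to, you can use the following query (a sketch matching the `ranges.underreplicated` guidance in the essential metrics reference; replace `123` with the range ID reported in the logs or DB Console): + + {% include_cached copy-clipboard.html %} + ```sql + -- Returns the table and index that the given range belongs to. + SELECT table_name, index_name FROM [SHOW RANGES WITH INDEXES] WHERE range_id = 123; + ``` +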
### Requests stuck in raft + +Send an alert when requests are taking a very long time in replication. An (evaluated) request has to pass through the replication layer, notably the quota pool and raft. If it fails to do so within a highly permissive duration, the gauge is incremented (and decremented again once the request is either applied or returns an error). A nonzero value indicates range or replica unavailability, and should be investigated. + +**Metric** +
`requests.slow.raft` + +**Rule** +
WARNING: `requests.slow.raft` greater than `0` for `10 minutes` + +**Action** + +- Refer to [Raft]({% link {{ page.version.version }}/architecture/replication-layer.md %}#raft) and [Replication issues]({% link {{ page.version.version }}/cluster-setup-troubleshooting.md %}#replication-issues). +{% endif %} + +## SQL + +### Node not executing SQL + +Send an alert when a node is not executing SQL despite having connections. `sql.conns` shows the number of connections as well as the distribution, or balancing, of connections across cluster nodes. An imbalance can lead to nodes becoming overloaded. + +**Metric** +
[`sql.conns`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#sql-conns) +
`sql.query.count` + +**Rule** +
Set alerts for each node: +
WARNING: `sql.conns` greater than `0` while `sql.query.count` equals `0` + +**Action** + +- Refer to [Connection Pooling]({% link {{ page.version.version }}/connection-pooling.md %}). +
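A quick per-node spot check, assuming SQL access (`crdb_internal.node_metrics` reports values only for the node your session is connected to, so repeat against each node): + + {% include_cached copy-clipboard.html %} + ```sql + -- Compare connection count and query count on the connected node. + SELECT name, value FROM crdb_internal.node_metrics WHERE name IN ('sql.conns', 'sql.query.count'); + ``` +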
### SQL query failure + +Send an alert when the query failure count exceeds a user-determined threshold based on the application's SLA. + +**Metric** +
[`sql.failure.count`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#sql-failure-count) + +**Rule** +
WARNING: `sql.failure.count` is greater than a threshold (based on the user’s application SLA) + +**Action** + +- Use the [**Insights** page]({% link {{ page.version.version }}/ui-insights-page.md %}) to find failed executions with their error code to troubleshoot, or use application-level logs, if instrumented, to determine the cause of the error. + +### SQL queries experiencing high latency + +Send an alert when the query latency exceeds a user-determined threshold based on the application’s SLA. + +**Metric** +
[`sql.service.latency`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#sql-service-latency) +
[`sql.conn.latency`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#sql-conn-latency) + +**Rule** +
WARNING: (p99 or p90 of `sql.service.latency` plus average of `sql.conn.latency`) is greater than a threshold (based on the user’s application SLA) + +**Action** + +- Apply the time range of the alert to the [**SQL Activity** pages]({% link {{ page.version.version }}/monitoring-and-alerting.md %}#sql-activity-pages) to investigate. Use the [**Statements** page]({% link {{ page.version.version }}/ui-statements-page.md %}) **P90 Latency** and **P99 Latency** columns to correlate [statement fingerprints]({% link {{ page.version.version }}/ui-statements-page.md %}#sql-statement-fingerprints) with this alert. + +{% if include.deployment == 'self-hosted' %} +## Backup + +### Backup failure + +Although CockroachDB is a distributed, fault-tolerant database, you still need to ensure that backups complete. + +**Metric** +
[`schedules.BACKUP.failed`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#schedules-BACKUP-failed) + +**Rule** +
Set alerts for each node: +
WARNING: `schedules.BACKUP.failed` is greater than `0` + +**Action** + +- Refer to [Backup and Restore Monitoring]({% link {{ page.version.version }}/backup-and-restore-monitoring.md %}). +
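One way to list recent failed backup jobs from a SQL shell (a sketch; adjust the interval to your schedule's cadence): + + {% include_cached copy-clipboard.html %} + ```sql + -- Backup jobs that failed in the last day. + SELECT job_id, status, created, description FROM [SHOW JOBS] WHERE job_type = 'BACKUP' AND status = 'failed' AND created > now() - INTERVAL '1 day'; + ``` +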
{% endif %} + +## Changefeeds + +{{site.data.alerts.callout_info}} +During [rolling maintenance]({% link {{ page.version.version }}/upgrade-cockroach-version.md %}), [changefeed jobs]({% link {{ page.version.version }}/change-data-capture-overview.md %}) restart following node restarts. **Mute changefeed alerts described in the following sections during routine maintenance procedures** to avoid unnecessary distractions. +{{site.data.alerts.end}} + +### Changefeed failure + +Changefeeds can suffer permanent failures (that the [jobs system]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}) will not try to restart). Any increase in this metric counter should prompt investigative action. + +**Metric** +
[`changefeed.failures`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#changefeed-failures) + +**Rule** +
CRITICAL: `changefeed.failures` is greater than `0` + +**Action** + +1. If the alert is triggered during cluster maintenance, mute it. Otherwise, start investigating with the following query: + + {% include_cached copy-clipboard.html %} + ```sql + SELECT job_id, status, ((high_water_timestamp/1000000000)::INT::TIMESTAMP) - NOW() AS "changefeed latency", created, LEFT(description, 60), high_water_timestamp FROM crdb_internal.jobs WHERE job_type = 'CHANGEFEED' AND status IN ('running', 'paused', 'pause-requested') ORDER BY created DESC; + ``` + +2. If the cluster is not undergoing maintenance, check the health of [sink]({% link {{ page.version.version }}/changefeed-sinks.md %}) endpoints. If the sink is [Kafka]({% link {{ page.version.version }}/changefeed-sinks.md %}#kafka), check for sink connection errors such as `ERROR: connecting to kafka: path.to.cluster:port: kafka: client has run out of available brokers to talk to (Is your cluster reachable?)`. + +### Frequent changefeed restarts + +Changefeeds automatically restart in case of transient errors. However, too many restarts outside of a routine maintenance procedure may be due to a systemic condition and should be investigated. + +**Metric** +
[`changefeed.error_retries`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#changefeed-error-retries) + +**Rule** +
WARNING: `changefeed.error_retries` is greater than `50` for more than `15 minutes` + +**Action** + +- Follow the action for a [changefeed failure](#changefeed-failure). + +### Changefeed falling behind + +Send an alert when a changefeed has fallen behind, as determined by the end-to-end lag between a change being committed and that change being applied at the destination. This can be due to cluster capacity or changefeed sink availability. + +**Metric** +
[`changefeed.commit_latency`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#changefeed-commit-latency) + +**Rule** +
WARNING: `changefeed.commit_latency` is greater than `10 minutes` +
CRITICAL: `changefeed.commit_latency` is greater than `15 minutes` + +**Action** + +1. In the DB Console, navigate to **Metrics**, [**Changefeeds** dashboard]({% link {{ page.version.version }}/ui-cdc-dashboard.md %}) for the cluster and check the maximum values on the [**Commit Latency** graph]({% link {{ page.version.version }}/ui-cdc-dashboard.md %}#commit-latency). Alternatively, individual changefeed latency can be verified by using the following SQL query: + + {% include_cached copy-clipboard.html %} + ```sql + SELECT job_id, status, ((high_water_timestamp/1000000000)::INT::TIMESTAMP) - NOW() AS "changefeed latency", created, LEFT(description, 60), high_water_timestamp FROM crdb_internal.jobs WHERE job_type = 'CHANGEFEED' AND status IN ('running', 'paused', 'pause-requested') ORDER BY created DESC; + ``` + +2. Copy the `job_id` for the changefeed job with the highest `changefeed latency` and pause the job: + + {% include_cached copy-clipboard.html %} + ```sql + PAUSE JOB 681491311976841286; + ``` + +3. Check the status of the pause request by running the query from step 1. If the job status is `pause-requested`, check again in a few minutes. + +4. After the job is `paused`, resume the job: + + {% include_cached copy-clipboard.html %} + ```sql + RESUME JOB 681491311976841286; + ``` + +5. If the changefeed latency does not progress after these steps due to a lack of cluster resources or changefeed sink availability, [contact Support](https://support.cockroachlabs.com). + +### Changefeed has been paused a long time + +Changefeed jobs should not be paused for a long time because [the protected timestamp prevents garbage collection]({% link {{ page.version.version }}/protect-changefeed-data.md %}). To protect against operational error, this alert guards against an inadvertently forgotten pause. + +**Metric** +
[`jobs.changefeed.currently_paused`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#changefeed-currently-paused) + +**Rule** +
WARNING: `jobs.changefeed.currently_paused` is greater than `0` for more than `15 minutes` +
CRITICAL: `jobs.changefeed.currently_paused` is greater than `0` for more than `60 minutes` + +**Action** + +1. Check the status of each changefeed using the following SQL query: + + {% include_cached copy-clipboard.html %} + ```sql + SELECT job_id, status, ((high_water_timestamp/1000000000)::INT::TIMESTAMP) - NOW() AS "changefeed latency", created, LEFT(description, 60), high_water_timestamp FROM crdb_internal.jobs WHERE job_type = 'CHANGEFEED' AND status IN ('running', 'paused', 'pause-requested') ORDER BY created DESC; + ``` + +2. If all changefeeds have a status of `running`, one or more changefeeds may have run into an error and recovered. In the DB Console, navigate to **Metrics**, [**Changefeeds** dashboard]({% link {{ page.version.version }}/ui-cdc-dashboard.md %}) for the cluster and check the [**Changefeed Restarts** graph]({% link {{ page.version.version }}/ui-cdc-dashboard.md %}#changefeed-restarts). + +3. Resume any paused changefeed by its `job_id` using: + + {% include_cached copy-clipboard.html %} + ```sql + RESUME JOB 681491311976841286; + ``` + +### Changefeed experiencing high latency + +Send an alert when the maximum latency of any running changefeed exceeds a specified threshold, which is less than the [`gc.ttlseconds`]({% link {{ page.version.version }}/configure-replication-zones.md %}#replication-zone-variables) variable set in the cluster. This alert ensures that the changefeed progresses faster than the garbage collection TTL, preventing a changefeed's protected timestamp from delaying garbage collection. + +**Metric** +
[`changefeed.checkpoint_progress`]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}#metrics) + +**Rule** +
WARNING: (current time minus `changefeed.checkpoint_progress`) is greater than a threshold (that is less than the `gc.ttlseconds` variable) + +**Action** + +- Refer to [Monitor and Debug Changefeeds]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}#recommended-changefeed-metrics-to-track). +
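A minimal sketch for looking up the cluster's `gc.ttlseconds` value when choosing the alert threshold (shown for the `default` replication zone; also check any zones that apply specifically to the changefeed's tables): + + {% include_cached copy-clipboard.html %} + ```sql + -- gc.ttlseconds appears in the zone configuration output. + SHOW ZONE CONFIGURATION FOR RANGE default; + ``` +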
## See also + +- [Events to alert on]({% link {{ page.version.version }}/monitoring-and-alerting.md %}#events-to-alert-on) +- [Common Issues to Monitor]({% link {{ page.version.version }}/common-issues-to-monitor.md %}) +{% if include.deployment == 'self-hosted' %} +- [Essential Metrics for CockroachDB Self-Hosted Deployments]({% link {{ page.version.version }}/essential-metrics-self-hosted.md %}) +{% elsif include.deployment == 'advanced' %} +- [Essential Metrics for CockroachDB Advanced Deployments]({% link {{ page.version.version }}/essential-metrics-advanced.md %}) +{% endif %} + diff --git a/src/current/_includes/v25.3/essential-metrics.md b/src/current/_includes/v25.3/essential-metrics.md new file mode 100644 index 00000000000..7c958db3f50 --- /dev/null +++ b/src/current/_includes/v25.3/essential-metrics.md @@ -0,0 +1,204 @@ +These essential CockroachDB metrics enable you to build custom dashboards with the following tools: +{% if include.deployment == 'self-hosted' %} +* [Grafana]({% link {{ page.version.version }}/monitor-cockroachdb-with-prometheus.md %}#step-5-visualize-metrics-in-grafana) +* [Datadog Integration]({% link {{ page.version.version }}/datadog.md %}) - The [**Datadog Integration Metric Name**](https://docs.datadoghq.com/integrations/cockroachdb/?tab=host#metrics) column lists the corresponding Datadog metric which requires the `cockroachdb.` prefix. +{% elsif include.deployment == 'advanced' %} +* [Datadog integration]({% link cockroachcloud/tools-page.md %}#monitor-cockroachdb-cloud-with-datadog) - The [**Datadog Integration Metric Name**](https://docs.datadoghq.com/integrations/cockroachdb_dedicated/#metrics) column lists the corresponding Datadog metric which requires the `crdb_dedicated.` prefix. +* [Metrics export]({% link cockroachcloud/export-metrics-advanced.md %}) +{% endif %} + +The **Usage** column explains why each metric is important to visualize in a custom dashboard and how to make both practical and actionable use of the metric in a production deployment. + +## Platform + +
| CockroachDB Metric Name | {% if include.deployment == 'self-hosted' %}[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb/?tab=host#metrics) (add `cockroachdb.` prefix) |{% elsif include.deployment == 'advanced' %}[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb_dedicated/#metrics) (add `crdb_dedicated.` prefix) |{% endif %} Description | Usage | +| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'advanced' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ | +| sys.cpu.combined.percent-normalized | sys.cpu.combined.percent.normalized | Current user+system CPU percentage consumed by the CRDB process, normalized by number of cores | This metric gives the CPU utilization percentage by the CockroachDB process. If it is equal to 1 (or 100%), then the CPU is overloaded. The CockroachDB process should not be running with over 80% utilization for extended periods of time (hours). This metric is used in the DB Console [**CPU Percent** graph]({% link {{ page.version.version }}/ui-hardware-dashboard.md %}#cpu-percent). | +| sys.cpu.host.combined.percent-normalized | NOT AVAILABLE | Current user+system CPU percentage consumed by all processes on the host OS, normalized by number of cores. If the CRDB process is run in a containerized environment, the host OS is the container since the CRDB process cannot inspect CPU usage beyond the container. | This metric gives the CPU utilization percentage of the underlying server, virtual machine, or container hosting the CockroachDB process. It includes CPU usage from both CockroachDB and non-CockroachDB processes. It also accounts for time spent processing hardware (`irq`) and software (`softirq`) interrupts, as well as `nice` time, which represents low-priority user-mode activity.

A value of 1 (or 100%) indicates that the CPU is overloaded. Avoid running the CockroachDB process in an environment where the CPU remains overloaded for extended periods (e.g. multiple hours). This metric appears in the DB Console on the **Host CPU Percent** graph. | +| sys.cpu.user.percent | sys.cpu.user.percent | Current user CPU percentage consumed by the CRDB process | This metric gives the CPU usage percentage at the user level by the CockroachDB process only. This is similar to the Linux `top` command output. The metric value can be more than 1 (or 100%) on multi-core systems. It is best to combine user and system metrics. | +| sys.cpu.sys.percent | sys.cpu.sys.percent | Current system CPU percentage consumed by the CRDB process | This metric gives the CPU usage percentage at the system (Linux kernel) level by the CockroachDB process only. This is similar to the Linux `top` command output. The metric value can be more than 1 (or 100%) on multi-core systems. It is best to combine user and system metrics. | +| sys.rss | sys.rss | Current process memory (RSS) | This metric gives the amount of RAM used by the CockroachDB process. Persistently low values over an extended period of time suggest there is underutilized memory that can be put to work with adjusted [settings for `--cache` or `--max_sql_memory`]({% link {{ page.version.version }}/recommended-production-settings.md %}#cache-and-sql-memory-size) or both. Conversely, a high utilization, even if a temporary spike, indicates an increased risk of [Out-of-memory (OOM) crash]({% link {{ page.version.version }}/cluster-setup-troubleshooting.md %}#out-of-memory-oom-crash) (particularly since the [swap is generally disabled]({% link {{ page.version.version }}/recommended-production-settings.md %}#memory)). | +| sql.mem.root.current | {% if include.deployment == 'self-hosted' %}sql.mem.root.current |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Current sql statement memory usage for root | This metric shows how memory set aside for temporary materializations, such as hash tables and intermediary result sets, is utilized. Use this metric to optimize memory allocations based on long term observations. The maximum amount is set with [`--max_sql_memory`]({% link {{ page.version.version }}/recommended-production-settings.md %}#cache-and-sql-memory-size). If the utilization of sql memory is persistently low, perhaps some portion of this memory allocation can be shifted to [`--cache`]({% link {{ page.version.version }}/recommended-production-settings.md %}#cache-and-sql-memory-size). | +| sys.host.disk.write.bytes | {% if include.deployment == 'self-hosted' %}sys.host.disk.write.bytes |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Bytes written to all disks since this process started | This metric reports the effective storage device write throughput (MB/s) rate. To confirm that storage is sufficiently provisioned, assess the I/O performance rates (IOPS and MBPS) in the context of the sys.host.disk.iopsinprogress metric. | +| sys.host.disk.write.count | {% if include.deployment == 'self-hosted' %}sys.host.disk.write |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Disk write operations across all disks since this process started | This metric reports the effective storage device write IOPS rate. To confirm that storage is sufficiently provisioned, assess the I/O performance rates (IOPS and MBPS) in the context of the sys.host.disk.iopsinprogress metric. 
| +| sys.host.disk.read.bytes | {% if include.deployment == 'self-hosted' %}sys.host.disk.read.bytes |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Bytes read from all disks since this process started | This metric reports the effective storage device read throughput (MB/s) rate. To confirm that storage is sufficiently provisioned, assess the I/O performance rates (IOPS and MBPS) in the context of the sys.host.disk.iopsinprogress metric. | +| sys.host.disk.read.count | {% if include.deployment == 'self-hosted' %}sys.host.disk.read |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Disk read operations across all disks since this process started | This metric reports the effective storage device read IOPS rate. To confirm that storage is sufficiently provisioned, assess the I/O performance rates (IOPS and MBPS) in the context of the sys.host.disk.iopsinprogress metric. | +| sys.host.disk.iopsinprogress | {% if include.deployment == 'self-hosted' %}sys.host.disk.iopsinprogress |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} IO operations currently in progress on this host | This metric gives the average queue length of the storage device. It characterizes the storage device's performance capability. All I/O performance metrics are Linux counters and correspond to the `avgqu-sz` in the Linux `iostat` command output. You need to view the device queue graph in the context of the actual read/write IOPS and MBPS metrics that show the actual device utilization. If the device is not keeping up, the queue will grow. Values over 10 are bad. Values around 5 mean the device is working hard trying to keep up. For internal (on chassis) [NVMe](https://www.wikipedia.org/wiki/NVM_Express) devices, the queue values are typically 0. For network connected devices, such as [AWS EBS volumes](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html), the normal operating range of values is 1 to 2. Spikes in values are OK. They indicate an I/O spike where the device fell behind and then caught up. End users may experience inconsistent response times, but there should be no cluster stability issues. If the queue is greater than 5 for an extended period of time and IOPS or MBPS are low, then the storage is most likely not provisioned per Cockroach Labs guidance. In AWS EBS, it is commonly an EBS type, such as gp2, not suitable as database primary storage. If I/O is low and the queue is low, the most likely scenario is that the CPU is lacking and not driving I/O. One such case is a cluster with nodes with only 2 vcpus which is not supported [sizing]({% link {{ page.version.version }}/recommended-production-settings.md %}#sizing) for production deployments. There are quite a few background processes in the database that take CPU away from the workload, so the workload is just not getting the CPU. Review [storage and disk I/O]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#storage-and-disk-i-o). | +| sys.host.net.recv.bytes | sys.host.net.recv.bytes | Bytes received on all network interfaces since this process started | This metric gives the node's ingress/egress network transfer rates for flat sections which may indicate insufficiently provisioned networking or high error rates. CockroachDB is using a reliable TCP/IP protocol, so errors result in delivery retries that create a "slow network" effect. 
| +| sys.host.net.send.bytes | sys.host.net.send.bytes | Bytes sent on all network interfaces since this process started | This metric gives the node's ingress/egress network transfer rates for flat sections which may indicate insufficiently provisioned networking or high error rates. CockroachDB is using a reliable TCP/IP protocol, so errors result in delivery retries that create a "slow network" effect. | +| clock-offset.meannanos | clock.offset.meannanos | Mean clock offset with other nodes | This metric gives the node's clock skew. In a well-configured environment, the actual clock skew would be in the sub-millisecond range. A skew exceeding 5 ms is likely due to an NTP service misconfiguration. Reducing the actual clock skew reduces the probability of uncertainty-related conflicts and corresponding retries, which has a positive impact on workload performance. Conversely, a larger actual clock skew increases the probability of retries due to uncertainty conflicts, with potentially measurable adverse effects on workload performance. | + +## Storage + +
| CockroachDB Metric Name | {% if include.deployment == 'self-hosted' %}[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb/?tab=host#metrics) (add `cockroachdb.` prefix) |{% elsif include.deployment == 'advanced' %}[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb_dedicated/#metrics) (add `crdb_dedicated.` prefix) |{% endif %} Description | Usage | +| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'advanced' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ | +| capacity | {% if include.deployment == 'self-hosted' %}capacity.total |{% elsif include.deployment == 'advanced' %}capacity |{% endif %} Total storage capacity | This metric gives total storage capacity. Measurements should comply with the following rule: CockroachDB storage volumes should not be utilized more than 60% (40% free space). | +| capacity.available | capacity.available | Available storage capacity | This metric gives available storage capacity. Measurements should comply with the following rule: CockroachDB storage volumes should not be utilized more than 60% (40% free space). | +| capacity.used | capacity.used | Used storage capacity | This metric gives used storage capacity. Measurements should comply with the following rule: CockroachDB storage volumes should not be utilized more than 60% (40% free space). | +| storage.wal.fsync.latency | {% if include.deployment == 'self-hosted' %}storage.wal.fsync.latency |{% elsif include.deployment == 'advanced' %}storage.wal.fsync.latency |{% endif %} This metric reports the latency of writes to the [WAL]({% link {{ page.version.version }}/architecture/storage-layer.md %}#memtable-and-write-ahead-log). | If this value is greater than `100ms`, it is an indication of a [disk stall]({% link {{ page.version.version }}/cluster-setup-troubleshooting.md %}#disk-stalls). To mitigate the effects of disk stalls, consider deploying your cluster with [WAL failover]({% link {{ page.version.version }}/wal-failover.md %}) configured. | +| storage.write-stalls | {% if include.deployment == 'self-hosted' %}storage.write.stalls |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of instances of intentional write stalls to backpressure incoming writes | This metric reports actual disk stall events. Ideally, investigate all reports of disk stalls. As a practical guideline, one stall per minute is not likely to have a material impact on workload beyond an occasional increase in response time. However, one stall per second should be viewed as problematic and investigated actively. It is particularly problematic if the rate persists over an extended period of time, and worse, if it is increasing. | +| rocksdb.compactions | rocksdb.compactions.total | Number of SST compactions | This metric reports the number of a node's [LSM compactions]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#lsm-health). If the number of compactions remains elevated while the LSM health does not improve, compactions are not keeping up with the workload. If the condition persists for an extended period, the cluster will initially exhibit performance issues that will eventually escalate into stability issues. | +| rocksdb.block.cache.hits | rocksdb.block.cache.hits | Count of block cache hits | This metric gives hits to block cache which is reserved memory. It is allocated upon the start of a node process by the [`--cache` flag]({% link {{ page.version.version }}/cockroach-start.md %}#general) and never shrinks. By observing block cache hits and misses, you can fine-tune memory allocations in the node process for the demands of the workload. 
| +| rocksdb.block.cache.misses | rocksdb.block.cache.misses | Count of block cache misses | This metric gives misses to block cache which is reserved memory. It is allocated upon the start of a node process by the [`--cache` flag]({% link {{ page.version.version }}/cockroach-start.md %}#general) and never shrinks. By observing block cache hits and misses, you can fine-tune memory allocations in the node process for the demands of the workload. | + +## Health + +
| CockroachDB Metric Name | {% if include.deployment == 'self-hosted' %}[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb/?tab=host#metrics) (add `cockroachdb.` prefix) |{% elsif include.deployment == 'advanced' %}[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb_dedicated/#metrics) (add `crdb_dedicated.` prefix) |{% endif %} Description | Usage | +| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'advanced' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ | +| sys.uptime | sys.uptime | Process uptime | This metric measures the length of time, in seconds, that the CockroachDB process has been running. Monitor this metric to detect events such as node restarts, which may require investigation or intervention. | +| admission.io.overload | {% if include.deployment == 'self-hosted' %}admission.io.overload |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} 1-normalized float indicating whether IO admission control considers the store as overloaded with respect to compaction out of L0 (considers sub-level and file counts). | If the value of this metric exceeds 1, then it indicates overload. You can also look at the metrics `storage.l0-num-files`, `storage.l0-sublevels` or `rocksdb.read-amplification` directly. A healthy LSM shape is defined as “read-amp < 20” and “L0-files < 1000”, looking at the [cluster settings]({% link {{ page.version.version }}/cluster-settings.md %}) `admission.l0_sub_level_count_overload_threshold` and `admission.l0_file_count_overload_threshold` respectively. | +| admission.wait_durations.kv-p75 | {% if include.deployment == 'self-hosted' %}admission.wait.durations.kv |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Wait time durations for requests that waited | This metric shows whether the CPU utilization-based admission control feature is working effectively or is potentially overaggressive. It is a latency histogram of how much delay was added to the workload due to throttling by CPU control. If you observe waits of over 100ms for more than 5 seconds while excess CPU capacity was available, then admission control is overly aggressive. | +| admission.wait_durations.kv-stores-p75 | {% if include.deployment == 'self-hosted' %}admission.wait.durations.kv_stores |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Wait time durations for requests that waited | This metric shows whether the I/O utilization-based admission control feature is working effectively or is potentially overaggressive. It is a latency histogram of how much delay was added to the workload due to throttling by I/O control. If you observe waits of over 100ms for more than 5 seconds while excess I/O capacity was available, then admission control is overly aggressive. | +| sys.runnable.goroutines.per.cpu | {% if include.deployment == 'self-hosted' %}sys.runnable.goroutines.per_cpu |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Average number of goroutines that are waiting to run, normalized by number of cores | If this metric has a value over 30, it indicates a CPU overload. If the condition lasts a short period of time (a few seconds), the database users are likely to experience inconsistent response times. If the condition persists for an extended period of time (tens of seconds, or minutes) the cluster may start developing stability issues. Review [CPU planning]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#cpu). | + +{% if include.deployment == 'self-hosted' %} +## Network + +
| CockroachDB Metric Name | [Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb/?tab=host#metrics) (add `cockroachdb.` prefix) | Description | Usage | +| ------------------------------------------------------ | --------------------------------------------------------- | ------------------------------------------------------------ | ------------------------------------------------------------ | +| rpc.connection.avg_round_trip_latency | rpc.connection.avg_round_trip_latency | Sum of exponentially weighted moving average of round-trip latencies, as measured through a gRPC RPC. Dividing this gauge by `rpc.connection.healthy` gives an approximation of average latency, but the top-level round-trip-latency histogram is more useful. Instead, users should consult the label families of this metric if they are available (which requires Prometheus and the cluster setting `server.child_metrics.enabled`); these provide per-peer moving averages. This metric does not track failed connections. A failed connection's contribution is reset to zero. | This metric is helpful in understanding general network issues outside of CockroachDB that could be impacting the user’s workload. | +| rpc.connection.failures | rpc.connection.failures.count | Counter of failed connections. This includes both the event in which a healthy connection terminates as well as unsuccessful reconnection attempts. Connections that are terminated as part of local node shutdown are excluded. Decommissioned peers are excluded. | See Description. | +| rpc.connection.healthy | rpc.connection.healthy | Gauge of current connections in a healthy state (i.e., bidirectionally connected and heartbeating). | See Description. | +| rpc.connection.healthy_nanos | rpc.connection.healthy_nanos | Gauge of nanoseconds of healthy connection time. On the Prometheus endpoint scraped when the cluster setting `server.child_metrics.enabled` is set, this gauge allows you to see the duration for which a given peer has been connected in a healthy state. | This can be useful for monitoring the stability and health of connections within your CockroachDB cluster. | +| rpc.connection.heartbeats | rpc.connection.heartbeats.count | Counter of successful heartbeats. | See Description. | +| rpc.connection.unhealthy | rpc.connection.unhealthy | Gauge of current connections in an unhealthy state (not bidirectionally connected or heartbeating). | If the value of this metric is greater than 0, this could indicate a network partition. | +| rpc.connection.unhealthy_nanos | rpc.connection.unhealthy_nanos | Gauge of nanoseconds of unhealthy connection time. On the Prometheus endpoint scraped when the cluster setting `server.child_metrics.enabled` is set, this gauge allows you to see the duration for which a given peer has been unreachable. | If this duration is greater than 0, this could indicate how long a network partition has been occurring. | +{% endif %} + +{% if include.deployment == 'self-hosted' %} +## Expiration of license and certificates + +
| CockroachDB Metric Name | [Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb/?tab=host#metrics) (add `cockroachdb.` prefix) | Description | Usage | +| ----------------------------------------------------- | ---------------------------------------------------------- | ------------------------------------------------------------ | ------------------------------------------------------------ | +| seconds.until.enterprise.license.expiry | seconds.until.enterprise.license.expiry | Seconds until enterprise license expiry (0 if no license present or running without enterprise features) | See Description. | +| security.certificate.expiration.ca | security.certificate_expiration.ca | Expiration for the CA certificate. 0 means no certificate or error | See Description. | +| security.certificate.expiration.client-ca | security.certificate_expiration.client_ca | Expiration for the client CA certificate. 0 means no certificate or error | See Description. | +| security.certificate.expiration.ui | security.certificate_expiration.ui | Expiration for the UI certificate. 0 means no certificate or error | See Description. | +| security.certificate.expiration.ui-ca | security.certificate_expiration.ui_ca | Expiration for the UI CA certificate. 0 means no certificate or error | See Description. | +| security.certificate.expiration.node | security.certificate_expiration.node | Expiration for the node certificate. 0 means no certificate or error | See Description. | +| security.certificate.expiration.node-client | security.certificate_expiration.node_client | Expiration for the node's client certificate. 0 means no certificate or error | See Description. | +{% endif %} + +## KV distributed + +
| CockroachDB Metric Name | {% if include.deployment == 'self-hosted' %}[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb/?tab=host#metrics) (add `cockroachdb.` prefix) |{% elsif include.deployment == 'advanced' %}[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb_dedicated/#metrics) (add `crdb_dedicated.` prefix) |{% endif %} Description | Usage | +| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'advanced' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ | +| liveness.heartbeatlatency | {% if include.deployment == 'self-hosted' %}liveness.heartbeatlatency-p90 |{% elsif include.deployment == 'advanced' %}liveness.heartbeatlatency |{% endif %} Node liveness heartbeat latency | If this metric exceeds 1 second, it is a sign of cluster instability. | +| liveness.livenodes | liveness.livenodes | Number of live nodes in the cluster (will be 0 if this node is not itself live) | This is a critical metric that tracks the live nodes in the cluster. | +| distsender.rpc.sent.nextreplicaerror | distsender.rpc.sent.nextreplicaerror | Number of replica-addressed RPCs sent due to per-replica errors | [RPC](architecture/overview.html#overview) errors do not necessarily indicate a problem. This metric tracks remote procedure calls that return a status value other than "success". A non-success status of an RPC should not be misconstrued as a network transport issue. It is database code logic executed on another cluster node. The non-success status is a result of an orderly execution of an RPC that reports a specific logical condition. | +| distsender.errors.notleaseholder | distsender.errors.notleaseholder | Number of NotLeaseHolderErrors encountered from replica-addressed RPCs | Errors of this type are normal during elastic cluster topology changes when leaseholders are actively rebalancing. They are automatically retried. However, they may create occasional response time spikes. In that case, this metric may provide the explanation of the cause. | + +## KV replication + +
| CockroachDB Metric Name | {% if include.deployment == 'self-hosted' %}[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb/?tab=host#metrics) (add `cockroachdb.` prefix) |{% elsif include.deployment == 'advanced' %}[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb_dedicated/#metrics) (add `crdb_dedicated.` prefix) |{% endif %} Description | Usage | +| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'advanced' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ | +| leases.transfers.success | leases.transfers.success | Number of successful lease transfers | A high number of [lease](architecture/replication-layer.html#leases) transfers is not a negative or positive signal; rather, it is a reflection of the elastic cluster activities. For example, this metric is high during cluster topology changes. A high value is often the reason for NotLeaseHolderErrors, which are normal and expected during rebalancing. Observing this metric may provide a confirmation of the cause of such errors. | +| rebalancing_lease_transfers | rebalancing.lease.transfers | Counter of the number of [lease transfers]({% link {{ page.version.version }}/architecture/replication-layer.md %}#leases) that occur during replica rebalancing. These lease transfers are tracked by a component that looks for a [store-level]({% link {{ page.version.version }}/cockroach-start.md %}#store) load imbalance of either QPS (`rebalancing.queriespersecond`) or CPU usage (`rebalancing.cpunanospersecond`), depending on the value of the `kv.allocator.load_based_rebalancing.objective` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}#setting-kv-allocator-load-based-rebalancing-objective). | Used to identify when there has been more rebalancing activity triggered by imbalance between stores (of QPS or CPU). If this value is high when expressed as a rate, it indicates that more rebalancing activity is taking place due to load imbalance between stores. | +| rebalancing_range_rebalances | {% if include.deployment == 'self-hosted' %}rebalancing.range.rebalances | {% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Counter of the number of [load-based range rebalances]({% link {{ page.version.version }}/architecture/replication-layer.md %}#load-based-replica-rebalancing). This range movement is tracked by a component that looks for [store-level]({% link {{ page.version.version }}/cockroach-start.md %}#store) load imbalance of either QPS (`rebalancing.queriespersecond`) or CPU usage (`rebalancing.cpunanospersecond`), depending on the value of the `kv.allocator.load_based_rebalancing.objective` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}#setting-kv-allocator-load-based-rebalancing-objective). | Used to identify when there has been more rebalancing activity triggered by imbalance between stores (of QPS or CPU). If this value is high when expressed as a rate, it indicates that more rebalancing activity is taking place due to load imbalance between stores. | +| rebalancing_replicas_queriespersecond | {% if include.deployment == 'self-hosted' %}rebalancing.replicas.queriespersecond | {% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Counter of the KV-level requests received per second by a given [store]({% link {{ page.version.version }}/cockroach-start.md %}#store). The store aggregates all of the CPU and QPS stats across all its replicas and then creates a histogram that maintains buckets that can be queried for, e.g., the P95 replica's QPS or CPU. | A high value of this metric could indicate that one of the store's replicas is part of a [hot range]({% link {{ page.version.version }}/understand-hotspots.md %}#hot-range). 
See also: `rebalancing_replicas_cpunanospersecond`. | +| rebalancing_replicas_cpunanospersecond | {% if include.deployment == 'self-hosted' %}rebalancing.replicas.cpunanospersecond | {% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Counter of the CPU nanoseconds of execution time per second by a given [store]({% link {{ page.version.version }}/cockroach-start.md %}#store). The store aggregates all of the CPU and QPS stats across all its replicas and then creates a histogram that maintains buckets that can be queried for, e.g., the P95 replica's QPS or CPU. | A high value of this metric could indicate that one of the store's replicas is part of a [hot range]({% link {{ page.version.version }}/understand-hotspots.md %}#hot-range). See also the non-histogram variant: `rebalancing.cpunanospersecond`. | +| rebalancing.queriespersecond | {% if include.deployment == 'self-hosted' %}rebalancing.queriespersecond |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of kv-level requests received per second by the store, considering the last 30 minutes, as used in rebalancing decisions. | This metric shows hotspots along the queries per second (QPS) dimension. It provides insights into the ongoing rebalancing activities. | +| rebalancing.cpunanospersecond | {% if include.deployment == 'self-hosted' %}rebalancing.cpunanospersecond |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Non-histogram variant of `rebalancing_replicas_cpunanospersecond`. | See usage of `rebalancing_replicas_cpunanospersecond`. | +| ranges | ranges | Number of ranges | This metric provides a measure of the scale of the data size. | +| replicas | {% if include.deployment == 'self-hosted' %}replicas.total |{% elsif include.deployment == 'advanced' %}replicas |{% endif %} Number of replicas | This metric provides an essential characterization of the data distribution across cluster nodes. | +| replicas.leaseholders | replicas.leaseholders | Number of lease holders | This metric provides an essential characterization of the data processing points across cluster nodes. | +| ranges.underreplicated | ranges.underreplicated | Number of ranges with fewer live replicas than the replication target | This metric is an indicator of [replication issues]({% link {{ page.version.version }}/cluster-setup-troubleshooting.md %}#replication-issues). It shows whether the cluster has data that is not conforming to resilience goals. The next step is to determine the corresponding database object, such as the table or index, of these under-replicated ranges and whether the under-replication is temporarily expected. Use the statement `SELECT table_name, index_name FROM [SHOW RANGES WITH INDEXES] WHERE range_id = {id of under-replicated range};`| +| ranges.unavailable | ranges.unavailable | Number of ranges with fewer live replicas than needed for quorum | This metric is an indicator of [replication issues]({% link {{ page.version.version }}/cluster-setup-troubleshooting.md %}#replication-issues). It shows whether the cluster is unhealthy and can impact workload. If an entire range is unavailable, then it will be unable to process queries. 
| +| queue.replicate.replacedecommissioningreplica.error | {% if include.deployment == 'self-hosted' %}queue.replicate.replacedecommissioningreplica.error.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of failed decommissioning replica replacements processed by the replicate queue | Refer to [Decommission the node]({% link {{ page.version.version }}/node-shutdown.md %}?filters=decommission#decommission-the-node). | +| range.splits | {% if include.deployment == 'self-hosted' %}range.splits.total |{% elsif include.deployment == 'advanced' %}range.splits |{% endif %} Number of range splits | This metric indicates how fast a workload is scaling up. Spikes can indicate resource [hotspots]({% link {{ page.version.version }}/understand-hotspots.md %}) since the [split heuristic is based on QPS]({% link {{ page.version.version }}/load-based-splitting.md %}#control-load-based-splitting-threshold). To understand whether hotspots are an issue and with which tables and indexes they are occurring, correlate this metric with other metrics, such as CPU usage (`sys.cpu.combined.percent-normalized`), or use the [**Hot Ranges** page]({% link {{ page.version.version }}/ui-hot-ranges-page.md %}). | +| range.merges | {% if include.deployment == 'self-hosted' %}range.merges.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of range merges | This metric indicates how fast a workload is scaling down. Merges are CockroachDB's [optimization for performance](architecture/distribution-layer.html#range-merges). This metric indicates that there have been deletes in the workload. | + +## SQL + +
| CockroachDB Metric Name | {% if include.deployment == 'self-hosted' %}[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb/?tab=host#metrics) (add `cockroachdb.` prefix) |{% elsif include.deployment == 'advanced' %}[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb_dedicated/#metrics) (add `crdb_dedicated.` prefix) |{% endif %} Description
| Usage | +| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'advanced' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ | +| sql.conns | sql.conns | Number of active SQL connections | This metric shows the number of connections as well as the distribution, or balancing, of connections across cluster nodes. An imbalance can lead to nodes becoming overloaded. Review [Connection Pooling]({% link {{ page.version.version }}/connection-pooling.md %}). | +| sql.new_conns | {% if include.deployment == 'self-hosted' %}sql.new_conns.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of new connection attempts. | The rate of this metric shows how frequently new connections are being established. This can be useful in determining if a high rate of incoming new connections is causing additional load on the server due to a misconfigured application. | +| sql.txns.open | sql.txns.open | Number of currently open user SQL transactions | This metric should roughly correspond to the number of cores * 4. If this metric is consistently larger, scale out the cluster. | +| sql.statements.active | sql.statements.active | Number of currently active user SQL statements | This high-level metric reflects workload volume. | +| sql.failure.count | {% if include.deployment == 'self-hosted' %}sql.failure |{% elsif include.deployment == 'advanced' %}sql.failure.count |{% endif %} Number of statements resulting in a planning or runtime error | This metric is a high-level indicator of workload and application degradation with query failures. Use the [Insights page]({% link {{ page.version.version }}/ui-insights-page.md %}) to find failed executions with their error code to troubleshoot or use application-level logs, if instrumented, to determine the cause of error. | +| sql.full.scan.count | {% if include.deployment == 'self-hosted' %}sql.full.scan |{% elsif include.deployment == 'advanced' %}sql.full.scan.count |{% endif %} Number of full table or index scans | This metric is a high-level indicator of potentially suboptimal query plans in the workload that may require index tuning and maintenance. To identify the [statements with a full table scan]({% link {{ page.version.version }}/performance-recipes.md %}#statements-with-full-table-scans), use `SHOW FULL TABLE SCAN` or the [**SQL Activity Statements** page]({% link {{ page.version.version }}/ui-statements-page.md %}) with the corresponding metric time frame. The **Statements** page also includes [explain plans]({% link {{ page.version.version }}/ui-statements-page.md %}#explain-plans) and [index recommendations]({% link {{ page.version.version }}/ui-statements-page.md %}#insights). Not all full scans are necessarily bad especially over smaller tables. | +| sql.insert.count | sql.insert.count | Number of SQL INSERT statements successfully executed | This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the [**SQL Activity** pages]({% link {{ page.version.version }}/monitoring-and-alerting.md %}#sql-activity-pages) to investigate interesting outliers or patterns. 
For example, on the [**Transactions** page]({% link {{ page.version.version }}/ui-transactions-page.md %}) and the [**Statements** page]({% link {{ page.version.version }}/ui-statements-page.md %}), sort on the Execution Count column. To find problematic sessions, on the [**Sessions** page]({% link {{ page.version.version }}/ui-sessions-page.md %}), sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application. | +| sql.update.count | sql.update.count | Number of SQL UPDATE statements successfully executed | This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the [**SQL Activity** pages]({% link {{ page.version.version }}/monitoring-and-alerting.md %}#sql-activity-pages) to investigate interesting outliers or patterns. For example, on the [**Transactions** page]({% link {{ page.version.version }}/ui-transactions-page.md %}) and the [**Statements** page]({% link {{ page.version.version }}/ui-statements-page.md %}), sort on the Execution Count column. To find problematic sessions, on the [**Sessions** page]({% link {{ page.version.version }}/ui-sessions-page.md %}), sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application. | +| sql.delete.count | sql.delete.count | Number of SQL DELETE statements successfully executed | This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the [**SQL Activity** pages]({% link {{ page.version.version }}/monitoring-and-alerting.md %}#sql-activity-pages) to investigate interesting outliers or patterns. For example, on the [**Transactions** page]({% link {{ page.version.version }}/ui-transactions-page.md %}) and the [**Statements** page]({% link {{ page.version.version }}/ui-statements-page.md %}), sort on the Execution Count column. To find problematic sessions, on the [**Sessions** page]({% link {{ page.version.version }}/ui-sessions-page.md %}), sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application. | +| sql.select.count | sql.select.count | Number of SQL SELECT statements successfully executed | This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the [**SQL Activity** pages]({% link {{ page.version.version }}/monitoring-and-alerting.md %}#sql-activity-pages) to investigate interesting outliers or patterns. For example, on the [**Transactions** page]({% link {{ page.version.version }}/ui-transactions-page.md %}) and the [**Statements** page]({% link {{ page.version.version }}/ui-statements-page.md %}), sort on the Execution Count column. To find problematic sessions, on the [**Sessions** page]({% link {{ page.version.version }}/ui-sessions-page.md %}), sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application. | +| sql.ddl.count | sql.ddl.count | Number of SQL DDL statements successfully executed | This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. 
If abnormal patterns emerge, apply the metric's time range to the [**SQL Activity** pages]({% link {{ page.version.version }}/monitoring-and-alerting.md %}#sql-activity-pages) to investigate interesting outliers or patterns. For example, on the [**Transactions** page]({% link {{ page.version.version }}/ui-transactions-page.md %}) and the [**Statements** page]({% link {{ page.version.version }}/ui-statements-page.md %}), sort on the Execution Count column. To find problematic sessions, on the [**Sessions** page]({% link {{ page.version.version }}/ui-sessions-page.md %}), sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application. |
+| sql.txn.begin.count | sql.txn.begin.count | Number of SQL transaction BEGIN statements successfully executed | This metric reflects workload volume by counting explicit [transactions]({% link {{ page.version.version }}/transactions.md %}). Use this metric to determine whether explicit transactions can be refactored as implicit transactions (individual statements). |
+| sql.txn.commit.count | sql.txn.commit.count | Number of SQL transaction COMMIT statements successfully executed | This metric shows the number of [transactions]({% link {{ page.version.version }}/transactions.md %}) that completed successfully. This metric can be used as a proxy to measure the number of successful explicit transactions. |
+| sql.txn.rollback.count | sql.txn.rollback.count | Number of SQL transaction ROLLBACK statements successfully executed | This metric shows the number of orderly transaction [rollbacks]({% link {{ page.version.version }}/rollback-transaction.md %}). A persistently high number of rollbacks may negatively impact the workload performance and needs to be investigated. |
+| sql.txn.abort.count | sql.txn.abort.count | Number of SQL transaction abort errors | This high-level metric reflects workload performance. A persistently high number of SQL transaction abort errors may negatively impact the workload performance and needs to be investigated. |
+| sql.service.latency-p90, sql.service.latency-p99 | sql.service.latency | Latency of SQL request execution | These high-level metrics reflect workload performance. Monitor these metrics to understand latency over time. If abnormal patterns emerge, apply the metric's time range to the [**SQL Activity** pages]({% link {{ page.version.version }}/monitoring-and-alerting.md %}#sql-activity-pages) to investigate interesting outliers or patterns. The [**Statements** page]({% link {{ page.version.version }}/ui-statements-page.md %}) has P90 Latency and P99 Latency columns to enable correlation with this metric. |
+| sql.txn.latency-p90, sql.txn.latency-p99 | sql.txn.latency | Latency of SQL transactions | These high-level metrics provide a latency histogram of all executed SQL transactions. These metrics provide an overview of the current SQL workload. |
+| txnwaitqueue.deadlocks_total | {% if include.deployment == 'self-hosted' %}txnwaitqueue.deadlocks.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of deadlocks detected by the transaction wait queue | Alert on this metric if its value is greater than zero, especially if transaction throughput is lower than expected. Applications should be able to detect and recover from deadlock errors.
However, transaction performance and throughput can be maximized if the application logic avoids deadlock conditions in the first place, for example, by keeping transactions as short as possible. |
+| sql.distsql.contended_queries.count | {% if include.deployment == 'self-hosted' %}sql.distsql.contended.queries |{% elsif include.deployment == 'advanced' %}sql.distsql.contended.queries |{% endif %} Number of SQL queries that experienced contention | This metric is incremented whenever a statement experiences a non-trivial amount of contention, whether from read-write or write-write conflicts. Monitor this metric to correlate possible workload performance issues to contention conflicts. |
+| sql.conn.failures | sql.conn.failures.count | Number of SQL connection failures | This metric is incremented whenever a connection attempt fails for any reason, including timeouts. |
+| sql.conn.latency-p90, sql.conn.latency-p99 | sql.conn.latency | Latency to establish and authenticate a SQL connection | These metrics characterize database connection latency, which can affect application performance, for example, through slow application startup times. Connection failures are not recorded in these metrics. |
+| txn.restarts.serializable | txn.restarts.serializable | Number of restarts due to a forwarded commit timestamp and isolation=SERIALIZABLE | This metric is one measure of the impact of contention conflicts on workload performance. For guidance on contention conflicts, review [transaction contention best practices]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#transaction-contention) and [performance tuning recipes]({% link {{ page.version.version }}/performance-recipes.md %}#transaction-contention). Tens of restarts per minute may be a high value, a signal of an elevated degree of contention in the workload, which should be investigated. |
+| txn.restarts.writetooold | txn.restarts.writetooold | Number of restarts due to a concurrent writer committing first | This metric is one measure of the impact of contention conflicts on workload performance. For guidance on contention conflicts, review [transaction contention best practices]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#transaction-contention) and [performance tuning recipes]({% link {{ page.version.version }}/performance-recipes.md %}#transaction-contention). Tens of restarts per minute may be a high value, a signal of an elevated degree of contention in the workload, which should be investigated. |
+| txn.restarts.writetoooldmulti | {% if include.deployment == 'self-hosted' %}txn.restarts.writetoooldmulti.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of restarts due to multiple concurrent writers committing first | This metric is one measure of the impact of contention conflicts on workload performance. For guidance on contention conflicts, review [transaction contention best practices]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#transaction-contention) and [performance tuning recipes]({% link {{ page.version.version }}/performance-recipes.md %}#transaction-contention). Tens of restarts per minute may be a high value, a signal of an elevated degree of contention in the workload, which should be investigated.
|
+| txn.restarts.unknown | {% if include.deployment == 'self-hosted' %}txn.restarts.unknown.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of restarts due to unknown reasons | This metric is one measure of the impact of contention conflicts on workload performance. For guidance on contention conflicts, review [transaction contention best practices]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#transaction-contention) and [performance tuning recipes]({% link {{ page.version.version }}/performance-recipes.md %}#transaction-contention). Tens of restarts per minute may be a high value, a signal of an elevated degree of contention in the workload, which should be investigated. |
+| txn.restarts.txnpush | {% if include.deployment == 'self-hosted' %}txn.restarts.txnpush.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of restarts due to a transaction push failure | This metric is one measure of the impact of contention conflicts on workload performance. For guidance on contention conflicts, review [transaction contention best practices]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#transaction-contention) and [performance tuning recipes]({% link {{ page.version.version }}/performance-recipes.md %}#transaction-contention). Tens of restarts per minute may be a high value, a signal of an elevated degree of contention in the workload, which should be investigated. |
+| txn.restarts.txnaborted | {% if include.deployment == 'self-hosted' %}txn.restarts.txnaborted.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of restarts due to an abort by a concurrent transaction | The errors tracked by this metric are generally due to deadlocks. Deadlocks can often be prevented with a considered transaction design. Identify the conflicting transactions involved in the deadlocks, then, if possible, redesign the business logic that is prone to deadlocks. |
+
+## Table Statistics
+
+|
CockroachDB Metric Name
| {% if include.deployment == 'self-hosted' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb/?tab=host#metrics)
(add `cockroachdb.` prefix)
|{% elsif include.deployment == 'advanced' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb_dedicated/#metrics)
(add `crdb_dedicated.` prefix)
|{% endif %}
Description
| Usage |
+| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'advanced' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ |
+| jobs.auto_create_stats.resume_failed | {% if include.deployment == 'self-hosted' %}jobs.auto.create.stats.resume_failed.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of auto_create_stats jobs which failed with a non-retryable error | This metric is a high-level indicator that automatically generated [table statistics]({% link {{ page.version.version }}/cost-based-optimizer.md %}#table-statistics) are failing. Failed statistics creation can lead to the query optimizer running with stale statistics. Stale statistics can cause suboptimal query plans to be selected, leading to poor query performance. |
+| jobs.auto_create_stats.currently_running | {% if include.deployment == 'self-hosted' %}jobs.auto.create.stats.currently_running |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of auto_create_stats jobs currently running | This metric tracks the number of active automatically generated statistics jobs that could also be consuming resources. Ensure that foreground SQL traffic is not impacted by correlating this metric with SQL latency and query volume metrics. |
+| jobs.auto_create_stats.currently_paused | {% if include.deployment == 'self-hosted' %}jobs.auto.create.stats.currently_paused |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of auto_create_stats jobs currently considered Paused | This metric is a high-level indicator that automatically generated statistics jobs are paused, which can lead to the query optimizer running with stale statistics. Stale statistics can cause suboptimal query plans to be selected, leading to poor query performance. |
+| jobs.create_stats.currently_running | {% if include.deployment == 'self-hosted' %}jobs.create.stats.currently_running |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of create_stats jobs currently running | This metric tracks the number of active create statistics jobs that may be consuming resources. Ensure that foreground SQL traffic is not impacted by correlating this metric with SQL latency and query volume metrics. |
+
+## Backup and Restore
+
+|
CockroachDB Metric Name
| {% if include.deployment == 'self-hosted' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb/?tab=host#metrics)
(add `cockroachdb.` prefix)
|{% elsif include.deployment == 'advanced' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb_dedicated/#metrics)
(add `crdb_dedicated.` prefix)
|{% endif %}
Description
| Usage |
+| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'advanced' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ |
+| jobs.backup.currently_running | {% if include.deployment == 'self-hosted' %}jobs.backup.currently_running |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of backup jobs currently running | See Description. |
+| jobs.backup.currently_paused | {% if include.deployment == 'self-hosted' %}jobs.backup.currently_paused |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of backup jobs currently considered Paused | Monitor and alert on this metric to safeguard against an inadvertent operational error of leaving a backup job in a paused state for an extended period of time. A paused job can hold resources, impact concurrency, or have other negative consequences. A backup job that remains paused may break the [recovery point objective (RPO)]({% link {{ page.version.version }}/backup.md %}#performance). |
+| schedules.BACKUP.failed | {% if include.deployment == 'self-hosted' %}schedules.backup.failed |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of BACKUP jobs failed | Monitor this metric and investigate backup job failures. |
+| schedules.BACKUP.last-completed-time | {% if include.deployment == 'self-hosted' %}schedules.backup.last_completed_time |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} The Unix timestamp of the most recently completed backup by a schedule specified as maintaining this metric | Monitor this metric to ensure that backups are meeting the [recovery point objective (RPO)]({% link {{ page.version.version }}/disaster-recovery-overview.md %}). Each node exports the time that it last completed a backup on behalf of the schedule. If a node is restarted, it will report `0` until it completes a backup. If all nodes are restarted, `max()` is `0` until a node completes a backup.

To make use of this metric, first, from each node, take the maximum over a rolling window equal to or greater than the backup frequency, and then take the maximum of those values across nodes. For example with a backup frequency of 60 minutes, monitor `time() - max_across_nodes(max_over_time(schedules_BACKUP_last_completed_time, 60min))`. | + +## Changefeeds + +If [changefeeds]({% link {{ page.version.version }}/change-data-capture-overview.md %}) are created in a CockroachDB cluster, monitor these additional metrics in your custom dashboards: + +|
CockroachDB Metric Name
| {% if include.deployment == 'self-hosted' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb/?tab=host#metrics)
(add `cockroachdb.` prefix)
|{% elsif include.deployment == 'advanced' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb_dedicated/#metrics)
(add `crdb_dedicated.` prefix)
|{% endif %}
Description
| Usage |
+| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'advanced' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ |
+| changefeed.running | changefeed.running | Number of currently running changefeeds, including sinkless | This metric tracks the total number of all running changefeeds. |
+| jobs.changefeed.currently_paused | {% if include.deployment == 'self-hosted' %}jobs.changefeed.currently_paused |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of changefeed jobs currently considered Paused | Monitor and alert on this metric to safeguard against an inadvertent operational error of leaving a changefeed job in a paused state for an extended period of time. Changefeed jobs should not be paused for a long time because the [protected timestamp prevents garbage collection]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}#protected-timestamp-and-garbage-collection-monitoring). |
+| changefeed.failures | changefeed.failures | Total number of changefeed jobs which have failed | This metric tracks the permanent changefeed job failures that the jobs system will not try to restart. Any increase in this counter should be investigated. An alert on this metric is recommended. |
+| changefeed.error_retries | changefeed.error.retries | Total retryable errors encountered by all changefeeds | This metric tracks transient changefeed errors. Alert on "too many" errors, such as 50 retries in 15 minutes. For example, during a rolling upgrade, this counter will increase because the changefeed jobs restart following node restarts. There is an exponential backoff, up to 10 minutes. If no rolling upgrade or other cluster maintenance is in progress and the error rate is high, investigate the changefeed job. |
+| changefeed.emitted_messages | changefeed.emitted.messages | Messages emitted by all feeds | This metric provides useful context when assessing the state of changefeeds. This metric characterizes the rate of changes being streamed from the CockroachDB cluster. |
+| changefeed.emitted_bytes | {% if include.deployment == 'self-hosted' %}changefeed.emitted_bytes.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Bytes emitted by all feeds | This metric provides useful context when assessing the state of changefeeds. This metric characterizes the throughput in bytes being streamed from the CockroachDB cluster. |
+| changefeed.commit_latency | changefeed.commit.latency | The difference between the event MVCC timestamp and the time it was acknowledged by the downstream sink. If the sink batches events, then the difference between the oldest event in the batch and acknowledgement is recorded. Latency during backfill is excluded. | This metric provides useful context when assessing the state of changefeeds. This metric characterizes the end-to-end lag between a committed change and that change applied at the destination.
| +| jobs.changefeed.protected_age_sec | {% if include.deployment == 'self-hosted' %}jobs.changefeed.protected_age_sec |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} The age of the oldest PTS record protected by changefeed jobs | [Changefeeds use protected timestamps to protect the data from being garbage collected]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}#protected-timestamp-and-garbage-collection-monitoring). Ensure the protected timestamp age does not significantly exceed the [GC TTL zone configuration]({% link {{ page.version.version }}/configure-replication-zones.md %}#replication-zone-variables). Alert on this metric if the protected timestamp age is greater than 3 times the GC TTL. | + +## Row-Level TTL + +If [Row-Level TTL]({% link {{ page.version.version }}/row-level-ttl.md %}) is configured for any table in a CockroachDB cluster, monitor these additional metrics in your custom dashboards: + +|
CockroachDB Metric Name
| {% if include.deployment == 'self-hosted' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb/?tab=host#metrics)
(add `cockroachdb.` prefix)
|{% elsif include.deployment == 'advanced' %}
[Datadog Integration Metric Name](https://docs.datadoghq.com/integrations/cockroachdb_dedicated/#metrics)
(add `crdb_dedicated.` prefix)
|{% endif %}
Description
| Usage |
+| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'advanced' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ |
+| jobs.row_level_ttl.resume_completed | {% if include.deployment == 'self-hosted' %}jobs.row.level.ttl.resume_completed.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of row_level_ttl jobs which successfully resumed to completion | If Row Level TTL is enabled, this metric should be nonzero and correspond to the `ttl_cron` setting that was chosen. If this metric is zero, it means the job is not running. |
+| jobs.row_level_ttl.resume_failed | {% if include.deployment == 'self-hosted' %}jobs.row.level.ttl.resume_failed.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of row_level_ttl jobs which failed with a non-retryable error | This metric should remain at zero. Repeated errors mean the Row Level TTL job is not deleting data. |
+| jobs.row_level_ttl.rows_selected | {% if include.deployment == 'self-hosted' %}jobs.row.level.ttl.rows_selected.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of rows selected for deletion by the row level TTL job. | Correlate this metric with the metric `jobs.row_level_ttl.rows_deleted` to ensure all the rows that should be deleted are actually getting deleted. |
+| jobs.row_level_ttl.rows_deleted | {% if include.deployment == 'self-hosted' %}jobs.row.level.ttl.rows_deleted.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of rows deleted by the row level TTL job. | Correlate this metric with the metric `jobs.row_level_ttl.rows_selected` to ensure all the rows that should be deleted are actually getting deleted. |
+| jobs.row_level_ttl.currently_paused | {% if include.deployment == 'self-hosted' %}jobs.row.level.ttl.currently_paused |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of row_level_ttl jobs currently considered Paused | Monitor this metric to ensure the Row Level TTL job does not remain paused inadvertently for an extended period. |
+| jobs.row_level_ttl.currently_running | {% if include.deployment == 'self-hosted' %}jobs.row.level.ttl.currently_running |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of row_level_ttl jobs currently running | Monitor this metric to ensure there are not too many Row Level TTL jobs running at the same time. Generally, this metric should be in the low single digits. |
+| schedules.scheduled-row-level-ttl-executor.failed | {% if include.deployment == 'self-hosted' %}schedules.scheduled.row.level.ttl.executor_failed.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of scheduled-row-level-ttl-executor jobs failed | Monitor this metric to ensure the Row Level TTL job is running. If this metric is non-zero, the scheduled job could not be created. |
+| jobs.row_level_ttl.span_total_duration | NOT AVAILABLE | Duration for processing a span during row level TTL. | See Description. |
+| jobs.row_level_ttl.select_duration | NOT AVAILABLE | Duration for select requests during row level TTL. | See Description. |
+| jobs.row_level_ttl.delete_duration | NOT AVAILABLE | Duration for delete requests during row level TTL. | See Description.
| +| jobs.row_level_ttl.num_active_spans | NOT AVAILABLE | Number of active spans the TTL job is deleting from. | See Description. | +| jobs.row_level_ttl.total_rows | NOT AVAILABLE | Approximate number of rows on the TTL table. | See Description. | +| jobs.row_level_ttl.total_expired_rows | NOT AVAILABLE | Approximate number of rows that have expired the TTL on the TTL table. | See Description. | + +## See also + +- [Available Metrics]({% link {{ page.version.version }}/metrics.md %}#available-metrics) +- [Monitor CockroachDB with Prometheus]({% link {{ page.version.version }}/monitor-cockroachdb-with-prometheus.md %}) +- [Visualize metrics in Grafana]({% link {{ page.version.version }}/monitor-cockroachdb-with-prometheus.md %}#step-5-visualize-metrics-in-grafana) +- [Custom Chart Debug Page]({% link {{ page.version.version }}/ui-custom-chart-debug-page.md %}) +- [Cluster API]({% link {{ page.version.version }}/cluster-api.md %}) +- [Essential Alerts]({% link {{ page.version.version }}/essential-alerts-{{ include.deployment}}.md %}) +- [CockroachDB Source Code - DB Console metrics to graphs mappings (in *.tsx files)](https://github.com/cockroachdb/cockroach/tree/master/pkg/ui/workspaces/db-console/src/views/cluster/containers/nodeGraphs/dashboards) diff --git a/src/current/_includes/v25.3/faq/auto-generate-unique-ids.md b/src/current/_includes/v25.3/faq/auto-generate-unique-ids.md new file mode 100644 index 00000000000..ebe3262e6df --- /dev/null +++ b/src/current/_includes/v25.3/faq/auto-generate-unique-ids.md @@ -0,0 +1,111 @@ +To auto-generate unique row identifiers, you can use the `gen_random_uuid()`, `uuid_v4()`, or `unique_rowid()` [functions]({% link {{ page.version.version }}/functions-and-operators.md %}#id-generation-functions). + +To use the [`UUID`]({% link {{ page.version.version }}/uuid.md %}) column with the `gen_random_uuid()` [function]({% link {{ page.version.version }}/functions-and-operators.md %}#id-generation-functions) as the [default value]({% link {{ page.version.version }}/default-value.md %}): + +{% include_cached copy-clipboard.html %} +~~~ sql +CREATE TABLE users ( + id UUID NOT NULL DEFAULT gen_random_uuid(), + city STRING NOT NULL, + name STRING NULL, + address STRING NULL, + credit_card STRING NULL, + CONSTRAINT "primary" PRIMARY KEY (city ASC, id ASC), + FAMILY "primary" (id, city, name, address, credit_card) +); +~~~ + +{% include_cached copy-clipboard.html %} +~~~ sql +INSERT INTO users (name, city) VALUES ('Petee', 'new york'), ('Eric', 'seattle'), ('Dan', 'seattle'); +~~~ + +{% include_cached copy-clipboard.html %} +~~~ sql +SELECT * FROM users; +~~~ + +~~~ + id | city | name | address | credit_card ++--------------------------------------+----------+-------+---------+-------------+ + cf8ee4e2-cd74-449a-b6e6-a0fb2017baa4 | new york | Petee | NULL | NULL + 2382564e-702f-42d9-a139-b6df535ae00a | seattle | Eric | NULL | NULL + 7d27e40b-263a-4891-b29b-d59135e55650 | seattle | Dan | NULL | NULL +(3 rows) +~~~ + +Alternatively, you can use the [`BYTES`]({% link {{ page.version.version }}/bytes.md %}) column with the `uuid_v4()` function as the default value: + +{% include_cached copy-clipboard.html %} +~~~ sql +CREATE TABLE users2 ( + id BYTES DEFAULT uuid_v4(), + city STRING NOT NULL, + name STRING NULL, + address STRING NULL, + credit_card STRING NULL, + CONSTRAINT "primary" PRIMARY KEY (city ASC, id ASC), + FAMILY "primary" (id, city, name, address, credit_card) +); +~~~ + +{% include_cached copy-clipboard.html %} +~~~ sql +INSERT INTO users2 (name, city) 
VALUES ('Anna', 'new york'), ('Jonah', 'seattle'), ('Terry', 'chicago');
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SELECT * FROM users2;
+~~~
+
+~~~
+ id | city | name | address | credit_card
++------------------------------------------------+----------+-------+---------+-------------+
+ 4\244\277\323/\261M\007\213\275*\0060\346\025z | chicago | Terry | NULL | NULL
+ \273*t=u.F\010\274f/}\313\332\373a | new york | Anna | NULL | NULL
+ \004\\\364nP\024L)\252\364\222r$\274O0 | seattle | Jonah | NULL | NULL
+(3 rows)
+~~~
+
+In either case, generated IDs will be 128-bit, sufficiently large to generate unique values. Once the table grows beyond a single key-value range's [default size]({% link {{ page.version.version }}/configure-replication-zones.md %}#range-max-bytes), new IDs will be scattered across all of the table's ranges and, therefore, likely across different nodes. This means that multiple nodes will share in the load.
+
+This approach has the disadvantage of creating a primary key that may not be useful in a query directly, which can require a join with another table or a secondary index.
+
+If it is important for generated IDs to be stored in the same key-value range, you can use an [integer type]({% link {{ page.version.version }}/int.md %}) with the `unique_rowid()` [function]({% link {{ page.version.version }}/functions-and-operators.md %}#id-generation-functions) as the default value, either explicitly or via the [`SERIAL` pseudo-type]({% link {{ page.version.version }}/serial.md %}):
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+CREATE TABLE users3 (
+  id INT DEFAULT unique_rowid(),
+  city STRING NOT NULL,
+  name STRING NULL,
+  address STRING NULL,
+  credit_card STRING NULL,
+  CONSTRAINT "primary" PRIMARY KEY (city ASC, id ASC),
+  FAMILY "primary" (id, city, name, address, credit_card)
+);
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+INSERT INTO users3 (name, city) VALUES ('Blake', 'chicago'), ('Hannah', 'seattle'), ('Bobby', 'seattle');
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SELECT * FROM users3;
+~~~
+
+~~~
+ id | city | name | address | credit_card
++--------------------+---------+--------+---------+-------------+
+ 469048192112197633 | chicago | Blake | NULL | NULL
+ 469048192112263169 | seattle | Hannah | NULL | NULL
+ 469048192112295937 | seattle | Bobby | NULL | NULL
+(3 rows)
+~~~
+
+Upon insert or upsert, the `unique_rowid()` function generates a default value from the timestamp and ID of the node executing the insert. Such time-ordered values are likely to be globally unique except in cases where a very large number of IDs (100,000+) are generated per node per second. Also, there can be gaps, and the ordering is not completely guaranteed.
+
+To understand the differences between the `UUID` and `unique_rowid()` options, see the [SQL FAQs]({% link {{ page.version.version }}/sql-faqs.md %}#what-are-the-differences-between-uuid-sequences-and-unique_rowid). For further background on UUIDs, see [What is a UUID, and Why Should You Care?](https://www.cockroachlabs.com/blog/what-is-a-uuid/).
diff --git a/src/current/_includes/v25.3/faq/clock-synchronization-effects.md b/src/current/_includes/v25.3/faq/clock-synchronization-effects.md
new file mode 100644
index 00000000000..e335a97fc3e
--- /dev/null
+++ b/src/current/_includes/v25.3/faq/clock-synchronization-effects.md
@@ -0,0 +1,31 @@
+CockroachDB requires moderate levels of clock synchronization to preserve data consistency.
For this reason, when a node detects that its clock is out of sync with at least half of the other nodes in the cluster by 80% of the maximum offset allowed, it spontaneously shuts down. This offset defaults to 500ms but can be changed via the [`--max-offset`]({% link {{ page.version.version }}/cockroach-start.md %}#flags-max-offset) flag when starting each node. + +Regardless of clock skew, [`SERIALIZABLE`]({% link {{ page.version.version }}/demo-serializable.md %}) and [`READ COMMITTED`]({% link {{ page.version.version }}/read-committed.md %}) transactions both serve globally consistent ("non-stale") reads and [commit atomically]({% link {{ page.version.version }}/developer-basics.md %}#how-transactions-work-in-cockroachdb). However, skew outside the configured clock offset bounds can result in violations of single-key linearizability between causally dependent transactions. It's therefore important to prevent clocks from drifting too far by running [NTP](http://www.ntp.org/) or other clock synchronization software on each node. + +In very rare cases, CockroachDB can momentarily run with a stale clock. This can happen when using vMotion, which can suspend a VM running CockroachDB, migrate it to different hardware, and resume it. This will cause CockroachDB to be out of sync for a short period before it jumps to the correct time. During this window, it would be possible for a client to read stale data and write data derived from stale reads. By enabling the `server.clock.forward_jump_check_enabled` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}), you can be alerted when the CockroachDB clock jumps forward, indicating it had been running with a stale clock. To protect against this on vMotion, however, use the [`--clock-device`](cockroach-start.html#general) flag to specify a [PTP hardware clock](https://www.kernel.org/doc/html/latest/driver-api/ptp.html) for CockroachDB to use when querying the current time. When doing so, you should not enable `server.clock.forward_jump_check_enabled` because forward jumps will be expected and harmless. For more information on how `--clock-device` interacts with vMotion, see [this blog post](https://core.vmware.com/blog/cockroachdb-vmotion-support-vsphere-7-using-precise-timekeeping). + +{{site.data.alerts.callout_danger}} +In CockroachDB versions prior to v22.2.13, and in v23.1 versions prior to v23.1.9, the [`--clock-device`](cockroach-start.html#general) flag had a bug that could cause it to generate timestamps in the far future. This could cause nodes to crash due to incorrect timestamps, or in the worst case irreversibly advance the cluster's HLC clock into the far future. This bug is fixed in CockroachDB v23.2. +{{site.data.alerts.end}} + +### Considerations + +When setting up clock synchronization: + +- All nodes in the cluster must be synced to the same time source, or to different sources that implement leap second smearing in the same way. For example, Google and Amazon have time sources that are compatible with each other (they implement [leap second smearing](https://developers.google.com/time/smear) in the same way), but are incompatible with the default NTP pool (which does not implement leap second smearing). +- For nodes running in AWS, we recommend [Amazon Time Sync Service](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-time.html#configure-amazon-time-service). 
For nodes running in GCP, we recommend [Google's internal NTP service](https://cloud.google.com/compute/docs/instances/configure-ntp#configure_ntp_for_your_instances). For nodes running elsewhere, we recommend [Google Public NTP](https://developers.google.com/time/). Note that the Google and Amazon time services can be mixed with each other, but they cannot be mixed with other time services (unless you have verified leap second behavior). Either all of your nodes should use the Google and Amazon services, or none of them should.
+- If you do not want to use the Google or Amazon time sources, you can use [`chrony`](https://chrony.tuxfamily.org/index.html) and enable client-side leap smearing, unless the time source you're using already does server-side smearing. In most cases, we recommend the Google Public NTP time source because it handles smearing the leap second. If you use a different NTP time source that doesn't smear the leap second, you must configure client-side smearing manually and do so in the same way on each machine.
+- Do not run more than one clock sync service on VMs where `cockroach` is running.
+- {% include {{ page.version.version }}/misc/multiregion-max-offset.md %}
+
+### Tutorials
+
+For guidance on synchronizing clocks, see the tutorial for your deployment environment:
+
+Environment | Featured Approach
+------------|---------------------
+[On-Premises]({% link {{ page.version.version }}/deploy-cockroachdb-on-premises.md %}#step-1-synchronize-clocks) | Use NTP with Google's external NTP service.
+[AWS]({% link {{ page.version.version }}/deploy-cockroachdb-on-aws.md %}#step-3-synchronize-clocks) | Use the Amazon Time Sync Service.
+[Azure]({% link {{ page.version.version }}/deploy-cockroachdb-on-microsoft-azure.md %}#step-3-synchronize-clocks) | Disable Hyper-V time synchronization and use NTP with Google's external NTP service.
+[Digital Ocean]({% link {{ page.version.version }}/deploy-cockroachdb-on-digital-ocean.md %}#step-2-synchronize-clocks) | Use NTP with Google's external NTP service.
+[GCE]({% link {{ page.version.version }}/deploy-cockroachdb-on-google-cloud-platform.md %}#step-3-synchronize-clocks) | Use NTP with Google's internal NTP service.
diff --git a/src/current/_includes/v25.3/faq/clock-synchronization-monitoring.md b/src/current/_includes/v25.3/faq/clock-synchronization-monitoring.md
new file mode 100644
index 00000000000..c3022ad1a32
--- /dev/null
+++ b/src/current/_includes/v25.3/faq/clock-synchronization-monitoring.md
@@ -0,0 +1,8 @@
+As explained in more detail [in our monitoring documentation]({% link {{ page.version.version }}/monitoring-and-alerting.md %}#prometheus-endpoint), each CockroachDB node exports a wide variety of metrics at `http://<host>:<http-port>/_status/vars` in the format used by the popular Prometheus timeseries database. Two of these metrics export how close each node's clock is to the clock of all other nodes:
+
+Metric | Definition
+-------|-----------
+`clock_offset_meannanos` | The mean difference between the node's clock and other nodes' clocks in nanoseconds
+`clock_offset_stddevnanos` | The standard deviation of the difference between the node's clock and other nodes' clocks in nanoseconds
+
+As described in [the above answer](#what-happens-when-node-clocks-are-not-properly-synchronized), a node will shut down if the mean offset of its clock from the other nodes' clocks exceeds 80% of the maximum offset allowed.
It's recommended to monitor the `clock_offset_meannanos` metric and alert if it's approaching the 80% threshold of your cluster's configured max offset. diff --git a/src/current/_includes/v25.3/faq/differences-between-numberings.md b/src/current/_includes/v25.3/faq/differences-between-numberings.md new file mode 100644 index 00000000000..80f7fe26d50 --- /dev/null +++ b/src/current/_includes/v25.3/faq/differences-between-numberings.md @@ -0,0 +1,11 @@ + +| Property | UUID generated with `uuid_v4()` | INT generated with `unique_rowid()` | Sequences | +|--------------------------------------|-----------------------------------------|-----------------------------------------------|--------------------------------| +| Size | 16 bytes | 8 bytes | 1 to 8 bytes | +| Ordering properties | Unordered | Highly time-ordered | Highly time-ordered | +| Performance cost at generation | Small, scalable | Small, scalable | Variable, can cause [contention]({{ link_prefix }}performance-best-practices-overview.html#understanding-and-avoiding-transaction-contention) | +| Value distribution | Uniformly distributed (128 bits) | Contains time and space (node ID) components | Dense, small values | +| Data locality | Maximally distributed | Values generated close in time are co-located | Highly local | +| `INSERT` latency when used as key | Small, insensitive to concurrency | Small, but increases with concurrent INSERTs | Higher | +| `INSERT` throughput when used as key | Highest | Limited by max throughput on 1 node | Limited by max throughput on 1 node | +| Read throughput when used as key | Highest (maximal parallelism) | Limited | Limited | diff --git a/src/current/_includes/v25.3/faq/sequential-numbers.md b/src/current/_includes/v25.3/faq/sequential-numbers.md new file mode 100644 index 00000000000..aca5750d0a7 --- /dev/null +++ b/src/current/_includes/v25.3/faq/sequential-numbers.md @@ -0,0 +1,8 @@ +Sequential numbers can be generated in CockroachDB using the `unique_rowid()` built-in function or using [SQL sequences]({% link {{ page.version.version }}/create-sequence.md %}). However, note the following considerations: + +- Unless you need roughly-ordered numbers, use [`UUID`]({% link {{ page.version.version }}/uuid.md %}) values instead. See the [previous +FAQ](#how-do-i-auto-generate-unique-row-ids-in-cockroachdb) for details. +- [Sequences]({% link {{ page.version.version }}/create-sequence.md %}) produce **unique** values. However, not all values are guaranteed to be produced (e.g., when a transaction is canceled after it consumes a value) and the values may be slightly reordered (e.g., when a transaction that +consumes a lower sequence number commits after a transaction that consumes a higher number). +- For maximum performance, avoid using sequences or `unique_rowid()` to generate row IDs or indexed columns. Values generated in these ways are logically close to each other and can cause [contention]({{ link_prefix }}performance-best-practices-overview.html#understanding-and-avoiding-transaction-contention) on a few data ranges during inserts. Instead, prefer [`UUID`]({% link {{ page.version.version }}/uuid.md %}) identifiers. 
+- {% include {{page.version.version}}/performance/use-hash-sharded-indexes.md %}
diff --git a/src/current/_includes/v25.3/faq/sequential-transactions.md b/src/current/_includes/v25.3/faq/sequential-transactions.md
new file mode 100644
index 00000000000..21e4a8c212b
--- /dev/null
+++ b/src/current/_includes/v25.3/faq/sequential-transactions.md
@@ -0,0 +1,19 @@
+Most use cases that ask for a strong time-based write ordering can be solved with other, more distribution-friendly
+solutions instead. For example, CockroachDB's [time travel queries (`AS OF SYSTEM
+TIME`)](https://www.cockroachlabs.com/blog/time-travel-queries-select-witty_subtitle-the_future/) support the following:
+
+- Paginating through all the changes to a table or dataset
+- Determining the order of changes to data over time
+- Determining the state of data at some point in the past
+- Determining the changes to data between two points of time
+
+Consider also that the values generated by `unique_rowid()`, described in the previous FAQ entries, provide an approximate time ordering.
+
+However, if your application absolutely requires strong time-based write ordering, it is possible to create a strictly monotonic counter in CockroachDB that increases over time as follows:
+
+- Initially: `CREATE TABLE cnt(val INT PRIMARY KEY); INSERT INTO cnt(val) VALUES(1);`
+- In each transaction: `INSERT INTO cnt(val) SELECT max(val)+1 FROM cnt RETURNING val;`
+
+This will cause [`INSERT`]({% link {{ page.version.version }}/insert.md %}) transactions to conflict with each other and effectively force the transactions to commit one at a time throughout the cluster, which in turn guarantees that the values generated in this way are strictly increasing over time without gaps. The caveat is that performance is severely limited as a result.
+
+If you find yourself interested in this problem, please [contact us]({% link {{ page.version.version }}/support-resources.md %}) and describe your situation. We would be glad to help you find alternative solutions and possibly extend CockroachDB to better match your needs.
diff --git a/src/current/_includes/v25.3/faq/simulate-key-value-store.md b/src/current/_includes/v25.3/faq/simulate-key-value-store.md
new file mode 100644
index 00000000000..13beebeb957
--- /dev/null
+++ b/src/current/_includes/v25.3/faq/simulate-key-value-store.md
@@ -0,0 +1,21 @@
+CockroachDB is a distributed SQL database built on a transactional and strongly-consistent key-value store. Although it is not possible to access the key-value store directly, you can mirror direct access using a "simple" table of two columns, with one set as the primary key:
+
+~~~ sql
+> CREATE TABLE kv (k INT PRIMARY KEY, v BYTES);
+~~~
+
+When such a "simple" table has no indexes or foreign keys, [`INSERT`]({% link {{ page.version.version }}/insert.md %})/[`UPSERT`]({% link {{ page.version.version }}/upsert.md %})/[`UPDATE`]({% link {{ page.version.version }}/update.md %})/[`DELETE`]({% link {{ page.version.version }}/delete.md %}) statements translate to key-value operations with minimal overhead (single-digit percent slowdowns). For example, the following `UPSERT` to add or replace a row in the table would translate into a single key-value Put operation:
+
+~~~ sql
+> UPSERT INTO kv VALUES (1, b'hello')
+~~~
+
+This SQL table approach also offers you a well-defined query language, a known transaction model, and the flexibility to add more columns to the table if the need arises.
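+
+For example, a minimal sketch (assuming the `kv` table above; the `expires_at` column name is purely illustrative) of evolving the schema later without giving up the key-value access pattern:
+
+~~~ sql
+> ALTER TABLE kv ADD COLUMN expires_at TIMESTAMPTZ NULL;
+~~~
+
+Existing statements such as `UPSERT INTO kv (k, v) VALUES (1, b'hello')` continue to work unchanged, because the new column is nullable and is simply left `NULL` when not written.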
diff --git a/src/current/_includes/v25.3/faq/what-is-crdb.md b/src/current/_includes/v25.3/faq/what-is-crdb.md new file mode 100644 index 00000000000..28857ed61fa --- /dev/null +++ b/src/current/_includes/v25.3/faq/what-is-crdb.md @@ -0,0 +1,7 @@ +CockroachDB is a [distributed SQL](https://www.cockroachlabs.com/blog/what-is-distributed-sql/) database built on a transactional and strongly-consistent key-value store. It **scales** horizontally; **survives** disk, machine, rack, and even datacenter failures with minimal latency disruption and no manual intervention; supports **strongly-consistent** ACID transactions; and provides a familiar **SQL** API for structuring, manipulating, and querying data. + +CockroachDB is inspired by Google's [Spanner](http://research.google.com/archive/spanner.html) and [F1](http://research.google.com/pubs/pub38125.html) technologies, and the [source code](https://github.com/cockroachdb/cockroach) is freely available. + +{{site.data.alerts.callout_success}} +For a deeper dive into CockroachDB's capabilities and how it fits into the database landscape, take the free [**Intro to Distributed SQL and CockroachDB**](https://university.cockroachlabs.com/courses/course-v1:crl+intro-to-distributed-sql-and-cockroachdb+self-paced/about) course on Cockroach University. +{{site.data.alerts.end}} \ No newline at end of file diff --git a/src/current/_includes/v25.3/filter-tabs/crdb-kubernetes.md b/src/current/_includes/v25.3/filter-tabs/crdb-kubernetes.md new file mode 100644 index 00000000000..db7f18ff324 --- /dev/null +++ b/src/current/_includes/v25.3/filter-tabs/crdb-kubernetes.md @@ -0,0 +1,4 @@ +{% assign tab_names_html = "Secure;Insecure" %} +{% assign html_page_filenames = "orchestrate-a-local-cluster-with-kubernetes.html;orchestrate-a-local-cluster-with-kubernetes-insecure.html" %} + +{% include filter-tabs.md tab_names=tab_names_html page_filenames=html_page_filenames page_folder=page.version.version %} diff --git a/src/current/_includes/v25.3/filter-tabs/crdb-single-kubernetes.md b/src/current/_includes/v25.3/filter-tabs/crdb-single-kubernetes.md new file mode 100644 index 00000000000..409bdc1855c --- /dev/null +++ b/src/current/_includes/v25.3/filter-tabs/crdb-single-kubernetes.md @@ -0,0 +1,4 @@ +{% assign tab_names_html = "Secure;Insecure" %} +{% assign html_page_filenames = "deploy-cockroachdb-with-kubernetes.html;deploy-cockroachdb-with-kubernetes-insecure.html" %} + +{% include filter-tabs.md tab_names=tab_names_html page_filenames=html_page_filenames page_folder=page.version.version %} diff --git a/src/current/_includes/v25.3/filter-tabs/crud-go.md b/src/current/_includes/v25.3/filter-tabs/crud-go.md new file mode 100644 index 00000000000..a69d0e4435c --- /dev/null +++ b/src/current/_includes/v25.3/filter-tabs/crud-go.md @@ -0,0 +1,4 @@ +{% assign tab_names_html = "Use pgx;Use GORM;Use lib/pq;Use upper/db" %} +{% assign html_page_filenames = "build-a-go-app-with-cockroachdb.html;build-a-go-app-with-cockroachdb-gorm.html;build-a-go-app-with-cockroachdb-pq.html;build-a-go-app-with-cockroachdb-upperdb.html" %} + +{% include filter-tabs.md tab_names=tab_names_html page_filenames=html_page_filenames page_folder=page.version.version %} diff --git a/src/current/_includes/v25.3/filter-tabs/crud-java.md b/src/current/_includes/v25.3/filter-tabs/crud-java.md new file mode 100644 index 00000000000..5cbdf749e09 --- /dev/null +++ b/src/current/_includes/v25.3/filter-tabs/crud-java.md @@ -0,0 +1,4 @@ +{% assign tab_names_html = "Use JDBC;Use Hibernate;Use jOOQ;Use 
MyBatis-Spring" %} +{% assign html_page_filenames = "build-a-java-app-with-cockroachdb.html;build-a-java-app-with-cockroachdb-hibernate.html;build-a-java-app-with-cockroachdb-jooq.html;build-a-spring-app-with-cockroachdb-mybatis.html" %} + +{% include filter-tabs.md tab_names=tab_names_html page_filenames=html_page_filenames page_folder=page.version.version %} diff --git a/src/current/_includes/v25.3/filter-tabs/crud-js.md b/src/current/_includes/v25.3/filter-tabs/crud-js.md new file mode 100644 index 00000000000..bb319ed88c1 --- /dev/null +++ b/src/current/_includes/v25.3/filter-tabs/crud-js.md @@ -0,0 +1,4 @@ +{% assign tab_names_html = "Use node-postgres;Use Sequelize;Use Knex.js;Use Prisma;Use TypeORM" %} +{% assign html_page_filenames = "build-a-nodejs-app-with-cockroachdb.html;build-a-nodejs-app-with-cockroachdb-sequelize.html;build-a-nodejs-app-with-cockroachdb-knexjs.html;build-a-nodejs-app-with-cockroachdb-prisma.html;build-a-typescript-app-with-cockroachdb.html" %} + +{% include filter-tabs.md tab_names=tab_names_html page_filenames=html_page_filenames page_folder=page.version.version %} diff --git a/src/current/_includes/v25.3/filter-tabs/crud-python.md b/src/current/_includes/v25.3/filter-tabs/crud-python.md new file mode 100644 index 00000000000..e721cc92405 --- /dev/null +++ b/src/current/_includes/v25.3/filter-tabs/crud-python.md @@ -0,0 +1,4 @@ +{% assign tab_names_html = "Use psycopg3;Use psycopg2;Use SQLAlchemy;Use Django;Use asyncpg" %} +{% assign html_page_filenames = "build-a-python-app-with-cockroachdb-psycopg3.html;build-a-python-app-with-cockroachdb.html;build-a-python-app-with-cockroachdb-sqlalchemy.html;build-a-python-app-with-cockroachdb-django.html;build-a-python-app-with-cockroachdb-asyncpg.html" %} + +{% include filter-tabs.md tab_names=tab_names_html page_filenames=html_page_filenames page_folder=page.version.version %} diff --git a/src/current/_includes/v25.3/filter-tabs/crud-ruby.md b/src/current/_includes/v25.3/filter-tabs/crud-ruby.md new file mode 100644 index 00000000000..5fc13aa697b --- /dev/null +++ b/src/current/_includes/v25.3/filter-tabs/crud-ruby.md @@ -0,0 +1,4 @@ +{% assign tab_names_html = "Use pg;Use ActiveRecord" %} +{% assign html_page_filenames = "build-a-ruby-app-with-cockroachdb.html;build-a-ruby-app-with-cockroachdb-activerecord.html" %} + +{% include filter-tabs.md tab_names=tab_names_html page_filenames=html_page_filenames page_folder=page.version.version %} diff --git a/src/current/_includes/v25.3/filter-tabs/crud-spring.md b/src/current/_includes/v25.3/filter-tabs/crud-spring.md new file mode 100644 index 00000000000..bd4f66f19a7 --- /dev/null +++ b/src/current/_includes/v25.3/filter-tabs/crud-spring.md @@ -0,0 +1,4 @@ +{% assign tab_names_html = "Use JDBC;Use JPA" %} +{% assign html_page_filenames = "build-a-spring-app-with-cockroachdb-jdbc.html;build-a-spring-app-with-cockroachdb-jpa.html" %} + +{% include filter-tabs.md tab_names=tab_names_html page_filenames=html_page_filenames page_folder=page.version.version %} diff --git a/src/current/_includes/v25.3/filter-tabs/deploy-crdb-aws.md b/src/current/_includes/v25.3/filter-tabs/deploy-crdb-aws.md new file mode 100644 index 00000000000..706e5d85b8f --- /dev/null +++ b/src/current/_includes/v25.3/filter-tabs/deploy-crdb-aws.md @@ -0,0 +1,4 @@ +{% assign tab_names_html = "Secure;Insecure" %} +{% assign html_page_filenames = "deploy-cockroachdb-on-aws.html;deploy-cockroachdb-on-aws-insecure.html" %} + +{% include filter-tabs.md tab_names=tab_names_html page_filenames=html_page_filenames 
page_folder=page.version.version %} diff --git a/src/current/_includes/v25.3/filter-tabs/deploy-crdb-do.md b/src/current/_includes/v25.3/filter-tabs/deploy-crdb-do.md new file mode 100644 index 00000000000..02e44afee30 --- /dev/null +++ b/src/current/_includes/v25.3/filter-tabs/deploy-crdb-do.md @@ -0,0 +1,4 @@ +{% assign tab_names_html = "Secure;Insecure" %} +{% assign html_page_filenames = "deploy-cockroachdb-on-digital-ocean.html;deploy-cockroachdb-on-digital-ocean-insecure.html" %} + +{% include filter-tabs.md tab_names=tab_names_html page_filenames=html_page_filenames page_folder=page.version.version %} diff --git a/src/current/_includes/v25.3/filter-tabs/deploy-crdb-gce.md b/src/current/_includes/v25.3/filter-tabs/deploy-crdb-gce.md new file mode 100644 index 00000000000..5799dfec9f0 --- /dev/null +++ b/src/current/_includes/v25.3/filter-tabs/deploy-crdb-gce.md @@ -0,0 +1,4 @@ +{% assign tab_names_html = "Secure;Insecure" %} +{% assign html_page_filenames = "deploy-cockroachdb-on-google-cloud-platform.html;deploy-cockroachdb-on-google-cloud-platform-insecure.html" %} + +{% include filter-tabs.md tab_names=tab_names_html page_filenames=html_page_filenames page_folder=page.version.version %} diff --git a/src/current/_includes/v25.3/filter-tabs/deploy-crdb-ma.md b/src/current/_includes/v25.3/filter-tabs/deploy-crdb-ma.md new file mode 100644 index 00000000000..3f1162b426c --- /dev/null +++ b/src/current/_includes/v25.3/filter-tabs/deploy-crdb-ma.md @@ -0,0 +1,4 @@ +{% assign tab_names_html = "Secure;Insecure" %} +{% assign html_page_filenames = "deploy-cockroachdb-on-microsoft-azure.html;deploy-cockroachdb-on-microsoft-azure-insecure.html" %} + +{% include filter-tabs.md tab_names=tab_names_html page_filenames=html_page_filenames page_folder=page.version.version %} diff --git a/src/current/_includes/v25.3/filter-tabs/deploy-crdb-op.md b/src/current/_includes/v25.3/filter-tabs/deploy-crdb-op.md new file mode 100644 index 00000000000..fdf35c61162 --- /dev/null +++ b/src/current/_includes/v25.3/filter-tabs/deploy-crdb-op.md @@ -0,0 +1,4 @@ +{% assign tab_names_html = "Secure;Insecure" %} +{% assign html_page_filenames = "deploy-cockroachdb-on-premises.html;deploy-cockroachdb-on-premises-insecure.html" %} + +{% include filter-tabs.md tab_names=tab_names_html page_filenames=html_page_filenames page_folder=page.version.version %} diff --git a/src/current/_includes/v25.3/filter-tabs/perf-bench-tpc-c.md b/src/current/_includes/v25.3/filter-tabs/perf-bench-tpc-c.md new file mode 100644 index 00000000000..1394f916add --- /dev/null +++ b/src/current/_includes/v25.3/filter-tabs/perf-bench-tpc-c.md @@ -0,0 +1,4 @@ +{% assign tab_names_html = "Local;Local (Multi-Region);Small;Medium;Large" %} +{% assign html_page_filenames = "performance-benchmarking-with-tpcc-local.html;performance-benchmarking-with-tpcc-local-multiregion.html;performance-benchmarking-with-tpcc-small.html;performance-benchmarking-with-tpcc-medium.html;performance-benchmarking-with-tpcc-large.html" %} + +{% include filter-tabs.md tab_names=tab_names_html page_filenames=html_page_filenames page_folder=page.version.version %} diff --git a/src/current/_includes/v25.3/filter-tabs/security-cert.md b/src/current/_includes/v25.3/filter-tabs/security-cert.md new file mode 100644 index 00000000000..0832e618021 --- /dev/null +++ b/src/current/_includes/v25.3/filter-tabs/security-cert.md @@ -0,0 +1,4 @@ +{% assign tab_names_html = "Use cockroach cert;Use OpenSSL;Use custom CA" %} +{% assign html_page_filenames = 
"cockroach-cert.html;create-security-certificates-openssl.html;create-security-certificates-custom-ca.html" %} + +{% include filter-tabs.md tab_names=tab_names_html page_filenames=html_page_filenames page_folder=page.version.version %} diff --git a/src/current/_includes/v25.3/filter-tabs/start-a-cluster.md b/src/current/_includes/v25.3/filter-tabs/start-a-cluster.md new file mode 100644 index 00000000000..92a688078cb --- /dev/null +++ b/src/current/_includes/v25.3/filter-tabs/start-a-cluster.md @@ -0,0 +1,4 @@ +{% assign tab_names_html = "Secure;Insecure" %} +{% assign html_page_filenames = "secure-a-cluster.html;start-a-local-cluster.html" %} + +{% include filter-tabs.md tab_names=tab_names_html page_filenames=html_page_filenames page_folder=page.version.version %} diff --git a/src/current/_includes/v25.3/finalization-required/119894.md b/src/current/_includes/v25.3/finalization-required/119894.md new file mode 100644 index 00000000000..f2b393c3c0e --- /dev/null +++ b/src/current/_includes/v25.3/finalization-required/119894.md @@ -0,0 +1 @@ +[Splits](https://cockroachlabs.com/docs/{{ include.version }}/architecture/distribution-layer#range-splits) no longer hold [latches](https://cockroachlabs.com/docs/architecture/distribution-layer.#latch-manager) for time proportional to the range size while computing [MVCC](https://cockroachlabs.com/docs/{{ include.version }}/architecture/storage-layer#mvcc) statistics. Instead, MVCC statistics are pre-computed before the critical section of the split. As a side effect, the resulting statistics are no longer 100% accurate because they may correctly distribute writes concurrent with the split. To mitigate against this potential inaccuracy, and to prevent the statistics from drifting after successive splits, the existing stored statistics are re-computed and corrected if needed during the non-critical section of the split. [#119894](https://github.com/cockroachdb/cockroach/pull/119894) diff --git a/src/current/_includes/v25.3/import-export-auth.md b/src/current/_includes/v25.3/import-export-auth.md new file mode 100644 index 00000000000..fc3f2938cb4 --- /dev/null +++ b/src/current/_includes/v25.3/import-export-auth.md @@ -0,0 +1,9 @@ +The following examples make use of: + +- Amazon S3 connection strings. For guidance on connecting to other storage options or using other authentication parameters instead, read [Use Cloud Storage]({% link {{ page.version.version }}/use-cloud-storage.md %}#example-file-urls). +- The **default** `AUTH=specified` parameter. For guidance on using `AUTH=implicit` authentication with Amazon S3 buckets instead, read [Cloud Storage Authentication]({% link {{ page.version.version }}/cloud-storage-authentication.md %}). + +Also, note the following features for connecting and authenticating to cloud storage: + +- External connections, which allow you to represent an external storage or sink URI. You can then specify the external connection's name in statements rather than the provider-specific URI. For detail on using external connections, see the [`CREATE EXTERNAL CONNECTION`]({% link {{ page.version.version }}/create-external-connection.md %}) page. +- Assume role authentication, which allows you to limit the control specific users have over your storage buckets. See [Assume role authentication]({% link {{ page.version.version }}/cloud-storage-authentication.md %}) for more information. 
\ No newline at end of file diff --git a/src/current/_includes/v25.3/install-docker-steps.md b/src/current/_includes/v25.3/install-docker-steps.md new file mode 100644 index 00000000000..09126119a2c --- /dev/null +++ b/src/current/_includes/v25.3/install-docker-steps.md @@ -0,0 +1,57 @@ +{% comment %}This include is used in install-cockroachdb-*.md{% endcomment %} +{% capture deployment_link %} +{% if page.name contains "mac" %}[Deploy a local container in Docker]({% link {{ page.version.version }}/start-a-local-cluster-in-docker-mac.md %}) +{% elsif page.name contains "windows" %}[Deploy a local container in Docker]({% link {{ page.version.version }}/start-a-local-cluster-in-docker-windows.md %}) +{% else %}[Deploy a local container in Docker]({% link {{ page.version.version }}/start-a-local-cluster-in-docker-linux.md %}) +{% endif %} +{% endcapture %} + +{{site.data.alerts.callout_danger}} +Running a stateful application like CockroachDB in Docker is more complex and error-prone than most uses of Docker. Unless you are very experienced with Docker, we recommend starting with a different installation and deployment method. +{{site.data.alerts.end}} + +CockroachDB's Docker images are [multi-platform images](https://docs.docker.com/build/building/multi-platform/) that contain binaries for both Intel and ARM. Multi-platform images do not take up additional space on your Docker host. + +Experimental images are not qualified for production use and not eligible for support or uptime SLA commitments. + +1. Install a container runtime, such as [Docker Desktop](https://docs.docker.com/desktop/). +1. Verify that the runtime service is installed correctly and running in the background. Refer to the runtime's documentation. For Docker, start a terminal and run `docker version`. If you get an error, verify your installation and try again. +1. Visit [Docker Hub](https://hub.docker.com/r/cockroachdb/cockroach) and decide which image tag to pull. Releases are rolled out gradually. Docker images for a new release are published when other binary artifacts are published. The following tag formats are commonly used, although other tags are available. + +
+    <table>
+    <thead>
+    <tr>
+        <th>Tag</th>
+        <th>Example</th>
+        <th>Description</th>
+    </tr>
+    </thead>
+    <tbody>
+    <tr>
+        <td>An exact patch</td>
+        <td>`{{ page.version.name }}`</td>
+        <td>Pins a cluster to an exact patch. The cluster is upgraded to a newer patch or major version only when you pull a newer tag.</td>
+    </tr>
+    <tr>
+        <td>Latest patch within a major version</td>
+        <td>`latest-{{ page.version.version }}`</td>
+        <td>Automatically updates a cluster to the latest patch of the version you specify. This tag is recommended in production, because it keeps your cluster updated within a major version but does not automatically upgrade your cluster to a new major version.</td>
+    </tr>
+    <tr>
+        <td>Latest patch within the latest major version</td>
+        <td>`latest`</td>
+        <td>This is the default if you do not specify a tag. It updates your cluster automatically to each new patch and major version, and is not recommended in production.</td>
+    </tr>
+    </tbody>
+    </table>
+
+    Copy the tag you want to pull.
+
+1. Pull the image. Replace `{TAG}` with the tag from the previous step.
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    docker pull cockroachdb/cockroach:{TAG}
+    ~~~
+
+1. Start a cluster by starting the container on each node using `docker start`. The default command is `cockroach start`. Pass your desired flags as the final argument. For details, refer to {{ deployment_link | strip }}.
diff --git a/src/current/_includes/v25.3/json/json-sample.go b/src/current/_includes/v25.3/json/json-sample.go
new file mode 100644
index 00000000000..d5953a71ee2
--- /dev/null
+++ b/src/current/_includes/v25.3/json/json-sample.go
@@ -0,0 +1,86 @@
+package main
+
+import (
+	"database/sql"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"time"
+
+	_ "github.com/lib/pq"
+)
+
+func main() {
+	db, err := sql.Open("postgres", "user=maxroach dbname=jsonb_test sslmode=disable port=26257")
+	if err != nil {
+		panic(err)
+	}
+
+	// The Reddit API wants us to tell it where to start from. For the first
+	// request, we just say "null" to mean "from the start"; subsequent requests
+	// use the value received from the previous call.
+	after := "null"
+
+	for i := 0; i < 41; i++ {
+		after, err = makeReq(db, after)
+		if err != nil {
+			panic(err)
+		}
+		// Reddit limits to 30 requests per minute, so do not do any more than that.
+		time.Sleep(2 * time.Second)
+	}
+}
+
+func makeReq(db *sql.DB, after string) (string, error) {
+	// First, make a request to reddit using the appropriate "after" string.
+	client := &http.Client{}
+	req, err := http.NewRequest("GET", fmt.Sprintf("https://www.reddit.com/r/programming.json?after=%s", after), nil)
+	if err != nil {
+		return "", err
+	}
+
+	req.Header.Add("User-Agent", `Go`)
+
+	resp, err := client.Do(req)
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	res, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return "", err
+	}
+
+	// We've gotten back our JSON from reddit, so we can use a couple of SQL
+	// tricks to accomplish multiple things at once.
+	// The JSON reddit returns looks like this:
+	// {
+	//   "data": {
+	//     "children": [ ... ]
+	//   },
+	//   "after": ...
+	// }
+	// We structure our query so that we extract the `children` field, and then
+	// expand that and insert each individual element into the database as a
+	// separate row. We then return the "after" field so we know how to make the
+	// next request.
+	r, err := db.Query(`
+		INSERT INTO jsonb_test.programming (posts)
+		SELECT json_array_elements($1->'data'->'children')
+		RETURNING $1->'data'->'after'`,
+		string(res))
+	if err != nil {
+		return "", err
+	}
+	defer r.Close()
+
+	// Since we did a RETURNING, we need to grab the result of our query.
+	r.Next()
+	var newAfter string
+	if err := r.Scan(&newAfter); err != nil {
+		return "", err
+	}
+
+	return newAfter, nil
+}
diff --git a/src/current/_includes/v25.3/json/json-sample.py b/src/current/_includes/v25.3/json/json-sample.py
new file mode 100644
index 00000000000..49e302613e0
--- /dev/null
+++ b/src/current/_includes/v25.3/json/json-sample.py
@@ -0,0 +1,44 @@
+import json
+import psycopg2
+import requests
+import time
+
+conn = psycopg2.connect(database="jsonb_test", user="maxroach", host="localhost", port=26257)
+conn.set_session(autocommit=True)
+cur = conn.cursor()
+
+# The Reddit API wants us to tell it where to start from. For the first request,
+# we just say "null" to mean "from the start"; subsequent requests use the value
+# received from the previous call.
+url = "https://www.reddit.com/r/programming.json"
+after = {"after": "null"}
+
+for n in range(41):
+    # First, make a request to reddit using the appropriate "after" string.
+    req = requests.get(url, params=after, headers={"User-Agent": "Python"})
+
+    # Decode the JSON and set "after" for the next request.
+    resp = req.json()
+    after = {"after": str(resp['data']['after'])}
+
+    # Convert the JSON to a string to send to the database.
+    data = json.dumps(resp)
+
+    # The JSON reddit returns looks like this:
+    # {
+    #   "data": {
+    #     "children": [ ... ]
+    #   },
+    #   "after": ...
+    # }
+    # We structure our query so that we extract the `children` field, and then
+    # expand that and insert each individual element into the database as a
+    # separate row.
+    cur.execute("""INSERT INTO jsonb_test.programming (posts)
+        SELECT json_array_elements(%s->'data'->'children')""", (data,))
+
+    # Reddit limits to 30 requests per minute, so do not do any more than that.
+    time.sleep(2)
+
+cur.close()
+conn.close()
diff --git a/src/current/_includes/v25.3/known-limitations/admission-control-limitations.md b/src/current/_includes/v25.3/known-limitations/admission-control-limitations.md
new file mode 100644
index 00000000000..26bff1a2dd7
--- /dev/null
+++ b/src/current/_includes/v25.3/known-limitations/admission-control-limitations.md
@@ -0,0 +1,3 @@
+Admission control works on the level of each node, not at the cluster level. The admission control system queues requests until the operations are processed or the request exceeds the timeout value (for example, by using [`SET statement_timeout`]({% link {{ page.version.version }}/set-vars.md %}#supported-variables)). If you specify aggressive timeout values, the system may operate correctly but have low throughput as the operations exceed the timeout value while only completing part of the work. There is no mechanism for preemptively rejecting requests when the work queues are long.
+
+Organizing operations by priority can mean that higher-priority operations consume all the available resources while lower-priority operations remain in the queue until they time out.
\ No newline at end of file
diff --git a/src/current/_includes/v25.3/known-limitations/alter-changefeed-cdc-queries.md b/src/current/_includes/v25.3/known-limitations/alter-changefeed-cdc-queries.md
new file mode 100644
index 00000000000..56dd7eeaacd
--- /dev/null
+++ b/src/current/_includes/v25.3/known-limitations/alter-changefeed-cdc-queries.md
@@ -0,0 +1 @@
+{% if page.name == "alter-changefeed.md" %} `ALTER CHANGEFEED` {% else %} [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) {% endif %} is not fully supported with changefeeds that use {% if page.name == "cdc-queries.md" %} CDC queries. {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}). {% endif %} You can alter the options that a changefeed uses, but you cannot alter the changefeed target tables. [#83033](https://github.com/cockroachdb/cockroach/issues/83033)
\ No newline at end of file
diff --git a/src/current/_includes/v25.3/known-limitations/alter-changefeed-limitations.md b/src/current/_includes/v25.3/known-limitations/alter-changefeed-limitations.md
new file mode 100644
index 00000000000..a183f2964f4
--- /dev/null
+++ b/src/current/_includes/v25.3/known-limitations/alter-changefeed-limitations.md
@@ -0,0 +1,8 @@
+- It is necessary to [`PAUSE`]({% link {{ page.version.version }}/pause-job.md %}) the changefeed before performing any [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) statement.
[#77171](https://github.com/cockroachdb/cockroach/issues/77171)
+- CockroachDB does not keep track of the [`initial_scan`]({% link {{ page.version.version }}/create-changefeed.md %}#initial-scan) option applied to tables when it is set to `yes` or `only`. For example:
+
+    ~~~ sql
+    ALTER CHANGEFEED {job_ID} ADD table WITH initial_scan = 'yes';
+    ~~~
+
+    This will trigger an initial scan of the table and the changefeed will track `table`. The changefeed will **not** track `initial_scan` specified as an option, so it will not display in the output of a `SHOW CHANGEFEED JOB` statement.
\ No newline at end of file
diff --git a/src/current/_includes/v25.3/known-limitations/alter-column-limitations.md b/src/current/_includes/v25.3/known-limitations/alter-column-limitations.md
new file mode 100644
index 00000000000..50d5d1f1e92
--- /dev/null
+++ b/src/current/_includes/v25.3/known-limitations/alter-column-limitations.md
@@ -0,0 +1,11 @@
+You cannot alter the data type of a column if:
+
+- The column is part of an [index]({% link {{ page.version.version }}/indexes.md %}).
+- The column has [`CHECK` constraints]({% link {{ page.version.version }}/check.md %}).
+- The column owns a [sequence]({% link {{ page.version.version }}/create-sequence.md %}).
+- The `ALTER COLUMN TYPE` statement is part of a combined `ALTER TABLE` statement.
+- The `ALTER COLUMN TYPE` statement is inside an [explicit transaction]({% link {{ page.version.version }}/begin-transaction.md %}).
+- The column is part of a [TTL expression]({% link {{ page.version.version }}/row-level-ttl.md %}).
+- The column is used in a [function body]({% link {{ page.version.version }}/user-defined-functions.md %}).
+- The column is part of a [computed column expression]({% link {{ page.version.version }}/computed-columns.md %}).
+- The column is referenced in a [view]({% link {{ page.version.version }}/views.md %}).
diff --git a/src/current/_includes/v25.3/known-limitations/alter-type-limitations.md b/src/current/_includes/v25.3/known-limitations/alter-type-limitations.md
new file mode 100644
index 00000000000..fa25e47f962
--- /dev/null
+++ b/src/current/_includes/v25.3/known-limitations/alter-type-limitations.md
@@ -0,0 +1,2 @@
+- When running the [`ALTER TYPE`]({% link {{ page.version.version }}/alter-type.md %}) statement, you can only reference a user-defined type from the database that contains the type.
+- You can only [cancel]({% link {{ page.version.version }}/cancel-job.md %}) `ALTER TYPE` [schema change jobs]({% link {{ page.version.version }}/online-schema-changes.md %}) that drop values. This is because when you drop a value, CockroachDB searches through every row that could contain the type's value, which could take a long time. All other `ALTER TYPE` schema change jobs are [non-cancellable]({% link {{ page.version.version }}/cancel-job.md %}#known-limitations).
\ No newline at end of file
diff --git a/src/current/_includes/v25.3/known-limitations/alter-view-limitations.md b/src/current/_includes/v25.3/known-limitations/alter-view-limitations.md
new file mode 100644
index 00000000000..642bed6ce08
--- /dev/null
+++ b/src/current/_includes/v25.3/known-limitations/alter-view-limitations.md
@@ -0,0 +1,4 @@
+`ALTER VIEW` does not currently support:
+
+- Changing the [`SELECT`]({% link {{ page.version.version }}/select-clause.md %}) statement executed by a view. Instead, you must drop the existing view and create a new view.
+- Renaming a view that other views depend on. This feature may be added in the future.
[#10083](https://github.com/cockroachdb/cockroach/issues/10083) \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/aost-limitations.md b/src/current/_includes/v25.3/known-limitations/aost-limitations.md new file mode 100644 index 00000000000..811c884d08d --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/aost-limitations.md @@ -0,0 +1 @@ +CockroachDB does not support placeholders in {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %}. The time value must be a constant value embedded in the SQL string. [#30955](https://github.com/cockroachdb/cockroach/issues/30955) \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/cancel-job-limitations.md b/src/current/_includes/v25.3/known-limitations/cancel-job-limitations.md new file mode 100644 index 00000000000..23080976a2a --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/cancel-job-limitations.md @@ -0,0 +1,8 @@ +- To avoid transaction states that cannot properly [roll back]({% link {{ page.version.version }}/rollback-transaction.md %}), the following statements cannot be cancelled with [`CANCEL JOB`]({% link {{ page.version.version }}/cancel-job.md %}): + + - `DROP` statements (e.g., [`DROP TABLE`]({% link {{ page.version.version }}/drop-table.md %})). + - `ALTER ... RENAME` statements (e.g., [`ALTER TABLE ... RENAME TO`]({% link {{ page.version.version }}/alter-table.md %}#rename-to)). + - [`CREATE TABLE ... AS`]({% link {{ page.version.version }}/create-table-as.md %}) statements. + - [`ALTER TYPE`]({% link {{ page.version.version }}/alter-type.md %}) statements, except for those that drop values. + +- When an Enterprise [`RESTORE`]({% link {{ page.version.version }}/restore.md %}) is canceled, partially restored data is properly cleaned up. This can have a minor, temporary impact on cluster performance. \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/cannot-refresh-materialized-views-inside-transactions.md b/src/current/_includes/v25.3/known-limitations/cannot-refresh-materialized-views-inside-transactions.md new file mode 100644 index 00000000000..b0aaf728177 --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/cannot-refresh-materialized-views-inside-transactions.md @@ -0,0 +1,27 @@ +- CockroachDB cannot refresh {% if page.name == "views.md" %} materialized views {% else %} [materialized views]({% link {{ page.version.version }}/views.md %}#materialized-views) {% endif %} inside [explicit transactions]({% link {{ page.version.version }}/begin-transaction.md %}). Trying to refresh a materialized view inside an explicit transaction will result in an error. + 1. Start [`cockroach demo`]({% link {{ page.version.version }}/cockroach-demo.md %}) with the sample `bank` data set: + + {% include_cached copy-clipboard.html %} + ~~~ shell + cockroach demo bank + ~~~ + 1. Create the materialized view described in [Usage]({% link {{ page.version.version }}/views.md %}#usage). + 1. Start a new multi-statement transaction with [`BEGIN TRANSACTION`]({% link {{ page.version.version }}/begin-transaction.md %}): + + {% include_cached copy-clipboard.html %} + ~~~ sql + BEGIN TRANSACTION; + ~~~ + 1. Inside the open transaction, attempt to [refresh the view]({% link {{ page.version.version }}/refresh.md %}). This will result in an error. 
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ sql
+    REFRESH MATERIALIZED VIEW overdrawn_accounts;
+    ~~~
+
+    ~~~
+    ERROR: cannot refresh view in an explicit transaction
+    SQLSTATE: 25000
+    ~~~
+
+    [#66008](https://github.com/cockroachdb/cockroach/issues/66008)
diff --git a/src/current/_includes/v25.3/known-limitations/cdc-queries-column-families.md b/src/current/_includes/v25.3/known-limitations/cdc-queries-column-families.md
new file mode 100644
index 00000000000..505a8c9700e
--- /dev/null
+++ b/src/current/_includes/v25.3/known-limitations/cdc-queries-column-families.md
@@ -0,0 +1 @@
+Creating a changefeed with {% if page.name == "cdc-queries.md" %} CDC queries {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}) {% endif %} on tables with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %} is not supported. [#127761](https://github.com/cockroachdb/cockroach/issues/127761)
\ No newline at end of file
diff --git a/src/current/_includes/v25.3/known-limitations/cdc-queries.md b/src/current/_includes/v25.3/known-limitations/cdc-queries.md
new file mode 100644
index 00000000000..2839eba5eda
--- /dev/null
+++ b/src/current/_includes/v25.3/known-limitations/cdc-queries.md
@@ -0,0 +1,7 @@
+- You can only apply CDC queries to a single table in each statement.
+- Some [stable functions]({% link {{ page.version.version }}/functions-and-operators.md %}#built-in-functions), notably functions that return MVCC timestamps, are overridden to return the MVCC timestamp of the event, e.g., `transaction_timestamp` or `statement_timestamp`. Additionally, some [time-based functions]({% link {{ page.version.version }}/functions-and-operators.md %}#date-and-time-functions), such as `now()`, are not supported. We recommend using the `transaction_timestamp()` function or the {% if page.name == "cdc-queries.md" %} `crdb_internal_mvcc_timestamp` {% else %}[`crdb_internal_mvcc_timestamp`]({% link {{ page.version.version }}/cdc-queries.md %}#crdb-internal-mvcc-timestamp) {% endif %} column instead.
+- The following are not permitted in CDC queries:
+    - [Volatile functions]({% link {{ page.version.version }}/functions-and-operators.md %}#function-volatility).
+    - Sub-select queries.
+    - [Aggregate]({% link {{ page.version.version }}/functions-and-operators.md %}#aggregate-functions) and [window functions]({% link {{ page.version.version }}/window-functions.md %}) (i.e., functions operating over many rows). [#98237](https://github.com/cockroachdb/cockroach/issues/98237)
+- `delete` changefeed events will only contain the [primary key]({% link {{ page.version.version }}/primary-key.md %}). All other columns will emit as `NULL`. See [Capture delete messages]({% link {{ page.version.version }}/cdc-queries.md %}#capture-delete-messages) for detail on running a CDC query that emits the deleted values. [#83835](https://github.com/cockroachdb/cockroach/issues/83835)
diff --git a/src/current/_includes/v25.3/known-limitations/cdc.md b/src/current/_includes/v25.3/known-limitations/cdc.md
new file mode 100644
index 00000000000..a473e94367c
--- /dev/null
+++ b/src/current/_includes/v25.3/known-limitations/cdc.md
@@ -0,0 +1,8 @@
+- Changefeed target options are limited to tables and [column families]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}).
[#73435](https://github.com/cockroachdb/cockroach/issues/73435)
+- {% include {{page.version.version}}/cdc/kafka-vpc-limitation.md %}
+- Webhook sinks only support HTTPS. Use the [`insecure_tls_skip_verify`]({% link {{ page.version.version }}/create-changefeed.md %}#insecure-tls-skip-verify) parameter when testing to disable certificate verification; however, this still requires HTTPS and certificates. [#73431](https://github.com/cockroachdb/cockroach/issues/73431)
+- Formats for changefeed messages are not supported by all changefeed sinks. Refer to the [Changefeed Sinks]({% link {{ page.version.version }}/changefeed-sinks.md %}) page for details on compatible formats with each sink and the [`format`]({% link {{ page.version.version }}/create-changefeed.md %}) option to specify a changefeed message format. [#73432](https://github.com/cockroachdb/cockroach/issues/73432)
+- Using the [`split_column_families`]({% link {{ page.version.version }}/create-changefeed.md %}#split-column-families) and [`resolved`]({% link {{ page.version.version }}/create-changefeed.md %}#resolved) options on the same changefeed will cause an error when using the following [sinks](changefeed-sinks.html): Kafka and Google Cloud Pub/Sub. Instead, use the individual `FAMILY` keyword to specify column families when creating a changefeed. [#79452](https://github.com/cockroachdb/cockroach/issues/79452)
+- {% include {{page.version.version}}/cdc/types-udt-composite-general.md %} The following limitations apply:
+    - {% include {{page.version.version}}/cdc/avro-udt-composite.md %}
+    - {% include {{page.version.version}}/cdc/csv-udt-composite.md %}
\ No newline at end of file
diff --git a/src/current/_includes/v25.3/known-limitations/changefeed-column-family-message.md b/src/current/_includes/v25.3/known-limitations/changefeed-column-family-message.md
new file mode 100644
index 00000000000..41744b9b4b4
--- /dev/null
+++ b/src/current/_includes/v25.3/known-limitations/changefeed-column-family-message.md
@@ -0,0 +1 @@
+When you create a changefeed on a table with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %}, the changefeed will emit messages per column family in separate streams. As a result, [changefeed messages]({% link {{ page.version.version }}/changefeed-messages.md %}) for different column families will arrive at the [sink]({% link {{ page.version.version }}/changefeed-sinks.md %}) under separate topics. [#127736](https://github.com/cockroachdb/cockroach/issues/127736)
\ No newline at end of file
diff --git a/src/current/_includes/v25.3/known-limitations/copy-syntax.md b/src/current/_includes/v25.3/known-limitations/copy-syntax.md
new file mode 100644
index 00000000000..e64a075dcac
--- /dev/null
+++ b/src/current/_includes/v25.3/known-limitations/copy-syntax.md
@@ -0,0 +1,5 @@
+CockroachDB does not yet support the following `COPY` syntax:
+
+ - `COPY ... WITH FREEZE`. [#85573](https://github.com/cockroachdb/cockroach/issues/85573)
+ - `COPY ... WITH QUOTE`. [#85574](https://github.com/cockroachdb/cockroach/issues/85574)
+ - `COPY ... FROM ... WHERE <expr>`.
[#54580](https://github.com/cockroachdb/cockroach/issues/54580) diff --git a/src/current/_includes/v25.3/known-limitations/create-statistics-aost-limitation.md b/src/current/_includes/v25.3/known-limitations/create-statistics-aost-limitation.md new file mode 100644 index 00000000000..09f86f51c48 --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/create-statistics-aost-limitation.md @@ -0,0 +1 @@ +The `ANALYZE` alias {% if page.name != "create-statistics.md" %}of [`CREATE STATISTICS`]({% link {{ page.version.version }}/create-statistics.md %}){% endif %} does not support specifying an {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %} timestamp. `ANALYZE` statements use `AS OF SYSTEM TIME '-0.001ms'` automatically. For more control over the statistics interval, use the `CREATE STATISTICS` syntax instead. [#96430](https://github.com/cockroachdb/cockroach/issues/96430) \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/create-table-as-limitations.md b/src/current/_includes/v25.3/known-limitations/create-table-as-limitations.md new file mode 100644 index 00000000000..9f837eb074c --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/create-table-as-limitations.md @@ -0,0 +1 @@ +The [primary key]({% link {{ page.version.version }}/primary-key.md %}) of tables created with `CREATE TABLE ... AS` is not automatically derived from the query results. You must specify new primary keys at table creation. For examples, see [Specify a primary key]({% link {{ page.version.version }}/create-table-as.md %}#specify-a-primary-key). \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/data-domiciling-limitations.md b/src/current/_includes/v25.3/known-limitations/data-domiciling-limitations.md new file mode 100644 index 00000000000..509a9c9599f --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/data-domiciling-limitations.md @@ -0,0 +1,4 @@ +- When columns are [indexed]({% link {{ page.version.version }}/indexes.md %}), a subset of data from the indexed columns may appear in [meta ranges]({% link {{ page.version.version }}/architecture/distribution-layer.md %}#meta-ranges) or other system tables. CockroachDB synchronizes these system ranges and system tables across nodes. This synchronization does not respect any multi-region settings applied via either the [multi-region SQL statements]({% link {{ page.version.version }}/multiregion-overview.md %}), or the low-level [zone configs]({% link {{ page.version.version }}/configure-replication-zones.md %}) mechanism. +- [Zone configs]({% link {{ page.version.version }}/configure-replication-zones.md %}) can be used for data placement but these features were historically built for performance, not for domiciling. The replication system's top priority is to prevent the loss of data and it may override the zone configurations if necessary to ensure data durability. For more information, see [Replication Controls]({% link {{ page.version.version }}/configure-replication-zones.md %}#types-of-constraints). +- If your [log files]({% link {{ page.version.version }}/logging-overview.md %}) are kept in the region where they were generated, there is some cross-region leakage (like the system tables described previously), but the majority of user data that makes it into the logs is going to be homed in that region. 
If that's not strong enough, you can use the [log redaction functionality]({% link {{ page.version.version }}/configure-logs.md %}#redact-logs) to strip all raw data from the logs. You can also limit your log retention entirely.
+- If you start a node with a [`--locality`]({% link {{ page.version.version }}/cockroach-start.md %}#locality) flag that says the node is in region _A_, but the node is actually running in some region _B_, data domiciling based on the inferred node placement will not work. A CockroachDB node only knows its locality based on the text supplied to the `--locality` flag; it cannot ensure that it is actually running in that physical location.
\ No newline at end of file
diff --git a/src/current/_includes/v25.3/known-limitations/datadog-self-hosted-limitations.md b/src/current/_includes/v25.3/known-limitations/datadog-self-hosted-limitations.md
new file mode 100644
index 00000000000..215f712404c
--- /dev/null
+++ b/src/current/_includes/v25.3/known-limitations/datadog-self-hosted-limitations.md
@@ -0,0 +1 @@
+The integration of your CockroachDB {{ site.data.products.core }} cluster with Datadog only supports displaying cluster-wide averages of reported metrics. Filtering by a specific node is unsupported.
diff --git a/src/current/_includes/v25.3/known-limitations/drop-column-partial-index.md b/src/current/_includes/v25.3/known-limitations/drop-column-partial-index.md
new file mode 100644
index 00000000000..9fd1811cc43
--- /dev/null
+++ b/src/current/_includes/v25.3/known-limitations/drop-column-partial-index.md
@@ -0,0 +1 @@
+CockroachDB prevents a column from being dropped using [`ALTER TABLE ... DROP COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#drop-column) if it is referenced by a partial index predicate. To drop such a column, the partial indexes need to be dropped first using [`DROP INDEX`]({% link {{ page.version.version }}/drop-index.md %}). [#97813](https://github.com/cockroachdb/cockroach/issues/97813)
\ No newline at end of file
diff --git a/src/current/_includes/v25.3/known-limitations/drop-owned-by-limitations.md b/src/current/_includes/v25.3/known-limitations/drop-owned-by-limitations.md
new file mode 100644
index 00000000000..95685f6adf1
--- /dev/null
+++ b/src/current/_includes/v25.3/known-limitations/drop-owned-by-limitations.md
@@ -0,0 +1,13 @@
+- [`ENUM`]({% link {{ page.version.version }}/enum.md %}) types are not dropped.
+- [`DROP OWNED BY`]({% link {{ page.version.version }}/drop-owned-by.md %}) drops all owned objects as well as any [grants]({% link {{ page.version.version }}/grant.md %}) on objects not owned by the [role]({% link {{ page.version.version }}/security-reference/authorization.md %}#roles).
+- If the [role]({% link {{ page.version.version }}/security-reference/authorization.md %}#roles) for which you are trying to `DROP OWNED BY` was granted a [system-level privilege]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges) (i.e., using the [`GRANT SYSTEM ...`]({% link {{ page.version.version }}/grant.md %}#grant-system-level-privileges-on-the-entire-cluster) statement), the following error will be signalled:
+
+    ~~~
+    ERROR: cannot perform drop owned by if role has synthetic privileges; foo has entries in system.privileges
+    SQLSTATE: 0A000
+    HINT: perform REVOKE SYSTEM ...
for the relevant privileges foo has in system.privileges + ~~~ + + The phrase "synthetic privileges" in the error message refers to [system-level privileges]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges). + + The workaround is to use [`SHOW SYSTEM GRANTS FOR {role}`](show-system-grants.html) and then use [`REVOKE SYSTEM ...`](revoke.html#revoke-system-level-privileges-on-the-entire-cluster) for each privilege in the result. [#88149](https://github.com/cockroachdb/cockroach/issues/88149) \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/drop-single-partition.md b/src/current/_includes/v25.3/known-limitations/drop-single-partition.md new file mode 100644 index 00000000000..ddda733e09e --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/drop-single-partition.md @@ -0,0 +1 @@ +CockroachDB does not currently support dropping a single partition from a table. In order to remove partitions, you can [repartition]({% unless page.name == "partitioning.md" %}{% link {{ page.version.version }}/partitioning.md %}{% endunless %}#repartition-a-table) the table. diff --git a/src/current/_includes/v25.3/known-limitations/drop-trigger-limitations.md b/src/current/_includes/v25.3/known-limitations/drop-trigger-limitations.md new file mode 100644 index 00000000000..90745f7e17a --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/drop-trigger-limitations.md @@ -0,0 +1 @@ +[`DROP TRIGGER`]({% link {{ page.version.version }}/drop-trigger.md %}) with `CASCADE` is not supported. [#128151](https://github.com/cockroachdb/cockroach/issues/128151) \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/drop-unique-index-from-create-table.md b/src/current/_includes/v25.3/known-limitations/drop-unique-index-from-create-table.md new file mode 100644 index 00000000000..ebe7750ee62 --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/drop-unique-index-from-create-table.md @@ -0,0 +1 @@ +[`UNIQUE` indexes]({% link {{ page.version.version }}/create-index.md %}) created as part of a [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}) statement cannot be removed without using [`CASCADE`]({% unless page.name == "drop-index.md" %}drop-index.html{% endunless %}#remove-an-index-and-dependent-objects-with-cascade). Unique indexes created with [`CREATE INDEX`]({% link {{ page.version.version }}/create-index.md %}) do not have this limitation. diff --git a/src/current/_includes/v25.3/known-limitations/expression-index-limitations.md b/src/current/_includes/v25.3/known-limitations/expression-index-limitations.md new file mode 100644 index 00000000000..c0e94185948 --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/expression-index-limitations.md @@ -0,0 +1,43 @@ +- The expression cannot reference columns outside the index's table. +- Functional expression output must be determined by the input arguments. For example, you can't use the [volatile function]({% link {{ page.version.version }}/functions-and-operators.md %}#function-volatility) `now()` to create an index because its output depends on more than just the function arguments. +- CockroachDB does not allow {% if page.name == "expression-indexes.md" %} expression indexes {% else %} [expression indexes]({% link {{ page.version.version }}/expression-indexes.md %}) {% endif %} to reference [computed columns]({% link {{ page.version.version }}/computed-columns.md %}). 
[#67900](https://github.com/cockroachdb/cockroach/issues/67900) +- CockroachDB does not support expressions as `ON CONFLICT` targets. This means that unique {% if page.name == "expression-indexes.md" %} expression indexes {% else %} [expression indexes]({% link {{ page.version.version }}/expression-indexes.md %}) {% endif %} cannot be selected as arbiters for [`INSERT .. ON CONFLICT`]({% link {{ page.version.version }}/insert.md %}#on-conflict-clause) statements. For example: + + {% include_cached copy-clipboard.html %} + ~~~ sql + CREATE TABLE t (a INT, b INT, UNIQUE INDEX ((a + b))); + ~~~ + + ~~~ + CREATE TABLE + ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ sql + INSERT INTO t VALUES (1, 2) ON CONFLICT ((a + b)) DO NOTHING; + ~~~ + + ~~~ + invalid syntax: statement ignored: at or near "(": syntax error + SQLSTATE: 42601 + DETAIL: source SQL: + INSERT INTO t VALUES (1, 2) ON CONFLICT ((a + b)) DO NOTHING + ^ + HINT: try \h INSERT + ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ sql + INSERT INTO t VALUES (1, 2) ON CONFLICT ((a + b)) DO UPDATE SET a = 10; + ~~~ + + ~~~ + invalid syntax: statement ignored: at or near "(": syntax error + SQLSTATE: 42601 + DETAIL: source SQL: + INSERT INTO t VALUES (1, 2) ON CONFLICT ((a + b)) DO UPDATE SET a = 10 + ^ + HINT: try \h INSERT + ~~~ + + [#67893](https://github.com/cockroachdb/cockroach/issues/67893) diff --git a/src/current/_includes/v25.3/known-limitations/failover-stop-application.md b/src/current/_includes/v25.3/known-limitations/failover-stop-application.md new file mode 100644 index 00000000000..7035b280fa7 --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/failover-stop-application.md @@ -0,0 +1 @@ +After a failover, there is no mechanism to stop applications from connecting to the original primary cluster. It is necessary to redirect application traffic manually, such as by using a network load balancer or adjusting DNS records. \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/follower-reads-limitations.md b/src/current/_includes/v25.3/known-limitations/follower-reads-limitations.md new file mode 100644 index 00000000000..f9ace7d02f6 --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/follower-reads-limitations.md @@ -0,0 +1,67 @@ +##### Exact staleness reads and long-running writes + +Long-running write transactions will create [write intents]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#write-intents) with a timestamp near when the transaction began. When an exact staleness follower read encounters a write intent, it will often end up in a ["transaction wait queue"]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#txnwaitqueue), waiting for the operation to complete; however, this runs counter to the benefit exact staleness reads provide. + +To counteract this, you can issue all follower reads in explicit [transactions set with `HIGH` priority]({% link {{ page.version.version }}/transactions.md %}#transaction-priorities): + +```sql +BEGIN PRIORITY HIGH AS OF SYSTEM TIME follower_read_timestamp(); +SELECT ... +SELECT ... +COMMIT; +``` + +##### Exact staleness read timestamps must be far enough in the past + +If an exact staleness read is not using an [`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}) value far enough in the past, CockroachDB cannot perform a follower read. 
Instead, the read must access the [leaseholder replica]({% link {{ page.version.version }}/architecture/overview.md %}#architecture-leaseholder). This adds network latency if the leaseholder is not the closest replica to the gateway node. Most users will [use the `follower_read_timestamp()` function]({% link {{ page.version.version }}/follower-reads.md %}#run-queries-that-use-exact-staleness-follower-reads) to get a timestamp far enough in the past that there is a high probability of getting a follower read.
+
+##### Bounded staleness read limitations
+
+Bounded staleness reads have the following limitations:
+
+- They must be used in a [single-statement (aka implicit) transaction]({% link {{ page.version.version }}/transactions.md %}#individual-statements).
+- They must read from a single row.
+- They must not require an [index]({% link {{ page.version.version }}/indexes.md %}) [join]({% link {{ page.version.version }}/joins.md %}). In other words, the index used by the read query must be either a [primary]({% link {{ page.version.version }}/primary-key.md %}) [index]({% link {{ page.version.version }}/indexes.md %}), or some other index that covers the entire query by [`STORING`]({% link {{ page.version.version }}/create-index.md %}#store-columns) all columns.
+
+For example, let's look at a read query that cannot be served as a bounded staleness read. We will use a [demo cluster]({% link {{ page.version.version }}/cockroach-demo.md %}), which automatically loads the [MovR dataset]({% link {{ page.version.version }}/movr.md %}).
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+cockroach demo
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SELECT code FROM promo_codes AS OF SYSTEM TIME with_max_staleness('10s') LIMIT 1;
+ERROR: unimplemented: cannot use bounded staleness for queries that may touch more than one row or require an index join
+SQLSTATE: 0A000
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/67562/v23.2
+~~~
+
+As noted by the error message, this query cannot be served as a bounded staleness read because in this case it would touch more than one row. Even though we used a [`LIMIT 1` clause]({% link {{ page.version.version }}/limit-offset.md %}), the query would still have to touch more than one row in order to filter out the additional results.
+
+We can verify that more than one row would be touched by issuing [`EXPLAIN`]({% link {{ page.version.version }}/explain.md %}) on the same query, but without the [`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}) clause:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+EXPLAIN SELECT code FROM promo_codes LIMIT 1;
+~~~
+
+~~~
+  info
+-------------------------------------------------------------------------------
+  distribution: full
+  vectorized: true
+
+  • scan
+    estimated row count: 1 (0.10% of the table; stats collected 1 minute ago)
+    table: promo_codes@primary
+    spans: LIMITED SCAN
+    limit: 1
+(8 rows)
+~~~
+
+The output verifies that this query performs a scan of the primary [index]({% link {{ page.version.version }}/indexes.md %}) on the `promo_codes` table, which is why it cannot be used for a bounded staleness read.
+
+For an example showing how to successfully perform a bounded staleness read, see [Run queries that use bounded staleness follower reads]({% link {{ page.version.version }}/follower-reads.md %}#run-queries-that-use-bounded-staleness-follower-reads).
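+
+As a quick sketch, a single-row point lookup on the primary key satisfies all three constraints above and can be served as a bounded staleness read (the filter value below is illustrative):
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+-- Reads exactly one row via the primary key, so no index join is required.
+-- The promo code value is a hypothetical example.
+SELECT code FROM promo_codes AS OF SYSTEM TIME with_max_staleness('10s') WHERE code = 'promo_code_0';
+~~~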
diff --git a/src/current/_includes/v25.3/known-limitations/forecasted-stats-limitations.md b/src/current/_includes/v25.3/known-limitations/forecasted-stats-limitations.md new file mode 100644 index 00000000000..c8753124a96 --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/forecasted-stats-limitations.md @@ -0,0 +1,9 @@ +- The following [cluster settings]({% link {{ page.version.version }}/cluster-settings.md %}) do not immediately take effect, and instead only take effect when new statistics are collected for a table. + + - [`sql.stats.forecasts.max_decrease`]({% link {{ page.version.version }}/cluster-settings.md %}#setting-sql-stats-forecasts-max-decrease) + - [`sql.stats.forecasts.min_goodness_of_fit`]({% link {{ page.version.version }}/cluster-settings.md %}#setting-sql-stats-forecasts-min-goodness-of-fit) + - [`sql.stats.forecasts.min_observations`]({% link {{ page.version.version }}/cluster-settings.md %}#setting-sql-stats-forecasts-min-observations) + + Although [`SHOW STATISTICS WITH FORECAST`]({% link {{ page.version.version }}/show-statistics.md %}#display-forecasted-statistics) shows the settings taking effect immediately, they do not actually take effect until new statistics are collected (as can be verified with [`EXPLAIN`]({% link {{ page.version.version }}/explain.md %})). + + As a workaround, disable and enable forecasting at the [cluster]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-clusters) or [table]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-tables) level. This will invalidate the statistics cache and cause these settings to take effect immediately. [#123852](https://github.com/cockroachdb/cockroach/issues/123852) \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/full-text-search-unsupported.md b/src/current/_includes/v25.3/known-limitations/full-text-search-unsupported.md new file mode 100644 index 00000000000..7b5a83f2cae --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/full-text-search-unsupported.md @@ -0,0 +1,14 @@ +- Aspects of [text search configurations]({% link {{ page.version.version }}/full-text-search.md %}#text-search-configuration) other than the specified dictionary. +- `websearch_to_tsquery()` built-in function. +- `tsquery_phrase()` built-in function. +- `ts_rank_cd()` built-in function. +- `setweight()` built-in function. +- Inverted joins on `TSVECTOR` values. +- `tsvector || tsvector` comparisons. +- `tsquery || tsquery` comparisons. +- `tsquery && tsquery` comparisons. +- `tsquery <-> tsquery` comparisons. +- `!! tsquery` comparisons. +- `tsquery @> tsquery` and `tsquery <@ tsquery` comparisons. + +[#41288](https://github.com/cockroachdb/cockroach/issues/41288) \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/generic-query-plan-limitations.md b/src/current/_includes/v25.3/known-limitations/generic-query-plan-limitations.md new file mode 100644 index 00000000000..e28e66d5f32 --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/generic-query-plan-limitations.md @@ -0,0 +1,2 @@ +- Because [generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache) use lookup joins instead of the scans and revscans used by custom query plans, generic query plans do not perform as well as custom query plans in some cases. 
[#128916](https://github.com/cockroachdb/cockroach/issues/128916) +- [Generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-type) are not included in the [plan cache]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache). This means a generic query plan built and optimized for a prepared statement in one session cannot be used by another session. To reuse generic query plans for maximum performance, a prepared statement should be executed multiple times instead of prepared and executed once. [#128911](https://github.com/cockroachdb/cockroach/issues/128911) \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/grant-revoke-schema-changes.md b/src/current/_includes/v25.3/known-limitations/grant-revoke-schema-changes.md new file mode 100644 index 00000000000..cbe1cd8d3eb --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/grant-revoke-schema-changes.md @@ -0,0 +1,19 @@ +User/role management operations (such as [`GRANT`]({% link {{ page.version.version }}/grant.md %}) and [`REVOKE`]({% link {{ page.version.version }}/revoke.md %})) are [schema changes]({% link {{ page.version.version }}/online-schema-changes.md %}). As such, they inherit the [limitations of schema changes]({% link {{ page.version.version }}/online-schema-changes.md %}#known-limitations). + +For example, schema changes wait for concurrent [transactions]({% link {{ page.version.version }}/transactions.md %}) using the same resources as the schema changes to complete. In the case of [role memberships]({% link {{ page.version.version }}/security-reference/authorization.md %}#roles) being modified inside a transaction, most transactions need access to the set of role memberships. Using the default settings, role modifications require schema leases to expire, which can take up to 5 minutes. + +This means that [long-running transactions]({% link {{ page.version.version }}/query-behavior-troubleshooting.md %}#hanging-or-stuck-queries) elsewhere in the system can cause user/role management operations inside transactions to take several minutes to complete. This can have a cascading effect. When a user/role management operation inside a transaction takes a long time to complete, it can in turn block all user-initiated transactions being run by your application, since the user/role management operation in the transaction has to commit before any other transactions that access role memberships (i.e., most transactions) can make progress. + +If you want user/role management operations to finish more quickly, and do not care whether concurrent transactions will immediately see the side effects of those operations, set the [session variable]({% link {{ page.version.version }}/set-vars.md %}) `allow_role_memberships_to_change_during_transaction` to `true`. + +When this session variable is enabled, any user/role management operations issued in the current session will only need to wait for the completion of statements in other sessions where `allow_role_memberships_to_change_during_transaction` is not enabled. + +To accelerate user/role management operations across your entire application, you have the following options: + +1. Set the session variable in all sessions by [passing it in the client connection string]({% link {{ page.version.version }}/connection-parameters.md %}#supported-options-parameters). +1. 
Apply the `allow_role_memberships_to_change_during_transaction` setting globally to an entire cluster using the [`ALTER ROLE ALL`]({% link {{ page.version.version }}/alter-role.md %}#set-default-session-variable-values-for-all-users) statement: + + {% include_cached copy-clipboard.html %} + ~~~ sql + ALTER ROLE ALL SET allow_role_memberships_to_change_during_transaction = true; + ~~~ diff --git a/src/current/_includes/v25.3/known-limitations/import-high-disk-contention.md b/src/current/_includes/v25.3/known-limitations/import-high-disk-contention.md new file mode 100644 index 00000000000..e5abd405038 --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/import-high-disk-contention.md @@ -0,0 +1,6 @@ +[`IMPORT INTO`]({% link {{ page.version.version }}/import-into.md %}) can sometimes fail with a "context canceled" error, or can restart itself many times without ever finishing. If this is happening, it is likely due to a high amount of disk contention. This can be mitigated by setting the `kv.bulk_io_write.max_rate` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) to a value below your max disk write speed. For example, to set it to 10MB/s, execute: + +{% include_cached copy-clipboard.html %} +~~~ sql +> SET CLUSTER SETTING kv.bulk_io_write.max_rate = '10MB'; +~~~ diff --git a/src/current/_includes/v25.3/known-limitations/import-into-limitations.md b/src/current/_includes/v25.3/known-limitations/import-into-limitations.md new file mode 100644 index 00000000000..6a7cce6f727 --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/import-into-limitations.md @@ -0,0 +1,11 @@ +[`IMPORT INTO`]({% link {{ page.version.version }}/import-into.md %}) has the following limitations: + +- While importing into an existing table, the table is taken offline. +- After importing into an existing table, [constraints]({% link {{ page.version.version }}/constraints.md %}) will be un-validated and need to be [re-validated]({% link {{ page.version.version }}/alter-table.md %}#validate-constraint). +- Imported rows must not conflict with existing rows in the table or any unique secondary indexes. +- `IMPORT INTO` works for only a single existing table. +- `IMPORT INTO` can sometimes fail with a "context canceled" error, or can restart itself many times without ever finishing. If this is happening, it is likely due to a high amount of disk contention. This can be mitigated by setting the `kv.bulk_io_write.max_rate` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) to a value below your max disk write speed. For example, to set it to 10MB/s, execute: + {% include_cached copy-clipboard.html %} + ~~~ sql + SET CLUSTER SETTING kv.bulk_io_write.max_rate = '10MB'; + ~~~ \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/jsonb-limitations.md b/src/current/_includes/v25.3/known-limitations/jsonb-limitations.md new file mode 100644 index 00000000000..81f2da52861 --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/jsonb-limitations.md @@ -0,0 +1 @@ +- You cannot use [primary key]({% link {{ page.version.version }}/primary-key.md %}), [foreign key]({% link {{ page.version.version }}/foreign-key.md %}), and [unique]({% link {{ page.version.version }}/unique.md %}) [constraints]({% link {{ page.version.version }}/constraints.md %}) on `JSONB` values. 
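+
+A common workaround, sketched below with hypothetical table and field names, is to project the relevant `JSONB` field into a [stored computed column]({% link {{ page.version.version }}/computed-columns.md %}) and attach the constraint to that column instead:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+-- "student_profiles" and the "id" field are illustrative names.
+CREATE TABLE student_profiles (
+  id STRING PRIMARY KEY AS (profile->>'id') STORED,
+  profile JSONB
+);
+~~~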
\ No newline at end of file
diff --git a/src/current/_includes/v25.3/known-limitations/jsonpath-limitations.md b/src/current/_includes/v25.3/known-limitations/jsonpath-limitations.md
new file mode 100644
index 00000000000..9b51bfb6e87
--- /dev/null
+++ b/src/current/_includes/v25.3/known-limitations/jsonpath-limitations.md
@@ -0,0 +1,2 @@
+- The following keywords are only accepted in lowercase: `strict`, `lax`, `exists`, `like_regex`, `flag`, `is unknown`, `to`, `last`. [#144255](https://github.com/cockroachdb/cockroach/issues/144255)
+- Comparisons involving empty arrays (e.g., `SELECT jsonb_path_query('{"a": [1], "b": []}', '$.a == $.b');`) return `null`, rather than `false` as in PostgreSQL. [#145099](https://github.com/cockroachdb/cockroach/issues/145099)
\ No newline at end of file
diff --git a/src/current/_includes/v25.3/known-limitations/ldr-column-families.md b/src/current/_includes/v25.3/known-limitations/ldr-column-families.md
new file mode 100644
index 00000000000..2a7c3bbba52
--- /dev/null
+++ b/src/current/_includes/v25.3/known-limitations/ldr-column-families.md
@@ -0,0 +1 @@
+Replicating tables cannot contain [column families]({% link {{ page.version.version }}/column-families.md %}). [#133562](https://github.com/cockroachdb/cockroach/issues/133562)
\ No newline at end of file
diff --git a/src/current/_includes/v25.3/known-limitations/ldr-composite-primary.md b/src/current/_includes/v25.3/known-limitations/ldr-composite-primary.md
new file mode 100644
index 00000000000..ac897af35a7
--- /dev/null
+++ b/src/current/_includes/v25.3/known-limitations/ldr-composite-primary.md
@@ -0,0 +1 @@
+The [primary key]({% link {{ page.version.version }}/primary-key.md %}) in replicating tables cannot contain composite types. [#133572](https://github.com/cockroachdb/cockroach/issues/133572)
\ No newline at end of file
diff --git a/src/current/_includes/v25.3/known-limitations/ldr-indexes.md b/src/current/_includes/v25.3/known-limitations/ldr-indexes.md
new file mode 100644
index 00000000000..0bf7f60c2d4
--- /dev/null
+++ b/src/current/_includes/v25.3/known-limitations/ldr-indexes.md
@@ -0,0 +1 @@
+Replicating tables cannot contain an [index]({% link {{ page.version.version }}/indexes.md %}) that requires expression evaluation before insertion. [#133560](https://github.com/cockroachdb/cockroach/issues/133560)
\ No newline at end of file
diff --git a/src/current/_includes/v25.3/known-limitations/ldr-sequences.md b/src/current/_includes/v25.3/known-limitations/ldr-sequences.md
new file mode 100644
index 00000000000..4e39f3630e3
--- /dev/null
+++ b/src/current/_includes/v25.3/known-limitations/ldr-sequences.md
@@ -0,0 +1 @@
+Replicating tables cannot reference [sequences]({% link {{ page.version.version }}/create-sequence.md %}). [#132303](https://github.com/cockroachdb/cockroach/issues/132303)
\ No newline at end of file
diff --git a/src/current/_includes/v25.3/known-limitations/ldr-triggers.md b/src/current/_includes/v25.3/known-limitations/ldr-triggers.md
new file mode 100644
index 00000000000..55f8e885b97
--- /dev/null
+++ b/src/current/_includes/v25.3/known-limitations/ldr-triggers.md
@@ -0,0 +1 @@
+Replicating tables cannot reference triggers.
[#132301](https://github.com/cockroachdb/cockroach/issues/132301) \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/ldr-udfs.md b/src/current/_includes/v25.3/known-limitations/ldr-udfs.md new file mode 100644 index 00000000000..fb642f14751 --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/ldr-udfs.md @@ -0,0 +1 @@ +Replicating tables cannot reference [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}). [#132302](https://github.com/cockroachdb/cockroach/issues/132302) \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/locality-optimized-search-limited-records.md b/src/current/_includes/v25.3/known-limitations/locality-optimized-search-limited-records.md new file mode 100644 index 00000000000..7a2be1ca3ef --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/locality-optimized-search-limited-records.md @@ -0,0 +1 @@ +- {% if page.name == "cost-based-optimizer.md" %} Locality optimized search {% else %} [Locality optimized search]({% link {{ page.version.version }}/cost-based-optimizer.md %}#locality-optimized-search-in-multi-region-clusters) {% endif %} works only for queries selecting a limited number of records (up to 100,000 unique keys). diff --git a/src/current/_includes/v25.3/known-limitations/locality-optimized-search-virtual-computed-columns.md b/src/current/_includes/v25.3/known-limitations/locality-optimized-search-virtual-computed-columns.md new file mode 100644 index 00000000000..d6acf418aa8 --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/locality-optimized-search-virtual-computed-columns.md @@ -0,0 +1 @@ +- {% if page.name == "cost-based-optimizer.md" %} Locality optimized search {% else %} [Locality optimized search]({% link {{ page.version.version }}/cost-based-optimizer.md %}#locality-optimized-search-in-multi-region-clusters) {% endif %} does not work for queries that use [partitioned unique indexes]({% link {{ page.version.version }}/partitioning.md %}#partition-using-a-secondary-index) on [virtual computed columns](computed-columns.html#virtual-computed-columns). A workaround for computed columns is to make the virtual computed column a [stored computed column](computed-columns.html#stored-computed-columns). Locality optimized search does not work for queries that use partitioned unique [expression indexes](expression-indexes.html). [#68129](https://github.com/cockroachdb/cockroach/issues/68129) diff --git a/src/current/_includes/v25.3/known-limitations/logging-limitations.md b/src/current/_includes/v25.3/known-limitations/logging-limitations.md new file mode 100644 index 00000000000..78c70d14234 --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/logging-limitations.md @@ -0,0 +1 @@ +Log files can only be accessed in the DB Console if they are stored in the same directory as the file sink for the `DEV` channel. diff --git a/src/current/_includes/v25.3/known-limitations/materialized-views-no-stats.md b/src/current/_includes/v25.3/known-limitations/materialized-views-no-stats.md new file mode 100644 index 00000000000..02f2bd787c4 --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/materialized-views-no-stats.md @@ -0,0 +1 @@ +- The optimizer may not select the most optimal query plan when querying materialized views because CockroachDB does not [collect statistics]({% link {{ page.version.version }}/cost-based-optimizer.md %}#table-statistics) on materialized views. 
[#78181](https://github.com/cockroachdb/cockroach/issues/78181). diff --git a/src/current/_includes/v25.3/known-limitations/multiple-arbiter-indexes.md b/src/current/_includes/v25.3/known-limitations/multiple-arbiter-indexes.md new file mode 100644 index 00000000000..c9861623314 --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/multiple-arbiter-indexes.md @@ -0,0 +1 @@ +CockroachDB does not currently support multiple arbiter indexes for [`INSERT ON CONFLICT DO UPDATE`]({% link {{ page.version.version }}/insert.md %}#on-conflict-clause), and will return an error if there are multiple unique or exclusion constraints matching the `ON CONFLICT DO UPDATE` specification. [#53170](https://github.com/cockroachdb/cockroach/issues/53170) \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/online-schema-changes-limitations.md b/src/current/_includes/v25.3/known-limitations/online-schema-changes-limitations.md new file mode 100644 index 00000000000..1e7b619fdf8 --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/online-schema-changes-limitations.md @@ -0,0 +1,25 @@ +##### Schema changes within transactions + +Most schema changes should not be performed within an explicit transaction with multiple statements, as they do not have the same atomicity guarantees as other SQL statements. Execute schema changes either as single statements (as an implicit transaction), or in an explicit transaction consisting of the single schema change statement. There are some exceptions to this, detailed below. + +Schema changes keep your data consistent at all times, but they do not run inside [transactions][txns] in the general case. Making schema changes transactional would mean requiring a given schema change to propagate across all the nodes of a cluster. This would block all user-initiated transactions being run by your application, since the schema change would have to commit before any other transactions could make progress. This would prevent the cluster from servicing reads and writes during the schema change, requiring application downtime. + +{{site.data.alerts.callout_success}} +Some tools and applications may be able to work around CockroachDB's lack of transactional schema changes by [enabling a setting that automatically commits before running schema changes inside transactions]({% link {{ page.version.version }}/online-schema-changes.md %}#enable-automatic-commit-before-running-schema-changes-inside-transactions). +{{site.data.alerts.end}} + +Some schema change operations can be run within explicit, multi-statement transactions. `CREATE TABLE` and `CREATE INDEX` statements can be run within the same transaction with the same atomicity guarantees as other SQL statements. There are no performance or rollback issues when using these statements within a multi-statement transaction. + +{% include {{ page.version.version }}/known-limitations/schema-changes-within-transactions.md %} + +##### Schema change DDL statements inside a multi-statement transaction can fail while other statements succeed + +{% include {{ page.version.version }}/known-limitations/schema-change-ddl-inside-multi-statement-transactions.md %} + +##### No online schema changes if primary key change in progress + +You cannot start an online schema change on a table if a [primary key change]({% link {{ page.version.version }}/alter-table.md %}#alter-primary-key) is currently in progress on the same table.
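+ +A minimal sketch of how to check for this situation before issuing a new schema change is to query [`SHOW JOBS`]({% link {{ page.version.version }}/show-jobs.md %}) for running schema change jobs (the exact `description` text shown for a primary key change may vary by version): + +{% include_cached copy-clipboard.html %} +~~~ sql +-- List schema change jobs that are still running; wait for any +-- in-progress primary key change on the table to finish before +-- starting another schema change on it. +SELECT job_id, description, status +FROM [SHOW JOBS] +WHERE job_type = 'SCHEMA CHANGE' AND status = 'running'; +~~~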
+ +##### No online schema changes between executions of prepared statements + +{% include {{ page.version.version }}/known-limitations/schema-changes-between-prepared-statements.md %} \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/partitioning-with-placeholders.md b/src/current/_includes/v25.3/known-limitations/partitioning-with-placeholders.md new file mode 100644 index 00000000000..7abc2e1744a --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/partitioning-with-placeholders.md @@ -0,0 +1 @@ +When defining a [table partition]({% link {{ page.version.version }}/partitioning.md %}), either during table creation or table alteration, it is not possible to use placeholders in the `PARTITION BY` clause. diff --git a/src/current/_includes/v25.3/known-limitations/per-replica-circuit-breaker-limitations.md b/src/current/_includes/v25.3/known-limitations/per-replica-circuit-breaker-limitations.md new file mode 100644 index 00000000000..18ceb4fefed --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/per-replica-circuit-breaker-limitations.md @@ -0,0 +1,3 @@ +[Per-replica circuit breakers]({% link {{ page.version.version }}/architecture/replication-layer.md %}#per-replica-circuit-breakers) have the following limitations: + +- They are not tripped if _all_ replicas of a range [become unavailable]({% link {{ page.version.version }}/cluster-setup-troubleshooting.md %}#db-console-shows-under-replicated-unavailable-ranges), because the circuit breaker mechanism operates per-replica. This means at least one replica needs to be available to receive the request in order for the breaker to trip. diff --git a/src/current/_includes/v25.3/known-limitations/physical-cluster-replication.md b/src/current/_includes/v25.3/known-limitations/physical-cluster-replication.md new file mode 100644 index 00000000000..c8a872968ef --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/physical-cluster-replication.md @@ -0,0 +1,3 @@ +- Physical cluster replication is supported in CockroachDB {{ site.data.products.core }} clusters on v23.2 or later. The primary cluster can be a [new]({% link {{ page.version.version }}/set-up-physical-cluster-replication.md %}#step-1-create-the-primary-cluster) or [existing]({% link {{ page.version.version }}/set-up-physical-cluster-replication.md %}#set-up-pcr-from-an-existing-cluster) cluster. The standby cluster must be a [new cluster started with the `--virtualized-empty` flag]({% link {{ page.version.version }}/set-up-physical-cluster-replication.md %}#step-2-create-the-standby-cluster). +- The primary and standby clusters must have the same [zone configurations]({% link {{ page.version.version }}/configure-replication-zones.md %}). +- Before failover to the standby, the standby cluster does not support running [backups]({% link {{ page.version.version }}/backup-and-restore-overview.md %}) or [changefeeds]({% link {{ page.version.version }}/change-data-capture-overview.md %}). diff --git a/src/current/_includes/v25.3/known-limitations/plpgsql-limitations.md b/src/current/_includes/v25.3/known-limitations/plpgsql-limitations.md new file mode 100644 index 00000000000..c17954748a0 --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/plpgsql-limitations.md @@ -0,0 +1,25 @@ +{% if page.name != "known-limitations.md" # New limitations in v24.2 %} +{% endif %} +- It is not possible to use a variable as a target more than once in the same `INTO` clause. For example, `SELECT 1, 2 INTO x, x;`. 
[#121605](https://github.com/cockroachdb/cockroach/issues/121605) +- PL/pgSQL variable declarations cannot inherit the type of a table row or column using `%TYPE` or `%ROWTYPE` syntax. [#114676](https://github.com/cockroachdb/cockroach/issues/114676) +- PL/pgSQL arguments cannot be referenced with ordinals (e.g., `$1`, `$2`). [#114701](https://github.com/cockroachdb/cockroach/issues/114701) +- The following statements are not supported: + - `FOR` cursor loops, `FOR` query loops, and `FOREACH` loops. [#105246](https://github.com/cockroachdb/cockroach/issues/105246) + - `PERFORM`, `EXECUTE`, `GET DIAGNOSTICS`, and `CASE`. [#117744](https://github.com/cockroachdb/cockroach/issues/117744) +- PL/pgSQL exception blocks cannot catch [transaction retry errors]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}). [#111446](https://github.com/cockroachdb/cockroach/issues/111446) +- `RAISE` statements cannot be annotated with names of schema objects related to the error (i.e., using `COLUMN`, `CONSTRAINT`, `DATATYPE`, `TABLE`, or `SCHEMA`). [#106237](https://github.com/cockroachdb/cockroach/issues/106237) +- `RAISE` statements message the client directly and do not produce log output. [#117750](https://github.com/cockroachdb/cockroach/issues/117750) +- `ASSERT` debugging checks are not supported. [#117744](https://github.com/cockroachdb/cockroach/issues/117744) +- `RECORD` parameters and variables are not supported in [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}). [#105713](https://github.com/cockroachdb/cockroach/issues/105713) +- Variable shadowing (e.g., declaring a variable with the same name in an inner block) is not supported in PL/pgSQL. [#117508](https://github.com/cockroachdb/cockroach/issues/117508) +- Syntax for accessing members of composite types without parentheses is not supported. [#114687](https://github.com/cockroachdb/cockroach/issues/114687) +- `NOT NULL` variable declarations are not supported. [#105243](https://github.com/cockroachdb/cockroach/issues/105243) +- Cursors opened in PL/pgSQL execute their queries on opening, affecting performance and resource usage. [#111479](https://github.com/cockroachdb/cockroach/issues/111479) +- Cursors in PL/pgSQL cannot be declared with arguments. [#117746](https://github.com/cockroachdb/cockroach/issues/117746) +- `OPEN FOR EXECUTE` is not supported for opening cursors. [#117744](https://github.com/cockroachdb/cockroach/issues/117744) +- The `print_strict_params` option is not supported in PL/pgSQL. [#123671](https://github.com/cockroachdb/cockroach/issues/123671) +- The `FOUND` local variable, which checks whether a statement affected any rows, is not supported in PL/pgSQL. [#122306](https://github.com/cockroachdb/cockroach/issues/122306) +- By default, when a PL/pgSQL variable conflicts with a column name, CockroachDB resolves the ambiguity by treating it as a column reference rather than a variable reference. This behavior differs from PostgreSQL, where an ambiguous column error is reported, and it is possible to change the `plpgsql.variable_conflict` setting in order to prefer either columns or variables. [#115680](https://github.com/cockroachdb/cockroach/issues/115680) +- It is not possible to define a `RECORD`-returning PL/pgSQL function that returns different-typed expressions from different `RETURN` statements. CockroachDB requires a consistent return type for `RECORD`-returning functions.
[#115384](https://github.com/cockroachdb/cockroach/issues/115384) +- Variables cannot be declared with an associated collation using the `COLLATE` keyword. [#105245](https://github.com/cockroachdb/cockroach/issues/105245) +- Variables cannot be accessed using the `label.var_name` pattern. [#122322](https://github.com/cockroachdb/cockroach/issues/122322) \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/read-committed-limitations.md b/src/current/_includes/v25.3/known-limitations/read-committed-limitations.md new file mode 100644 index 00000000000..63f83b15dd8 --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/read-committed-limitations.md @@ -0,0 +1,6 @@ +- Schema changes (e.g., [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}), [`CREATE SCHEMA`]({% link {{ page.version.version }}/create-schema.md %}), [`CREATE INDEX`]({% link {{ page.version.version }}/create-index.md %})) cannot be performed within explicit `READ COMMITTED` transactions when the [`autocommit_before_ddl` session setting]({% link {{page.version.version}}/set-vars.md %}#autocommit-before-ddl) is set to `off`, and will cause transactions to abort. As a workaround, [set the transaction's isolation level]({% link {{ page.version.version }}/read-committed.md %}#set-the-current-transaction-to-read-committed) to `SERIALIZABLE`. [#114778](https://github.com/cockroachdb/cockroach/issues/114778) +- Multi-column-family checks during updates are not supported under `READ COMMITTED` isolation. [#112488](https://github.com/cockroachdb/cockroach/issues/112488) +- Because locks acquired by [foreign key]({% link {{ page.version.version }}/foreign-key.md %}) checks, [`SELECT FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}), and [`SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) are fully replicated under `READ COMMITTED` isolation, some queries experience a delay for Raft replication. +- [Foreign key]({% link {{ page.version.version }}/foreign-key.md %}) checks are not performed in parallel under `READ COMMITTED` isolation. +- [`SELECT FOR UPDATE` and `SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) statements are less optimized under `READ COMMITTED` isolation than under `SERIALIZABLE` isolation. Under `READ COMMITTED` isolation, `SELECT FOR UPDATE` and `SELECT FOR SHARE` usually perform an extra lookup join for every locked table when compared to the same queries under `SERIALIZABLE`. In addition, some optimization steps (such as de-correlation of correlated [subqueries]({% link {{ page.version.version }}/subqueries.md %})) are not currently performed on these queries. +- Regardless of isolation level, [`SELECT FOR UPDATE` and `SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) statements in CockroachDB do not prevent insertion of new rows matching the search condition (i.e., [phantom reads]({% link {{ page.version.version }}/read-committed.md %}#non-repeatable-reads-and-phantom-reads)). This matches PostgreSQL behavior at all isolation levels. 
[#120673](https://github.com/cockroachdb/cockroach/issues/120673) \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/restore-multiregion-match.md b/src/current/_includes/v25.3/known-limitations/restore-multiregion-match.md new file mode 100644 index 00000000000..20ddbb0c930 --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/restore-multiregion-match.md @@ -0,0 +1,50 @@ +[`REGIONAL BY TABLE`]({% link {{ page.version.version }}/table-localities.md %}#regional-tables) and [`REGIONAL BY ROW`]({% link {{ page.version.version }}/table-localities.md %}#regional-by-row-tables) tables can be restored **only** if the regions of the backed-up table match those of the target database. All of the following must be true for `RESTORE` to be successful: + + * The source database and the destination database have the same set of [regions]({% link {{ page.version.version }}/multiregion-overview.md %}#database-regions). + * The regions were added to each of the databases in the same order. + * The databases have the same [primary region]({% link {{ page.version.version }}/alter-database.md %}#set-primary-region). + + The following example is considered to have **mismatched** regions because the database regions were not added in the same order and the primary regions do not match. + + Running on the source database: + + ~~~ sql + ALTER DATABASE source_database SET PRIMARY REGION "us-east1"; + ~~~ + ~~~ sql + ALTER DATABASE source_database ADD REGION "us-west1"; + ~~~ + + Running on the destination database: + + ~~~ sql + ALTER DATABASE destination_database SET PRIMARY REGION "us-west1"; + ~~~ + ~~~ sql + ALTER DATABASE destination_database ADD REGION "us-east1"; + ~~~ + + In addition, the following scenario has mismatched regions between the databases because the regions were not added to each database in the same order. + + Running on the source database: + + ~~~ sql + ALTER DATABASE source_database SET PRIMARY REGION "us-east1"; + ~~~ + ~~~ sql + ALTER DATABASE source_database ADD REGION "us-west1"; + ~~~ + + Running on the destination database: + + ~~~ sql + ALTER DATABASE destination_database SET PRIMARY REGION "us-west1"; + ~~~ + ~~~ sql + ALTER DATABASE destination_database ADD REGION "us-east1"; + ~~~ + ~~~ sql + ALTER DATABASE destination_database SET PRIMARY REGION "us-east1"; + ~~~ + + [#71071](https://github.com/cockroachdb/cockroach/issues/71071) \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/restore-tables-non-multi-reg.md b/src/current/_includes/v25.3/known-limitations/restore-tables-non-multi-reg.md new file mode 100644 index 00000000000..5390f2d09ee --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/restore-tables-non-multi-reg.md @@ -0,0 +1 @@ +Restoring [`GLOBAL`]({% link {{ page.version.version }}/table-localities.md %}#global-tables) and [`REGIONAL BY TABLE`]({% link {{ page.version.version }}/table-localities.md %}#regional-tables) tables into a **non**-multi-region database is not supported.
[#71502](https://github.com/cockroachdb/cockroach/issues/71502) diff --git a/src/current/_includes/v25.3/known-limitations/restore-udf.md b/src/current/_includes/v25.3/known-limitations/restore-udf.md new file mode 100644 index 00000000000..a4a4bc080fe --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/restore-udf.md @@ -0,0 +1 @@ +`RESTORE` will not restore a table that references a [UDF]({% link {{ page.version.version }}/user-defined-functions.md %}), unless you skip restoring the function with the {% if page.name == "restore.md" %} [`skip_missing_udfs`](#skip-missing-udfs) {% else %} [`skip_missing_udfs`]({% link {{ page.version.version }}/restore.md %}#skip-missing-udfs) {% endif %} option. Alternatively, take a [database-level backup]({% link {{ page.version.version }}/backup.md %}#back-up-a-database) to include everything needed to restore the table. [#118195](https://github.com/cockroachdb/cockroach/issues/118195) \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/rls-values-on-conflict-do-nothing.md b/src/current/_includes/v25.3/known-limitations/rls-values-on-conflict-do-nothing.md new file mode 100644 index 00000000000..c85dea7987a --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/rls-values-on-conflict-do-nothing.md @@ -0,0 +1 @@ +`ON CONFLICT ... DO NOTHING`: CockroachDB does not run the constraint and row-level policy checks on the `VALUES` clause if the candidate row has a conflict. [#35370](https://github.com/cockroachdb/cockroach/issues/35370). diff --git a/src/current/_includes/v25.3/known-limitations/rls-visibility-issue.md b/src/current/_includes/v25.3/known-limitations/rls-visibility-issue.md new file mode 100644 index 00000000000..453059825a3 --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/rls-visibility-issue.md @@ -0,0 +1 @@ +Under certain conditions, such as when executing certain SQL functions, CockroachDB's row-level security (RLS) implementation exposes metadata about the number of restricted rows in a table. For example, when a user applies arbitrary SQL filters on a table with RLS enabled, it's possible for the user to see how many total rows are in the table; this count includes rows that the user does not have direct access to. This metadata leakage can also occur when statements like [`EXPLAIN ANALYZE`]({% link {{ page.version.version }}/explain.md %}) are used, as the output includes a count of the number of rows scanned by the query that can include the number of restricted rows, even though the rows themselves are not directly accessible. [#146952](https://github.com/cockroachdb/cockroach/issues/146952) diff --git a/src/current/_includes/v25.3/known-limitations/routine-limitations.md b/src/current/_includes/v25.3/known-limitations/routine-limitations.md new file mode 100644 index 00000000000..4718c6c7abf --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/routine-limitations.md @@ -0,0 +1,10 @@ +{% if page.name != "known-limitations.md" # New limitations in v24.2 %} +{% endif %} +- Routines cannot be invoked with named arguments, e.g., `SELECT foo(a => 1, b => 2);` or `SELECT foo(b := 1, a := 2);`. [#122264](https://github.com/cockroachdb/cockroach/issues/122264) +- Routines cannot be created if they reference temporary tables. [#121375](https://github.com/cockroachdb/cockroach/issues/121375) +- Routines cannot be created with unnamed `INOUT` parameters. For example, `CREATE PROCEDURE p(INOUT INT) AS $$ BEGIN NULL; END; $$ LANGUAGE PLpgSQL;`. 
[#121251](https://github.com/cockroachdb/cockroach/issues/121251) +- Routines cannot be created if they return fewer columns than declared. For example, `CREATE FUNCTION f(OUT sum INT, INOUT a INT, INOUT b INT) LANGUAGE SQL AS $$ SELECT (a + b, b); $$;`. [#121247](https://github.com/cockroachdb/cockroach/issues/121247) +- Routines cannot be created with an `OUT` parameter of type `RECORD`. [#123448](https://github.com/cockroachdb/cockroach/issues/123448) +- DDL statements (e.g., `CREATE TABLE`, `CREATE INDEX`) are not allowed within UDFs or stored procedures. [#110080](https://github.com/cockroachdb/cockroach/issues/110080) +- Polymorphic types cannot be cast to other types (e.g., `TEXT`) within routine parameters. [#123536](https://github.com/cockroachdb/cockroach/issues/123536) +- Routine parameters and return types cannot be declared using the `ANYENUM` polymorphic type, which is able to match any [`ENUM`]({% link {{ page.version.version }}/enum.md %}) type. [#123048](https://github.com/cockroachdb/cockroach/issues/123048) \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/row-level-ttl-limitations.md b/src/current/_includes/v25.3/known-limitations/row-level-ttl-limitations.md new file mode 100644 index 00000000000..54b1e3ee66e --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/row-level-ttl-limitations.md @@ -0,0 +1,5 @@ +- Any queries you run against tables with Row-Level TTL enabled (or against tables that have [foreign keys]({% link {{page.version.version}}/foreign-key.md %}) that reference TTL-enabled tables) do not filter out expired rows from the result set (this includes [`UPDATE`s]({% link {{ page.version.version }}/update.md %}) and [`DELETE`s]({% link {{ page.version.version }}/delete.md %})). This feature may be added in a future release. For now, follow the instructions in [Filter out expired rows from a selection query]({% link {{ page.version.version }}/row-level-ttl.md %}#filter-out-expired-rows-from-a-selection-query). +- Enabling Row-Level TTL on a table with multiple [secondary indexes]({% link {{ page.version.version }}/indexes.md %}) can have negative performance impacts on a cluster, including increased [latency]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#service-latency) and [contention]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#transaction-contention). This is particularly true for large tables with terabytes of data and billions of rows that are split up into multiple ranges across separate nodes. + - Increased latency may occur because secondary indexes aren't necessarily stored on the same underlying [ranges]({% link {{ page.version.version }}/architecture/overview.md %}#architecture-range) as a table's [primary indexes]({% link {{ page.version.version }}/indexes.md %}). Further, the secondary indexes' ranges may have [leaseholders]({% link {{ page.version.version }}/architecture/overview.md %}#architecture-leaseholder) located on different nodes than the primary index. + - Increased contention may occur because [intents]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#write-intents) must be written as part of performing the deletions. + - Finally, secondary indexes can also have a negative impact on the overall performance of [TTL jobs]({% link {{ page.version.version }}/row-level-ttl.md %}#view-running-ttl-jobs).
According to internal testing, the [TTL job processing rate]({% link {{ page.version.version }}/ui-ttl-dashboard.md %}#processing-rate) is worse on tables with secondary indexes. If you encounter this situation, decreasing the [`ttl_delete_batch_size` storage parameter]({% link {{ page.version.version }}/row-level-ttl.md %}#param-ttl-delete-batch-size) may help by decreasing the number of ranges that need to be accessed by the job. diff --git a/src/current/_includes/v25.3/known-limitations/schema-change-ddl-inside-multi-statement-transactions.md b/src/current/_includes/v25.3/known-limitations/schema-change-ddl-inside-multi-statement-transactions.md new file mode 100644 index 00000000000..9a3d47b140f --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/schema-change-ddl-inside-multi-statement-transactions.md @@ -0,0 +1,62 @@ +Most schema change [DDL](https://wikipedia.org/wiki/Data_definition_language#ALTER_statement) statements that run inside a multi-statement transaction with non-DDL statements can fail at [`COMMIT`]({% link {{ page.version.version }}/commit-transaction.md %}) time, even if other statements in the transaction succeed. This leaves such transactions in a "partially committed, partially aborted" state that may require manual intervention to determine whether the DDL statements succeeded. + +Some DDL statements do not have this limitation. `CREATE TABLE` and `CREATE INDEX` statements have the same atomicity guarantees as other statements within a transaction. + +If such a failure occurs, CockroachDB will emit a CockroachDB-specific error code, `XXA00`, and the following error message: + +``` +transaction committed but schema change aborted with error: +HINT: Some of the non-DDL statements may have committed successfully, but some of the DDL statement(s) failed. +Manual inspection may be required to determine the actual state of the database. +``` + +{{site.data.alerts.callout_danger}} +If you must execute schema change DDL statements inside a multi-statement transaction, we **strongly recommend** checking for this error code and handling it appropriately every time you execute such transactions. +{{site.data.alerts.end}} + +This error will occur in various scenarios, including but not limited to: + +- Creating a unique index fails because values aren't unique. +- The evaluation of a computed value fails. +- Adding a constraint (or a column with a constraint) fails because the constraint is violated for the default/computed values in the column. + +To see an example of this error, start by creating the following table. + +{% include_cached copy-clipboard.html %} +~~~ sql +CREATE TABLE T(x INT); +INSERT INTO T(x) VALUES (1), (2), (3); +~~~ + +Then, enter the following multi-statement transaction, which will trigger the error. + +{% include_cached copy-clipboard.html %} +~~~ sql +BEGIN; +ALTER TABLE t ADD CONSTRAINT unique_x UNIQUE(x); +INSERT INTO T(x) VALUES (3); +COMMIT; +~~~ + +~~~ +pq: transaction committed but schema change aborted with error: (23505): duplicate key value (x)=(3) violates unique constraint "unique_x" +HINT: Some of the non-DDL statements may have committed successfully, but some of the DDL statement(s) failed. +Manual inspection may be required to determine the actual state of the database. 
+~~~ + +In this example, the [`INSERT`]({% link {{ page.version.version }}/insert.md %}) statement committed, but the [`ALTER TABLE`]({% link {{ page.version.version }}/alter-table.md %}) statement adding a [`UNIQUE` constraint]({% link {{ page.version.version }}/unique.md %}) failed. We can verify this by looking at the data in table `t` and seeing that the additional non-unique value `3` was successfully inserted. + +{% include_cached copy-clipboard.html %} +~~~ sql +SELECT * FROM t; +~~~ + +~~~ + x ++---+ + 1 + 2 + 3 + 3 +(4 rows) +~~~ diff --git a/src/current/_includes/v25.3/known-limitations/schema-changes-between-prepared-statements.md b/src/current/_includes/v25.3/known-limitations/schema-changes-between-prepared-statements.md new file mode 100644 index 00000000000..736fe99df61 --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/schema-changes-between-prepared-statements.md @@ -0,0 +1,33 @@ +When the schema of a table targeted by a prepared statement changes after the prepared statement is created, future executions of the prepared statement could result in an error. For example, adding a column to a table referenced in a prepared statement with a `SELECT *` clause will result in an error: + +{% include_cached copy-clipboard.html %} +~~~ sql +CREATE TABLE users (id INT PRIMARY KEY); +~~~ + +{% include_cached copy-clipboard.html %} +~~~ sql +PREPARE prep1 AS SELECT * FROM users; +~~~ + +{% include_cached copy-clipboard.html %} +~~~ sql +ALTER TABLE users ADD COLUMN name STRING; +~~~ + +{% include_cached copy-clipboard.html %} +~~~ sql +INSERT INTO users VALUES (1, 'Max Roach'); +~~~ + +{% include_cached copy-clipboard.html %} +~~~ sql +EXECUTE prep1; +~~~ + +~~~ +ERROR: cached plan must not change result type +SQLSTATE: 0A000 +~~~ + +Therefore, when possible, explicitly list result columns instead of using `SELECT *` in prepared statements. diff --git a/src/current/_includes/v25.3/known-limitations/schema-changes-within-transactions.md b/src/current/_includes/v25.3/known-limitations/schema-changes-within-transactions.md new file mode 100644 index 00000000000..407d45d02c7 --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/schema-changes-within-transactions.md @@ -0,0 +1,9 @@ +Within a single [transaction]({% link {{ page.version.version }}/transactions.md %}): + +- You can run schema changes inside the same transaction as a [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}) statement. For more information, see [Run schema changes inside a transaction with `CREATE TABLE`]({% link {{ page.version.version }}/online-schema-changes.md %}#run-schema-changes-inside-a-transaction-with-create-table). However, a `CREATE TABLE` statement containing [`FOREIGN KEY`]({% link {{ page.version.version }}/foreign-key.md %}) clauses cannot be followed by statements that reference the new table. +- [Schema change DDL statements inside a multi-statement transaction can fail while other statements succeed](#schema-change-ddl-statements-inside-a-multi-statement-transaction-can-fail-while-other-statements-succeed). +- [`DROP COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#drop-column) can result in data loss if one of the other schema changes in the transaction fails or is canceled. To work around this, move the `DROP COLUMN` statement to its own explicit transaction or run it in a single statement outside the existing transaction, as shown in the sketch after this list.
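+ +The following is a minimal sketch of that workaround, assuming a hypothetical table `t` with an existing column `c`: + +{% include_cached copy-clipboard.html %} +~~~ sql +-- Run the other schema changes in their own explicit transaction. +BEGIN; +ALTER TABLE t ADD COLUMN d INT; +COMMIT; + +-- Then run DROP COLUMN on its own, as a single statement +-- (an implicit transaction), so a failure elsewhere cannot +-- cancel the transaction after the column data is gone. +ALTER TABLE t DROP COLUMN c; +~~~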
+ +{{site.data.alerts.callout_info}} +If a schema change within a transaction fails, manual intervention may be needed to determine which statement has failed. After determining which schema change(s) failed, you can then retry the schema change. +{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/known-limitations/secondary-regions-with-regional-by-row-tables.md b/src/current/_includes/v25.3/known-limitations/secondary-regions-with-regional-by-row-tables.md new file mode 100644 index 00000000000..721bf6f1339 --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/secondary-regions-with-regional-by-row-tables.md @@ -0,0 +1,3 @@ +[Secondary regions]({% link {{ page.version.version }}/multiregion-overview.md %}#secondary-regions) are not compatible with databases containing [`REGIONAL BY ROW`]({% link {{ page.version.version }}/table-localities.md %}#regional-by-row-tables) tables. CockroachDB does not prevent you from defining secondary regions on databases with regional by row tables, but the interaction of these features is not supported. + +Therefore, Cockroach Labs recommends that you avoid defining secondary regions on databases that use regional by row table configurations. diff --git a/src/current/_includes/v25.3/known-limitations/select-for-update-limitations.md b/src/current/_includes/v25.3/known-limitations/select-for-update-limitations.md new file mode 100644 index 00000000000..894f1f9441a --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/select-for-update-limitations.md @@ -0,0 +1,10 @@ +By default under `SERIALIZABLE` isolation, locks acquired using `SELECT ... FOR UPDATE` and `SELECT ... FOR SHARE` are implemented as fast, in-memory [unreplicated locks](architecture/transaction-layer.html#unreplicated-locks). If a [lease transfer]({% link {{ page.version.version }}/architecture/replication-layer.md %}#leases) or [range split/merge]({% link {{ page.version.version }}/architecture/distribution-layer.md %}#range-merges) occurs on a range held by an unreplicated lock, the lock is dropped. The following behaviors can occur: + +- The desired ordering of concurrent accesses to one or more rows of a table expressed by your use of `SELECT ... FOR UPDATE` may not be preserved (that is, a transaction _B_ against some table _T_ that was supposed to wait behind another transaction _A_ operating on _T_ may not wait for transaction _A_). +- The transaction that acquired the (now dropped) unreplicated lock may fail to commit, leading to [transaction retry errors with code `40001`]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}) and the [`restart transaction` error message]({% link {{ page.version.version }}/common-errors.md %}#restart-transaction). + +When running under `SERIALIZABLE` isolation, `SELECT ... FOR UPDATE` and `SELECT ... FOR SHARE` locks should be thought of as best-effort, and should not be relied upon for correctness. Note that [serialization]({% link {{ page.version.version }}/demo-serializable.md %}) is preserved despite this limitation. This limitation is fixed when the `enable_durable_locking_for_serializable` [session setting]({% link {{ page.version.version }}/session-variables.md %}#enable-durable-locking-for-serializable) is set to `true`. + +{{site.data.alerts.callout_info}} +This limitation does **not** apply to [`READ COMMITTED`]({% link {{ page.version.version }}/read-committed.md %}) transactions. 
+{{site.data.alerts.end}} \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/set-transaction-no-rollback.md b/src/current/_includes/v25.3/known-limitations/set-transaction-no-rollback.md new file mode 100644 index 00000000000..414cbac6282 --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/set-transaction-no-rollback.md @@ -0,0 +1,17 @@ +{% if page.name == "set-vars.md" %} `SET` {% else %} [`SET`]({% link {{ page.version.version }}/set-vars.md %}) {% endif %} does not properly apply [`ROLLBACK`]({% link {{ page.version.version }}/rollback-transaction.md %}) within a transaction. For example, in the following transaction, showing the `TIME ZONE` [variable]({% link {{ page.version.version }}/set-vars.md %}#supported-variables) does not return `2` as expected after the rollback: + +~~~ sql +SET TIME ZONE +2; +BEGIN; +SET TIME ZONE +3; +ROLLBACK; +SHOW TIME ZONE; +~~~ + +~~~ +timezone +------------ +3 +~~~ + +[#69396](https://github.com/cockroachdb/cockroach/issues/69396) diff --git a/src/current/_includes/v25.3/known-limitations/show-backup-symlink.md b/src/current/_includes/v25.3/known-limitations/show-backup-symlink.md new file mode 100644 index 00000000000..38ba86fb28f --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/show-backup-symlink.md @@ -0,0 +1 @@ +[`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}) does not support listing backups if the [`nodelocal`]({% link {{ page.version.version }}/cockroach-nodelocal-upload.md %}) storage location is a symlink. [#70260](https://github.com/cockroachdb/cockroach/issues/70260) \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/sql-cursors.md b/src/current/_includes/v25.3/known-limitations/sql-cursors.md new file mode 100644 index 00000000000..bceff96d5a6 --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/sql-cursors.md @@ -0,0 +1,8 @@ +CockroachDB implements SQL {% if page.name == "known-limitations.md" %} [cursor]({% link {{ page.version.version }}/cursors.md %}) {% else %} cursor {% endif %} support with the following limitations: + +- `DECLARE` only supports forward cursors. Reverse cursors created with `DECLARE SCROLL` are not supported. [#77102](https://github.com/cockroachdb/cockroach/issues/77102) +- `FETCH` supports forward, relative, and absolute variants, but only for forward cursors. [#77102](https://github.com/cockroachdb/cockroach/issues/77102) +- `BINARY CURSOR`, which returns data in the PostgreSQL binary format, is not supported. [#77099](https://github.com/cockroachdb/cockroach/issues/77099) +- Scrollable cursor (also known as reverse `FETCH`) is not supported. [#77102](https://github.com/cockroachdb/cockroach/issues/77102) +- [`SELECT ... FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) with a cursor is not supported. [#77103](https://github.com/cockroachdb/cockroach/issues/77103) +- Respect for [`SAVEPOINT`s]({% link {{ page.version.version }}/savepoint.md %}) is not supported. Cursor definitions do not disappear properly if rolled back to a `SAVEPOINT` from before they were created.
[#77104](https://github.com/cockroachdb/cockroach/issues/77104) diff --git a/src/current/_includes/v25.3/known-limitations/srid-4326-limitations.md b/src/current/_includes/v25.3/known-limitations/srid-4326-limitations.md new file mode 100644 index 00000000000..b556a9fbecd --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/srid-4326-limitations.md @@ -0,0 +1 @@ +Defining a custom SRID by inserting rows into [`spatial_ref_sys`]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial_ref_sys) is not currently supported. [#55903](https://github.com/cockroachdb/cockroach/issues/55903) \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/stats-refresh-upgrade.md b/src/current/_includes/v25.3/known-limitations/stats-refresh-upgrade.md new file mode 100644 index 00000000000..3d5a8d26325 --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/stats-refresh-upgrade.md @@ -0,0 +1 @@ +- The [automatic statistics refresher]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-statistics-refresh-rate) automatically checks whether it needs to refresh statistics for every table in the database upon startup of each node in the cluster. If statistics for a table have not been refreshed in a while, this will trigger collection of statistics for that table. If statistics have been refreshed recently, it will not force a refresh. As a result, the automatic statistics refresher does not necessarily perform a refresh of statistics after an [upgrade]({% link {{ page.version.version }}/upgrade-cockroach-version.md %}). This could cause a problem, for example, if the upgrade moves from a version without [histograms]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-histogram-collection) to a version with histograms. To refresh statistics manually, use [`CREATE STATISTICS`](create-statistics.html). [#54816](https://github.com/cockroachdb/cockroach/issues/54816) diff --git a/src/current/_includes/v25.3/known-limitations/stored-proc-limitations.md b/src/current/_includes/v25.3/known-limitations/stored-proc-limitations.md new file mode 100644 index 00000000000..b2ba1b61562 --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/stored-proc-limitations.md @@ -0,0 +1,3 @@ +{% if page.name != "known-limitations.md" # New limitations in v24.2 %} +{% endif %} +- `COMMIT` and `ROLLBACK` statements are not supported within nested procedures. [#122266](https://github.com/cockroachdb/cockroach/issues/122266) \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/trigger-limitations.md b/src/current/_includes/v25.3/known-limitations/trigger-limitations.md new file mode 100644 index 00000000000..fb1c4685480 --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/trigger-limitations.md @@ -0,0 +1,3 @@ +- A [trigger function]({% link {{ page.version.version }}/triggers.md %}#trigger-function) that is used in an existing trigger cannot be replaced with `CREATE OR REPLACE` syntax. To use `CREATE OR REPLACE`, first [drop any triggers]({% link {{ page.version.version }}/drop-trigger.md %}) that are using the function. [#134555](https://github.com/cockroachdb/cockroach/issues/134555) +- Hidden columns are not visible to triggers. 
[#133331](https://github.com/cockroachdb/cockroach/issues/133331) +- {% include {{ page.version.version }}/known-limitations/drop-trigger-limitations.md %} \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/trigram-unsupported-syntax.md b/src/current/_includes/v25.3/known-limitations/trigram-unsupported-syntax.md new file mode 100644 index 00000000000..494730c7ae8 --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/trigram-unsupported-syntax.md @@ -0,0 +1,9 @@ +- `word_similarity()` built-in function. +- `strict_word_similarity()` built-in function. +- `%>` and `<%` comparisons and acceleration. +- `<<%` and `%>>` comparisons and acceleration. +- `<->`, `<<->`, `<->>`, `<<<->`, and `<->>>` comparisons. +- Acceleration on [regex string matching]({% link {{ page.version.version }}/scalar-expressions.md %}#string-matching-using-posix-regular-expressions). +- `%` comparisons, `show_trgm`, and trigram index creation on [collated strings]({% link {{ page.version.version }}/collate.md %}). + +[#41285](https://github.com/cockroachdb/cockroach/issues/41285) \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/udf-limitations.md b/src/current/_includes/v25.3/known-limitations/udf-limitations.md new file mode 100644 index 00000000000..8b2ef66b403 --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/udf-limitations.md @@ -0,0 +1,9 @@ +{% if page.name != "known-limitations.md" # New limitations in v24.2 %} +{% endif %} +- A `RECORD`-returning UDF cannot be created without a `RETURN` statement in the root block, which would restrict the wildcard type to a concrete one. [#122945](https://github.com/cockroachdb/cockroach/issues/122945) +- User-defined functions are not currently supported in: + - Expressions (column, index, constraint) in tables. [#87699](https://github.com/cockroachdb/cockroach/issues/87699) + - Views. [#87699](https://github.com/cockroachdb/cockroach/issues/87699) +- User-defined functions cannot call themselves recursively. [#93049](https://github.com/cockroachdb/cockroach/issues/93049) +- The `setval` function cannot be resolved when used inside UDF bodies. [#110860](https://github.com/cockroachdb/cockroach/issues/110860) +- Casting subqueries to [user-defined types]({% link {{ page.version.version }}/create-type.md %}) in UDFs is not supported. [#108184](https://github.com/cockroachdb/cockroach/issues/108184) \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/vector-limitations.md b/src/current/_includes/v25.3/known-limitations/vector-limitations.md new file mode 100644 index 00000000000..ffdda4d5687 --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/vector-limitations.md @@ -0,0 +1,8 @@ +- {% include {{ page.version.version }}/sql/vector-batch-inserts.md %} +- Creating a vector index through a backfill disables mutations ([`INSERT`]({% link {{ page.version.version }}/insert.md %}), [`UPSERT`]({% link {{ page.version.version }}/upsert.md %}), [`UPDATE`]({% link {{ page.version.version }}/update.md %}), [`DELETE`]({% link {{ page.version.version }}/delete.md %})) on the table. [#144443](https://github.com/cockroachdb/cockroach/issues/144443) +- `IMPORT INTO` is not supported on tables with vector indexes. You can import the vectors first and create the index after import is complete. [#145227](https://github.com/cockroachdb/cockroach/issues/145227) +- Only L2 distance (`<->`) searches are accelerated. 
[#144016](https://github.com/cockroachdb/cockroach/issues/144016) +- Index acceleration with filters is only supported if the filters match prefix columns. [#146145](https://github.com/cockroachdb/cockroach/issues/146145) +- Index recommendations are not provided for vector indexes. [#146146](https://github.com/cockroachdb/cockroach/issues/146146) +- Vector index queries may return incorrect results when the underlying table uses multiple column families. [#146046](https://github.com/cockroachdb/cockroach/issues/146046) +- Queries against a vector index may ignore filter conditions (e.g., a `WHERE` clause) when multiple vector indexes exist on the same `VECTOR` column, and one has a prefix column. [#146257](https://github.com/cockroachdb/cockroach/issues/146257) \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/vectorized-engine-limitations.md b/src/current/_includes/v25.3/known-limitations/vectorized-engine-limitations.md new file mode 100644 index 00000000000..daea59ebf88 --- /dev/null +++ b/src/current/_includes/v25.3/known-limitations/vectorized-engine-limitations.md @@ -0,0 +1,2 @@ +- The vectorized engine does not support queries containing a join filtered with an [`ON` expression]({% link {{ page.version.version }}/joins.md %}#supported-join-conditions). [#38018](https://github.com/cockroachdb/cockroach/issues/38018) +- The vectorized engine does not support [working with spatial data]({% link {{ page.version.version }}/export-spatial-data.md %}). Queries with [geospatial functions]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions) or [spatial data]({% link {{ page.version.version }}/export-spatial-data.md %}) will revert to the row-oriented execution engine. diff --git a/src/current/_includes/v25.3/ldr/create_logical_replication_stream_stmt.html b/src/current/_includes/v25.3/ldr/create_logical_replication_stream_stmt.html new file mode 100644 index 00000000000..f0b742f619e --- /dev/null +++ b/src/current/_includes/v25.3/ldr/create_logical_replication_stream_stmt.html @@ -0,0 +1,171 @@ + +
+[SVG railroad diagram (markup stripped): CREATE LOGICAL REPLICATION STREAM { FROM TABLE db_object_name | FROM TABLES ( logical_replication_resources_list ) } ON source_connection_string { INTO TABLE db_object_name | INTO TABLES ( logical_replication_resources_list ) } WITH logical_replication_options [ , ... ]]
\ No newline at end of file diff --git a/src/current/_includes/v25.3/ldr/create_logically_replicated_stmt.html b/src/current/_includes/v25.3/ldr/create_logically_replicated_stmt.html new file mode 100644 index 00000000000..03bca7093e7 --- /dev/null +++ b/src/current/_includes/v25.3/ldr/create_logically_replicated_stmt.html @@ -0,0 +1,149 @@ + +
+[SVG railroad diagram (markup stripped): CREATE LOGICALLY REPLICATED { TABLE db_object_name | TABLES ( logical_replication_resources_list ) } FROM { TABLE db_object_name | TABLES ( logical_replication_resources_list ) } ON source_connection_string WITH logical_replication_create_table_options [ , ... ]]
\ No newline at end of file diff --git a/src/current/_includes/v25.3/ldr/immediate-description.md b/src/current/_includes/v25.3/ldr/immediate-description.md new file mode 100644 index 00000000000..eb87361a009 --- /dev/null +++ b/src/current/_includes/v25.3/ldr/immediate-description.md @@ -0,0 +1 @@ +Attempts to replicate the changed row directly into the destination table, without re-running constraint validations. It does not support writing into tables with [foreign key]({% link {{ page.version.version }}/foreign-key.md %}) constraints. \ No newline at end of file diff --git a/src/current/_includes/v25.3/ldr/multiple-tables.md b/src/current/_includes/v25.3/ldr/multiple-tables.md new file mode 100644 index 00000000000..0522fb605be --- /dev/null +++ b/src/current/_includes/v25.3/ldr/multiple-tables.md @@ -0,0 +1 @@ +There are tradeoffs between running one table per LDR job and running multiple tables in one LDR job. Multiple tables in one LDR job can be easier to operate. For example, if you pause and resume the single job, LDR will stop and resume for all the tables. However, the most granular level of observability will be at the job level. One table in one LDR job will allow for table-level observability. diff --git a/src/current/_includes/v25.3/ldr/note-manage-ldr.md b/src/current/_includes/v25.3/ldr/note-manage-ldr.md new file mode 100644 index 00000000000..d35b8c1002f --- /dev/null +++ b/src/current/_includes/v25.3/ldr/note-manage-ldr.md @@ -0,0 +1 @@ +For details on managing schema changes, conflicts, and jobs when LDR is running, refer to the [Manage Logical Data Replication]({% link {{ page.version.version }}/manage-logical-data-replication.md %}) page. \ No newline at end of file diff --git a/src/current/_includes/v25.3/ldr/show-logical-replication-responses.md b/src/current/_includes/v25.3/ldr/show-logical-replication-responses.md new file mode 100644 index 00000000000..33ec432b1bd --- /dev/null +++ b/src/current/_includes/v25.3/ldr/show-logical-replication-responses.md @@ -0,0 +1,9 @@ +Field | Response +---------+---------- +`job_id` | The job's ID. Use with [`CANCEL JOB`]({% link {{ page.version.version }}/cancel-job.md %}), [`PAUSE JOB`]({% link {{ page.version.version }}/pause-job.md %}), [`RESUME JOB`]({% link {{ page.version.version }}/resume-job.md %}), [`SHOW JOB`]({% link {{ page.version.version }}/show-jobs.md %}). +`status` | The job's current state. Possible values: `pending`, `paused`, `pause-requested`, `failed`, `succeeded`, `canceled`, `cancel-requested`, `running`, `retry-running`, `retry-reverting`, `reverting`, `revert-failed`.

Refer to [Job status]({% link {{ page.version.version }}/show-jobs.md %}#job-status) for a description of each status. +`tables` | The fully qualified name of the table(s) that are part of the LDR job. +`replicated_time` | The latest [timestamp]({% link {{ page.version.version }}/timestamp.md %}) at which the destination cluster has consistent data. This time advances automatically as long as the LDR job proceeds without error. `replicated_time` is updated periodically (every 30s). +`replication_start_time` | The start time of the LDR job. +`conflict_resolution_type` | The type of [conflict resolution]({% link {{ page.version.version }}/manage-logical-data-replication.md %}#conflict-resolution): `LWW` (last write wins). +`command` | Description of the job including the replicating table(s) and the cluster connections. diff --git a/src/current/_includes/v25.3/ldr/show_logical_replication_jobs_stmt.html b/src/current/_includes/v25.3/ldr/show_logical_replication_jobs_stmt.html new file mode 100644 index 00000000000..50cef32cc22 --- /dev/null +++ b/src/current/_includes/v25.3/ldr/show_logical_replication_jobs_stmt.html @@ -0,0 +1,55 @@ + +
+[SVG railroad diagram (markup stripped): SHOW LOGICAL REPLICATION JOBS [ WITH show_logical_replication_jobs_options ]]
\ No newline at end of file diff --git a/src/current/_includes/v25.3/ldr/use-create-logically-replicated.md b/src/current/_includes/v25.3/ldr/use-create-logically-replicated.md new file mode 100644 index 00000000000..c294dcf4518 --- /dev/null +++ b/src/current/_includes/v25.3/ldr/use-create-logically-replicated.md @@ -0,0 +1 @@ +If your table does not contain any user-defined types or [foreign key]({% link {{ page.version.version }}/foreign-key.md %}) dependencies, use the [`CREATE LOGICALLY REPLICATED`]({% link {{ page.version.version }}/create-logically-replicated.md %}) syntax to start the stream for a fast, offline initial scan and automatic destination table setup. \ No newline at end of file diff --git a/src/current/_includes/v25.3/ldr/validated-description.md b/src/current/_includes/v25.3/ldr/validated-description.md new file mode 100644 index 00000000000..4b7bb9a8b18 --- /dev/null +++ b/src/current/_includes/v25.3/ldr/validated-description.md @@ -0,0 +1 @@ +Attempts to apply the write in a similar way to a user-run query, which would re-run all constraint validations relevant to the destination table(s). If the change violates foreign key dependencies, unique constraints, or other constraints, the row will be put in the [dead letter queue (DLQ)]({% link {{ page.version.version }}/manage-logical-data-replication.md %}#dead-letter-queue-dlq) instead. Like the [SQL layer]({% link {{ page.version.version }}/architecture/sql-layer.md %}), `validated` mode does not recognize deletion tombstones. As a result, an update to the same key from cluster A will successfully apply on cluster B, even if that key was deleted on cluster B before the LDR job streamed the cluster A update to the key. \ No newline at end of file diff --git a/src/current/_includes/v25.3/leader-leases-intro.md b/src/current/_includes/v25.3/leader-leases-intro.md new file mode 100644 index 00000000000..e11070ebf62 --- /dev/null +++ b/src/current/_includes/v25.3/leader-leases-intro.md @@ -0,0 +1 @@ +CockroachDB offers an improved leasing system rebuilt atop a stronger form of [Raft]({% link {{ page.version.version }}/architecture/replication-layer.md %}#raft) leadership that ensures that the Raft leader is always the range's leaseholder, except briefly during [lease transfers]({% link {{ page.version.version }}/architecture/replication-layer.md %}#how-leases-are-transferred-from-a-dead-node). This type of lease is called a _Leader lease_, and supersedes the former system of having different epoch-based and expiration-based lease types, while combining the performance of the former with the resilience of the latter. diff --git a/src/current/_includes/v25.3/leader-leases-node-heartbeat-use-cases.md b/src/current/_includes/v25.3/leader-leases-node-heartbeat-use-cases.md new file mode 100644 index 00000000000..f6b248da5be --- /dev/null +++ b/src/current/_includes/v25.3/leader-leases-node-heartbeat-use-cases.md @@ -0,0 +1,7 @@ +For the purposes of [Raft replication]({% link {{ page.version.version }}/architecture/replication-layer.md %}#raft) and determining the [leaseholder]({% link {{ page.version.version }}/architecture/overview.md %}#architecture-leaseholder) of a [range]({% link {{ page.version.version }}/architecture/overview.md %}#architecture-range), node health is no longer determined by heartbeating a single "liveness range"; instead it is determined using [Leader leases]({% link {{ page.version.version }}/architecture/replication-layer.md %}#leader-leases). 
+ +However, node heartbeats of a single range are still used to determine: + +- Whether a node is still a member of a cluster (this is used by [`cockroach node decommission`]({% link {{ page.version.version }}/cockroach-node.md %}#node-decommission)). +- Whether a node is dead (in which case [its leases will be transferred away]({% link {{ page.version.version }}/architecture/replication-layer.md %}#how-leases-are-transferred-from-a-dead-node)). +- How to avoid placing replicas on dead, decommissioning or unhealthy nodes, and to make decisions about lease transfers. diff --git a/src/current/_includes/v25.3/metric-names-serverless.md b/src/current/_includes/v25.3/metric-names-serverless.md new file mode 100644 index 00000000000..33fab10e2a8 --- /dev/null +++ b/src/current/_includes/v25.3/metric-names-serverless.md @@ -0,0 +1,244 @@ +Name | Description +-----|----- +`addsstable.applications` | Number of SSTable ingestions applied (i.e., applied by Replicas) +`addsstable.copies` | number of SSTable ingestions that required copying files during application +`addsstable.proposals` | Number of SSTable ingestions proposed (i.e., sent to Raft by lease holders) +`admission.wait_sum.kv-stores` | Total wait time in micros +`admission.wait_sum.kv` | Total wait time in micros +`admission.wait_sum.sql-kv-response` | Total wait time in micros +`admission.wait_sum.sql-sql-response` | Total wait time in micros +`capacity.available` | Available storage capacity +`capacity.reserved` | Capacity reserved for snapshots +`capacity.used` | Used storage capacity +`capacity` | Total storage capacity +`changefeed.backfill_count` | Number of changefeeds currently executing backfill +`changefeed.backfill_pending_ranges` | Number of ranges in an ongoing backfill that are yet to be fully emitted +`changefeed.commit_latency` | Event commit latency: a difference between event MVCC timestamp and the time it was acknowledged by the downstream sink. If the sink batches events, then the difference between the earliest event in the batch and acknowledgement is recorded; Excludes latency during backfill +`changefeed.emitted_messages` | Messages emitted by all feeds +`changefeed.error_retries` | Total retryable errors encountered by all changefeeds +`changefeed.failures` | Total number of changefeed jobs which have failed +`changefeed.max_behind_nanos` | Largest commit-to-emit duration of any running feed +`changefeed.message_size_hist` | Message size histogram +`changefeed.running` | Number of currently running changefeeds, including sinkless +`clock-offset.meannanos` | Mean clock offset with other nodes +`clock-offset.stddevnanos` | Stddev clock offset with other nodes +`distsender.batches.partial` | Number of partial batches processed after being divided on range boundaries +`distsender.batches` | Number of batches processed +`distsender.errors.notleaseholder` | Number of NotLeaseHolderErrors encountered from replica-addressed RPCs +`distsender.rpc.sent.local` | Number of replica-addressed RPCs sent through the local-server optimization +`distsender.rpc.sent.nextreplicaerror` | Number of replica-addressed RPCs sent due to per-replica errors +`distsender.rpc.sent` | Number of replica-addressed RPCs sent +`exec.error` | Number of batch KV requests that failed to execute on this node. This count excludes transaction restart/abort errors. However, it will include other errors expected during normal operation, such as ConditionFailedError. This metric is thus not an indicator of KV health. 
+`exec.latency` | Latency of batch KV requests (including errors) executed on this node. This measures requests already addressed to a single replica, from the moment at which they arrive at the internal gRPC endpoint to the moment at which the response (or an error) is returned. This latency includes in particular commit waits, conflict resolution and replication, and end-users can easily produce high measurements via long-running transactions that conflict with foreground traffic. This metric thus does not provide a good signal for understanding the health of the KV layer. +`exec.success` | Number of batch KV requests executed successfully on this node. A request is considered to have executed 'successfully' if it either returns a result or a transaction restart/abort error. +`gcbytesage` | Cumulative age of non-live data +`gossip.bytes.received` | Number of received gossip bytes +`gossip.bytes.sent` | Number of sent gossip bytes +`gossip.connections.incoming` | Number of active incoming gossip connections +`gossip.connections.outgoing` | Number of active outgoing gossip connections +`gossip.connections.refused` | Number of refused incoming gossip connections +`gossip.infos.received` | Number of received gossip Info objects +`gossip.infos.sent` | Number of sent gossip Info objects +`intentage` | Cumulative age of intents +`intentbytes` | Number of bytes in intent KV pairs +`intentcount` | Count of intent keys +`jobs.changefeed.resume_retry_error` | Number of changefeed jobs which failed with a retryable error +`keybytes` | Number of bytes taken up by keys +`keycount` | Count of all keys +`leases.epoch` | Number of replica leaseholders using epoch-based leases +`leases.error` | Number of failed lease requests +`leases.expiration` | Number of replica leaseholders using expiration-based leases +`leases.success` | Number of successful lease requests +`leases.transfers.error` | Number of failed lease transfers +`leases.transfers.success` | Number of successful lease transfers +`livebytes` | Number of bytes of live data (keys plus values) +`livecount` | Count of live keys +`liveness.epochincrements` | Number of times this node has incremented its liveness epoch +`liveness.heartbeatfailures` | Number of failed node liveness heartbeats from this node +`liveness.heartbeatlatency` | Node liveness heartbeat latency +`liveness.heartbeatsuccesses` | Number of successful node liveness heartbeats from this node +`liveness.livenodes` | Number of live nodes in the cluster (will be 0 if this node is not itself live) +`queue.consistency.pending` | Number of pending replicas in the consistency checker queue +`queue.consistency.process.failure` | Number of replicas which failed processing in the consistency checker queue +`queue.consistency.process.success` | Number of replicas successfully processed by the consistency checker queue +`queue.consistency.processingnanos` | Nanoseconds spent processing replicas in the consistency checker queue +`queue.gc.info.abortspanconsidered` | Number of AbortSpan entries eligible for removal based on their ages +`queue.gc.info.abortspangcnum` | Number of AbortSpan entries fit for removal +`queue.gc.info.abortspanscanned` | Number of transactions present in the AbortSpan scanned from the engine +`queue.gc.info.intentsconsidered` | Number of intents eligible to be considered because they are at least two hours old +`queue.gc.info.intenttxns` | Number of associated distinct transactions +`queue.gc.info.numkeysaffected` | Number of keys with data that is eligible for garbage 
collection +`queue.gc.info.pushtxn` | Number of attempted pushes +`queue.gc.info.resolvesuccess` | Number of successful intent resolutions +`queue.gc.info.resolvetotal` | Number of attempted intent resolutions +`queue.gc.info.transactionspangcaborted` | Number of entries eligible for garbage collection that correspond to aborted txns +`queue.gc.info.transactionspangccommitted` | Number of entries eligible for garbage collection that correspond to committed txns +`queue.gc.info.transactionspangcpending` | Number of entries eligible for garbage collection that correspond to pending txns +`queue.gc.info.transactionspanscanned` | Number of entries in transaction spans scanned from the engine +`queue.gc.pending` | Number of pending replicas in the MVCC garbage collection queue +`queue.gc.process.failure` | Number of replicas which failed processing in the MVCC garbage collection queue +`queue.gc.process.success` | Number of replicas successfully processed by the MVCC garbage collection queue +`queue.gc.processingnanos` | Nanoseconds spent processing replicas in the MVCC garbage collection queue +`queue.raftlog.pending` | Number of pending replicas in the Raft log queue +`queue.raftlog.process.failure` | Number of replicas which failed processing in the Raft log queue +`queue.raftlog.process.success` | Number of replicas successfully processed by the Raft log queue +`queue.raftlog.processingnanos` | Nanoseconds spent processing replicas in the Raft log queue +`queue.raftsnapshot.pending` | Number of pending replicas in the Raft repair queue +`queue.raftsnapshot.process.failure` | Number of replicas which failed processing in the Raft repair queue +`queue.raftsnapshot.process.success` | Number of replicas successfully processed by the Raft repair queue +`queue.raftsnapshot.processingnanos` | Nanoseconds spent processing replicas in the Raft repair queue +`queue.replicagc.pending` | Number of pending replicas in the replica queue +`queue.replicagc.process.failure` | Number of replicas which failed processing in the replica garbage collection queue +`queue.replicagc.process.success` | Number of replicas successfully processed by the replica garbage collection queue +`queue.replicagc.processingnanos` | Nanoseconds spent processing replicas in the replica garbage collection queue +`queue.replicagc.removereplica` | Number of replica removals attempted by the replica garbage collection queue +`queue.replicate.addreplica` | Number of replica additions attempted by the replicate queue +`queue.replicate.pending` | Number of pending replicas in the replicate queue +`queue.replicate.process.failure` | Number of replicas which failed processing in the replicate queue +`queue.replicate.process.success` | Number of replicas successfully processed by the replicate queue +`queue.replicate.processingnanos` | Nanoseconds spent processing replicas in the replicate queue +`queue.replicate.purgatory` | Number of replicas in the replicate queue's purgatory, awaiting allocation options +`queue.replicate.rebalancereplica` | Number of replica rebalancer-initiated additions attempted by the replicate queue +`queue.replicate.removedeadreplica` | Number of dead replica removals attempted by the replicate queue (typically in response to a node outage) +`queue.replicate.removereplica` | Number of replica removals attempted by the replicate queue (typically in response to a rebalancer-initiated addition) +`queue.replicate.transferlease` | Number of range lease transfers attempted by the replicate queue +`queue.split.pending` | 
Number of pending replicas in the split queue +`queue.split.process.failure` | Number of replicas which failed processing in the split queue +`queue.split.process.success` | Number of replicas successfully processed by the split queue +`queue.split.processingnanos` | Nanoseconds spent processing replicas in the split queue +`queue.tsmaintenance.pending` | Number of pending replicas in the time series maintenance queue +`queue.tsmaintenance.process.failure` | Number of replicas which failed processing in the time series maintenance queue +`queue.tsmaintenance.process.success` | Number of replicas successfully processed by the time series maintenance queue +`queue.tsmaintenance.processingnanos` | Nanoseconds spent processing replicas in the time series maintenance queue +`raft.commandsapplied` | Count of Raft commands applied. This measurement is taken on the Raft apply loops of all Replicas (leaders and followers alike), meaning that it does not measure the number of Raft commands *proposed* (in the hypothetical extreme case, all Replicas may apply all commands through snapshots, thus not increasing this metric at all). Instead, it is a proxy for how much work is being done advancing the Replica state machines on this node. +`raft.heartbeats.pending` | Number of pending heartbeats and responses waiting to be coalesced +`raft.process.commandcommit.latency` | Latency histogram for applying a batch of Raft commands to the state machine. This metric is misnamed: it measures the latency for *applying* a batch of committed Raft commands to a Replica state machine. This requires only non-durable I/O (except for replication configuration changes). Note that a "batch" in this context is really a sub-batch of the batch received for application during Raft ready handling. The 'raft.process.applycommitted.latency' histogram is likely more suitable in most cases, as it measures the total latency across all sub-batches (i.e., the sum of commandcommit.latency for a complete batch). +`raft.process.logcommit.latency` | Latency histogram for committing Raft log entries to stable storage. This measures the latency of durably committing a group of newly received Raft entries as well as the HardState entry to disk. This excludes any data processing, i.e., we measure purely the commit latency of the resulting Engine write. Homogeneous bands of p50-p99 latencies (in the presence of regular Raft traffic), make it likely that the storage layer is healthy. Spikes in the latency bands can either hint at the presence of large sets of Raft entries being received, or at performance issues at the storage layer. +`raft.process.tickingnanos` | Nanoseconds spent in store.processRaft() processing replica.Tick() +`raft.process.workingnanos` | Nanoseconds spent in store.processRaft() working. This is the sum of the measurements passed to the raft.process.handleready.latency histogram. 
+`raft.rcvd.app` | Number of MsgApp messages received by this store +`raft.rcvd.appresp` | Number of MsgAppResp messages received by this store +`raft.rcvd.dropped` | Number of incoming Raft messages dropped (due to queue length or size) +`raft.rcvd.heartbeat` | Number of (coalesced, if enabled) MsgHeartbeat messages received by this store +`raft.rcvd.heartbeatresp` | Number of (coalesced, if enabled) MsgHeartbeatResp messages received by this store +`raft.rcvd.prevote` | Number of MsgPreVote messages received by this store +`raft.rcvd.prevoteresp` | Number of MsgPreVoteResp messages received by this store +`raft.rcvd.prop` | Number of MsgProp messages received by this store +`raft.rcvd.snap` | Number of MsgSnap messages received by this store +`raft.rcvd.timeoutnow` | Number of MsgTimeoutNow messages received by this store +`raft.rcvd.transferleader` | Number of MsgTransferLeader messages received by this store +`raft.rcvd.vote` | Number of MsgVote messages received by this store +`raft.rcvd.voteresp` | Number of MsgVoteResp messages received by this store +`raft.ticks` | Number of Raft ticks queued +`raftlog.behind` | Number of Raft log entries followers on other stores are behind. This gauge provides a view of the aggregate number of log entries the Raft leaders on this node think the followers are behind. Since a Raft leader may not always have a good estimate for this information for all of its followers, and since followers are expected to be behind (when they are not required as part of a quorum) *and* the aggregate thus scales like the count of such followers, it is difficult to meaningfully interpret this metric. +`raftlog.truncated` | Number of Raft log entries truncated +`range.adds` | Number of range additions +`range.raftleadertransfers` | Number of Raft leader transfers +`range.removes` | Number of range removals +`range.snapshots.generated` | Number of generated snapshots +`range.snapshots.recv-in-progress` | Number of non-empty snapshots in progress on a receiver store +`range.snapshots.recv-queue` | Number of queued non-empty snapshots on a receiver store +`range.snapshots.recv-total-in-progress` | Number of empty and non-empty snapshots in progress on a receiver store +`range.snapshots.send-in-progress` | Number of non-empty snapshots in progress on a sender store +`range.snapshots.send-queue` | Number of queued non-empty snapshots on a sender store +`range.snapshots.send-total-in-progress` | Number of empty and non-empty snapshots in progress on a sender store +`range.splits` | Number of range splits +`ranges.overreplicated` | Number of ranges with more live replicas than the replication target +`ranges.unavailable` | Number of ranges with fewer live replicas than needed for quorum +`ranges.underreplicated` | Number of ranges with fewer live replicas than the replication target +`ranges` | Number of ranges +`rebalancing.writespersecond` | Number of keys written (i.e., applied by Raft) per second to the store, averaged over a large time period as used in rebalancing decisions +`replicas.leaders_not_leaseholders` | Number of replicas that are Raft leaders whose range lease is held by another store +`replicas.leaders` | Number of Raft leaders +`replicas.leaseholders` | Number of lease holders +`replicas.quiescent` | Number of quiesced replicas +`replicas.reserved` | Number of replicas reserved for snapshots +`replicas` | Number of replicas +`requests.backpressure.split` | Number of backpressured writes waiting on a range split.
A range will backpressure (roughly) non-system traffic when the range is above the configured size until the range splits. When the rate of this metric is nonzero over extended periods of time, it should be investigated why splits are not occurring. +`requests.slow.distsender` | Number of replica-bound RPCs currently stuck or retrying for a long time. Note that this is not a good signal for KV health. The remote side of the RPCs tracked here may experience contention, so an end user can easily cause values for this metric to be emitted by leaving a transaction open for a long time and contending with it using a second transaction. +`requests.slow.lease` | Number of requests that have been stuck for a long time acquiring a lease. A nonzero value usually indicates range or replica unavailability, and should be investigated. Commonly, `requests.slow.raft` is also a nonzero value, which indicates that the lease requests are not getting a timely response from the replication layer. +`requests.slow.raft` | Number of requests that have been stuck for a long time in the replication layer. An (evaluated) request has to pass through the replication layer, notably the quota pool and Raft. If it fails to do so within a highly permissive duration, this metric is incremented (and decremented again once the request is either applied or returns an error). A nonzero value indicates range or replica unavailability, and should be investigated. +`rocksdb.block.cache.hits` | Count of block cache hits +`rocksdb.block.cache.misses` | Count of block cache misses +`rocksdb.block.cache.pinned-usage` | Bytes pinned by the block cache +`rocksdb.block.cache.usage` | Bytes used by the block cache +`rocksdb.bloom.filter.prefix.checked` | Number of times the bloom filter was checked +`rocksdb.bloom.filter.prefix.useful` | Number of times the bloom filter helped avoid iterator creation +`rocksdb.compactions` | Number of table compactions +`rocksdb.flushes` | Number of table flushes +`rocksdb.memtable.total-size` | Current size of memtable in bytes +`rocksdb.num-sstables` | Number of storage engine SSTables +`rocksdb.read-amplification` | Number of disk reads per query +`rocksdb.table-readers-mem-estimate` | Memory used by index and filter blocks +`round-trip-latency` | Distribution of round-trip latencies with other nodes +`sql.bytesin` | Number of sql bytes received +`sql.bytesout` | Number of sql bytes sent +`sql.conn.latency` | Latency to establish and authenticate a SQL connection +`sql.conns` | Number of active sql connections +`sql.ddl.count` | Number of SQL DDL statements successfully executed +`sql.delete.count` | Number of SQL DELETE statements successfully executed +`sql.distsql.contended_queries.count` | Number of SQL queries that experienced contention +`sql.distsql.exec.latency` | Latency of DistSQL statement execution +`sql.distsql.flows.active` | Number of distributed SQL flows currently active +`sql.distsql.flows.total` | Number of distributed SQL flows executed +`sql.distsql.queries.active` | Number of SQL queries currently active +`sql.distsql.queries.total` | Number of SQL queries executed +`sql.distsql.select.count` | Number of DistSQL SELECT statements +`sql.distsql.service.latency` | Latency of DistSQL request execution +`sql.exec.latency` | Latency of SQL statement execution +`sql.failure.count` | Number of statements resulting in a planning or runtime error +`sql.full.scan.count` | Number of full table or index scans +`sql.insert.count` | Number of SQL INSERT statements successfully executed 
+`sql.mem.distsql.current` | Current sql statement memory usage for distsql +`sql.mem.distsql.max` | Memory usage per sql statement for distsql +`sql.mem.internal.session.current` | Current sql session memory usage for internal +`sql.mem.internal.session.max` | Memory usage per sql session for internal +`sql.mem.internal.txn.current` | Current sql transaction memory usage for internal +`sql.mem.internal.txn.max` | Memory usage per sql transaction for internal +`sql.misc.count` | Number of other SQL statements successfully executed +`sql.query.count` | Number of SQL queries executed +`sql.select.count` | Number of SQL SELECT statements successfully executed +`sql.service.latency` | Latency of SQL request execution +`sql.statements.active` | Number of currently active user SQL statements +`sql.txn.abort.count` | Number of SQL transaction abort errors +`sql.txn.begin.count` | Number of SQL transaction BEGIN statements successfully executed +`sql.txn.commit.count` | Number of SQL transaction COMMIT statements successfully executed +`sql.txn.latency` | Latency of SQL transactions +`sql.txn.rollback.count` | Number of SQL transaction ROLLBACK statements successfully executed +`sql.txns.open` | Number of currently open user SQL transactions +`sql.update.count` | Number of SQL UPDATE statements successfully executed +`sys.cgo.allocbytes` | Current bytes of memory allocated by cgo +`sys.cgo.totalbytes` | Total bytes of memory allocated by cgo, but not released +`sys.cgocalls` | Total number of cgo calls +`sys.cpu.combined.percent-normalized` | Current user+system cpu percentage, normalized 0-1 by number of cores +`sys.cpu.sys.ns` | Total system cpu time +`sys.cpu.sys.percent` | Current system cpu percentage +`sys.cpu.user.ns` | Total user cpu time +`sys.cpu.user.percent` | Current user cpu percentage +`sys.fd.open` | Process open file descriptors +`sys.fd.softlimit` | Process open FD soft limit +`sys.gc.count` | Total number of garbage collection runs +`sys.gc.pause.ns` | Total garbage collection pause +`sys.gc.pause.percent` | Current garbage collection pause percentage +`sys.go.allocbytes` | Current bytes of memory allocated by go +`sys.go.totalbytes` | Total bytes of memory allocated by go, but not released +`sys.goroutines` | Current number of goroutines +`sys.host.net.recv.bytes` | Bytes received on all network interfaces since this process started +`sys.host.net.send.bytes` | Bytes sent on all network interfaces since this process started +`sys.rss` | Current process RSS +`sys.uptime` | Process uptime +`sysbytes` | Number of bytes in system KV pairs +`syscount` | Count of system KV pairs +`timeseries.write.bytes` | Total size in bytes of metric samples written to disk +`timeseries.write.errors` | Total errors encountered while attempting to write metrics to disk +`timeseries.write.samples` | Total number of metric samples written to disk +`totalbytes` | Total number of bytes taken up by keys and values including non-live data +`txn.aborts` | Number of aborted KV transactions +`txn.commits1PC` | Number of KV transaction one-phase commit attempts +`txn.commits` | Number of committed KV transactions (including 1PC) +`txn.durations` | KV transaction durations +`txn.restarts.serializable` | Number of restarts due to a forwarded commit timestamp and isolation=SERIALIZABLE +`txn.restarts.writetooold` | Number of restarts due to a concurrent writer committing first +`txn.restarts` | Number of restarted KV transactions +`valbytes` | Number of bytes taken up by values +`valcount` | Count of all values 
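+
+To inspect the current value of any of these metrics from SQL, you can query the `crdb_internal.node_metrics` virtual table. A minimal sketch; the `LIKE` pattern is an arbitrary example:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SELECT name, value FROM crdb_internal.node_metrics WHERE name LIKE 'sql.%' ORDER BY name;
+~~~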
diff --git a/src/current/_includes/v25.3/metric-names.md b/src/current/_includes/v25.3/metric-names.md new file mode 100644 index 00000000000..d31cd859b9c --- /dev/null +++ b/src/current/_includes/v25.3/metric-names.md @@ -0,0 +1,30 @@
+{% assign version = page.version.version | replace: ".", "" %}
+{% assign list1 = site.data[version].metrics.available-metrics-in-metrics-list %}
+{% assign list2 = site.data[version].metrics.available-metrics-not-in-metrics-list %}
+
+{% assign available_metrics_combined = list1 | concat: list2 %}
+{% assign available_metrics_sorted = available_metrics_combined | sort: "metric_id" %}
+
+<table>
+    <thead>
+        <tr>
+            <th>CockroachDB Metric Name</th>
+            <th>Description</th>
+            <th>Type</th>
+            <th>Unit</th>
+        </tr>
+    </thead>
+    <tbody>
+    {% for m in available_metrics_sorted %} {% comment %} Iterate through the available_metrics. {% endcomment %}
+        {% assign metrics-list = site.data[version].metrics.metrics-list | where: "metric", m.metric_id %}
+        {% comment %} Get the row from the metrics-list with the given metric_id. {% endcomment %}
+        <tr>
+            <td><code>{{ m.metric_id }}</code></td>
+            {% comment %} Use the value from the metrics-list, if any, followed by the value in the available-metrics-not-in-metrics-list, if any. {% endcomment %}
+            <td>{{ metrics-list[0].description }}{{ m.description }}</td>
+            <td>{{ metrics-list[0].type }}{{ m.type }}</td>
+            <td>{{ metrics-list[0].unit }}{{ m.unit }}</td>
+        </tr>
+    {% endfor %} {% comment %} metrics {% endcomment %}
+    </tbody>
+</table>
diff --git a/src/current/_includes/v25.3/migration/load-data-copy-from.md b/src/current/_includes/v25.3/migration/load-data-copy-from.md new file mode 100644 index 00000000000..37d19c285fd --- /dev/null +++ b/src/current/_includes/v25.3/migration/load-data-copy-from.md @@ -0,0 +1 @@ +When migrating from PostgreSQL, you can use [`COPY FROM`]({% link {{ page.version.version }}/copy.md %}) to copy CSV or tab-delimited data to your CockroachDB tables. This option enables your tables to remain online and accessible. However, it is slower than using [`IMPORT INTO`]({% link {{ page.version.version }}/import-into.md %}). \ No newline at end of file diff --git a/src/current/_includes/v25.3/migration/load-data-import-into.md b/src/current/_includes/v25.3/migration/load-data-import-into.md new file mode 100644 index 00000000000..174b1bd33f4 --- /dev/null +++ b/src/current/_includes/v25.3/migration/load-data-import-into.md @@ -0,0 +1 @@ +Use [`IMPORT INTO`]({% link {{ page.version.version }}/import-into.md %}) to migrate [CSV]({% link {{ page.version.version }}/migrate-from-csv.md %}), TSV, or [Avro]({% link {{ page.version.version }}/migrate-from-avro.md %}) data stored in [userfile]({% link {{ page.version.version }}/use-userfile-storage.md %}) or [cloud storage]({% link {{ page.version.version }}/use-cloud-storage.md %}) into pre-existing tables on CockroachDB. This option achieves the highest throughput, but [requires taking the CockroachDB tables **offline**]({% link {{ page.version.version }}/import-into.md %}#considerations) during the import. \ No newline at end of file diff --git a/src/current/_includes/v25.3/migration/load-data-third-party.md b/src/current/_includes/v25.3/migration/load-data-third-party.md new file mode 100644 index 00000000000..b8f0cf70f17 --- /dev/null +++ b/src/current/_includes/v25.3/migration/load-data-third-party.md @@ -0,0 +1 @@ +Use a [third-party data migration tool]({% link {{ page.version.version }}/third-party-database-tools.md %}#data-migration-tools) (e.g., [AWS DMS]({% link {{ page.version.version }}/aws-dms.md %}), [Qlik]({% link {{ page.version.version }}/qlik.md %}), [Striim]({% link {{ page.version.version }}/striim.md %})) to load the data. \ No newline at end of file diff --git a/src/current/_includes/v25.3/misc/assume-role-description.md b/src/current/_includes/v25.3/misc/assume-role-description.md new file mode 100644 index 00000000000..ea6ca882975 --- /dev/null +++ b/src/current/_includes/v25.3/misc/assume-role-description.md @@ -0,0 +1 @@ +Pass the [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) of the role to assume. Use in combination with `AUTH=implicit` or `AUTH=specified`.
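+
+For example, a minimal sketch of passing `ASSUME_ROLE` in a backup statement with `implicit` authentication; the bucket name and role ARN are placeholders:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+BACKUP DATABASE movr
+  INTO 's3://my-bucket/backups?AUTH=implicit&ASSUME_ROLE=arn:aws:iam::123456789012:role/crdb-backup-role';
+~~~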
`external_id`: Use as a value to `ASSUME_ROLE` to specify the [external ID](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) for third-party access to your S3 bucket. \ No newline at end of file diff --git a/src/current/_includes/v25.3/misc/auth-intro-examples.md b/src/current/_includes/v25.3/misc/auth-intro-examples.md new file mode 100644 index 00000000000..27b7fb4484c --- /dev/null +++ b/src/current/_includes/v25.3/misc/auth-intro-examples.md @@ -0,0 +1,3 @@ +These examples use the **default** `AUTH=specified` parameter. For more detail on how to use `implicit` authentication with Amazon S3 buckets, read [Use Cloud Storage for Bulk Operations — Authentication]({% link {{ page.version.version }}/cloud-storage-authentication.md %}). + +CockroachDB supports assume role authentication. This allows you to limit the control specific users have over your storage buckets. See [Assume role authentication]({% link {{ page.version.version }}/cloud-storage-authentication.md %}) for more information. \ No newline at end of file diff --git a/src/current/_includes/v25.3/misc/available-capacity-metric.md b/src/current/_includes/v25.3/misc/available-capacity-metric.md new file mode 100644 index 00000000000..d3bb8ffae1e --- /dev/null +++ b/src/current/_includes/v25.3/misc/available-capacity-metric.md @@ -0,0 +1 @@ +If you are testing your deployment locally with multiple CockroachDB nodes running on a single machine (this is [not recommended in production]({% link {{ page.version.version }}/recommended-production-settings.md %}#topology)), you must explicitly [set the store size]({% link {{ page.version.version }}/cockroach-start.md %}#store) per node in order to display the correct capacity. Otherwise, the machine's actual disk capacity will be counted as a separate store for each node, thus inflating the computed capacity. \ No newline at end of file diff --git a/src/current/_includes/v25.3/misc/aws-locations.md b/src/current/_includes/v25.3/misc/aws-locations.md new file mode 100644 index 00000000000..8b073c1f230 --- /dev/null +++ b/src/current/_includes/v25.3/misc/aws-locations.md @@ -0,0 +1,18 @@ +| Location | SQL Statement | +| ------ | ------ | +| US East (N. Virginia) | `INSERT into system.locations VALUES ('region', 'us-east-1', 37.478397, -76.453077)` | +| US East (Ohio) | `INSERT into system.locations VALUES ('region', 'us-east-2', 40.417287, -82.907123)` | +| US West (N. 
California) | `INSERT into system.locations VALUES ('region', 'us-west-1', 38.837522, -120.895824)` | +| US West (Oregon) | `INSERT into system.locations VALUES ('region', 'us-west-2', 43.804133, -120.554201)` | +| Canada (Central) | `INSERT into system.locations VALUES ('region', 'ca-central-1', 56.130366, -106.346771)` | +| EU (Frankfurt) | `INSERT into system.locations VALUES ('region', 'eu-central-1', 50.110922, 8.682127)` | +| EU (Ireland) | `INSERT into system.locations VALUES ('region', 'eu-west-1', 53.142367, -7.692054)` | +| EU (London) | `INSERT into system.locations VALUES ('region', 'eu-west-2', 51.507351, -0.127758)` | +| EU (Paris) | `INSERT into system.locations VALUES ('region', 'eu-west-3', 48.856614, 2.352222)` | +| Asia Pacific (Tokyo) | `INSERT into system.locations VALUES ('region', 'ap-northeast-1', 35.689487, 139.691706)` | +| Asia Pacific (Seoul) | `INSERT into system.locations VALUES ('region', 'ap-northeast-2', 37.566535, 126.977969)` | +| Asia Pacific (Osaka-Local) | `INSERT into system.locations VALUES ('region', 'ap-northeast-3', 34.693738, 135.502165)` | +| Asia Pacific (Singapore) | `INSERT into system.locations VALUES ('region', 'ap-southeast-1', 1.352083, 103.819836)` | +| Asia Pacific (Sydney) | `INSERT into system.locations VALUES ('region', 'ap-southeast-2', -33.86882, 151.209296)` | +| Asia Pacific (Mumbai) | `INSERT into system.locations VALUES ('region', 'ap-south-1', 19.075984, 72.877656)` | +| South America (São Paulo) | `INSERT into system.locations VALUES ('region', 'sa-east-1', -23.55052, -46.633309)` | diff --git a/src/current/_includes/v25.3/misc/azure-blob.md b/src/current/_includes/v25.3/misc/azure-blob.md new file mode 100644 index 00000000000..3449e639f7a --- /dev/null +++ b/src/current/_includes/v25.3/misc/azure-blob.md @@ -0,0 +1 @@ +For [changefeeds]({% link {{ page.version.version }}/changefeed-sinks.md %}), you must use the `azure://` scheme. For all other jobs, the `azure://` and `azure-storage://` schemes are also supported for backward compatibility, though `azure-blob://` is recommended. \ No newline at end of file diff --git a/src/current/_includes/v25.3/misc/azure-env-param.md b/src/current/_includes/v25.3/misc/azure-env-param.md new file mode 100644 index 00000000000..62b6b01293e --- /dev/null +++ b/src/current/_includes/v25.3/misc/azure-env-param.md @@ -0,0 +1 @@ +The [Azure environment](https://learn.microsoft.com/azure/deployment-environments/concept-environments-key-concepts#environments) that the storage account belongs to. The accepted values are: `AZURECHINACLOUD`, `AZUREGERMANCLOUD`, `AZUREPUBLICCLOUD`, and [`AZUREUSGOVERNMENTCLOUD`](https://learn.microsoft.com/azure/azure-government/documentation-government-developer-guide). These are cloud environments that meet security, compliance, and data privacy requirements for the respective instance of Azure cloud. If the parameter is not specified, it will default to `AZUREPUBLICCLOUD`. 
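+
+For example, a minimal sketch of targeting a Government cloud storage account in a backup statement; the container, account name, and key are placeholders:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+BACKUP DATABASE movr
+  INTO 'azure-blob://my-container/backups?AZURE_ACCOUNT_NAME=myaccount&AZURE_ACCOUNT_KEY={url-encoded key}&AZURE_ENVIRONMENT=AZUREUSGOVERNMENTCLOUD';
+~~~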
\ No newline at end of file diff --git a/src/current/_includes/v25.3/misc/azure-locations.md b/src/current/_includes/v25.3/misc/azure-locations.md new file mode 100644 index 00000000000..7119ff8b7cb --- /dev/null +++ b/src/current/_includes/v25.3/misc/azure-locations.md @@ -0,0 +1,30 @@ +| Location | SQL Statement | +| -------- | ------------- | +| eastasia (East Asia) | `INSERT into system.locations VALUES ('region', 'eastasia', 22.267, 114.188)` | +| southeastasia (Southeast Asia) | `INSERT into system.locations VALUES ('region', 'southeastasia', 1.283, 103.833)` | +| centralus (Central US) | `INSERT into system.locations VALUES ('region', 'centralus', 41.5908, -93.6208)` | +| eastus (East US) | `INSERT into system.locations VALUES ('region', 'eastus', 37.3719, -79.8164)` | +| eastus2 (East US 2) | `INSERT into system.locations VALUES ('region', 'eastus2', 36.6681, -78.3889)` | +| westus (West US) | `INSERT into system.locations VALUES ('region', 'westus', 37.783, -122.417)` | +| northcentralus (North Central US) | `INSERT into system.locations VALUES ('region', 'northcentralus', 41.8819, -87.6278)` | +| southcentralus (South Central US) | `INSERT into system.locations VALUES ('region', 'southcentralus', 29.4167, -98.5)` | +| northeurope (North Europe) | `INSERT into system.locations VALUES ('region', 'northeurope', 53.3478, -6.2597)` | +| westeurope (West Europe) | `INSERT into system.locations VALUES ('region', 'westeurope', 52.3667, 4.9)` | +| japanwest (Japan West) | `INSERT into system.locations VALUES ('region', 'japanwest', 34.6939, 135.5022)` | +| japaneast (Japan East) | `INSERT into system.locations VALUES ('region', 'japaneast', 35.68, 139.77)` | +| brazilsouth (Brazil South) | `INSERT into system.locations VALUES ('region', 'brazilsouth', -23.55, -46.633)` | +| australiaeast (Australia East) | `INSERT into system.locations VALUES ('region', 'australiaeast', -33.86, 151.2094)` | +| australiasoutheast (Australia Southeast) | `INSERT into system.locations VALUES ('region', 'australiasoutheast', -37.8136, 144.9631)` | +| southindia (South India) | `INSERT into system.locations VALUES ('region', 'southindia', 12.9822, 80.1636)` | +| centralindia (Central India) | `INSERT into system.locations VALUES ('region', 'centralindia', 18.5822, 73.9197)` | +| westindia (West India) | `INSERT into system.locations VALUES ('region', 'westindia', 19.088, 72.868)` | +| canadacentral (Canada Central) | `INSERT into system.locations VALUES ('region', 'canadacentral', 43.653, -79.383)` | +| canadaeast (Canada East) | `INSERT into system.locations VALUES ('region', 'canadaeast', 46.817, -71.217)` | +| uksouth (UK South) | `INSERT into system.locations VALUES ('region', 'uksouth', 50.941, -0.799)` | +| ukwest (UK West) | `INSERT into system.locations VALUES ('region', 'ukwest', 53.427, -3.084)` | +| westcentralus (West Central US) | `INSERT into system.locations VALUES ('region', 'westcentralus', 40.890, -110.234)` | +| westus2 (West US 2) | `INSERT into system.locations VALUES ('region', 'westus2', 47.233, -119.852)` | +| koreacentral (Korea Central) | `INSERT into system.locations VALUES ('region', 'koreacentral', 37.5665, 126.9780)` | +| koreasouth (Korea South) | `INSERT into system.locations VALUES ('region', 'koreasouth', 35.1796, 129.0756)` | +| francecentral (France Central) | `INSERT into system.locations VALUES ('region', 'francecentral', 46.3772, 2.3730)` | +| francesouth (France South) | `INSERT into system.locations VALUES ('region', 'francesouth', 43.8345, 2.1972)` | diff --git 
a/src/current/_includes/v25.3/misc/basic-terms.md b/src/current/_includes/v25.3/misc/basic-terms.md new file mode 100644 index 00000000000..48a2bdf5206 --- /dev/null +++ b/src/current/_includes/v25.3/misc/basic-terms.md @@ -0,0 +1,38 @@ +### Cluster +A group of interconnected CockroachDB nodes that function as a single distributed SQL database server. Nodes collaboratively organize transactions and rebalance workload and data storage to optimize performance and fault tolerance. + +Each cluster has its own authorization hierarchy, meaning that users and roles must be defined on that specific cluster. + +A CockroachDB cluster can be run in CockroachDB Cloud, within a customer [Organization]({% link {{ page.version.version }}/architecture/glossary.md %}#organization), or can be self-hosted. + +### Node +An individual instance of CockroachDB. One or more nodes form a cluster. + +### Range + +CockroachDB stores all user data (tables, indexes, etc.) and almost all system data in a sorted map of key-value pairs. This keyspace is divided into contiguous chunks called _ranges_, such that every key is found in one range. + +From a SQL perspective, a table and its secondary indexes initially map to a single range, where each key-value pair in the range represents a single row in the table (also called the _primary index_ because the table is sorted by the primary key) or a single row in a secondary index. As soon as the size of a range reaches [the default range size]({% link {{ page.version.version }}/configure-replication-zones.md %}#range-max-bytes), it is [split into two ranges]({% link {{ page.version.version }}/architecture/distribution-layer.md %}#range-splits). This process continues for these new ranges as the table and its indexes continue growing. + +### Replica + +A copy of a range stored on a node. By default, there are three [replicas]({% link {{ page.version.version }}/configure-replication-zones.md %}#num_replicas) of each range on different nodes. + +### Leaseholder + +The replica that holds the "range lease." This replica receives and coordinates all read and write requests for the range. + +For most types of tables and queries, the leaseholder is the only replica that can serve consistent reads (reads that return "the latest" data). + +The leaseholder is always the same replica as the [Raft leader](#architecture-raft-leader), except briefly during [lease transfers]({% link {{ page.version.version }}/architecture/replication-layer.md %}#how-leases-are-transferred-from-a-dead-node). For more information, refer to [Leader leases]({% link {{ page.version.version }}/architecture/replication-layer.md %}#leader-leases). + +### Raft protocol + +The [consensus protocol]({% link {{ page.version.version }}/architecture/replication-layer.md %}#raft) employed in CockroachDB that ensures that your data is safely stored on multiple nodes and that those nodes agree on the current state even if some of them are temporarily disconnected. + +### Raft leader + +For each range, the replica that is the "leader" for write requests. The leader uses the Raft protocol to ensure that a majority of replicas (the leader and enough followers) agree, based on their Raft logs, before committing the write. The Raft leader is always the same replica as the [leaseholder](#architecture-leaseholder). For more information, refer to [Leader leases]({% link {{ page.version.version }}/architecture/replication-layer.md %}#leader-leases).
+ +### Raft log +A time-ordered log of writes to a range that its replicas have agreed on. This log exists on disk with each replica and is the range's source of truth for consistent replication. diff --git a/src/current/_includes/v25.3/misc/beta-release-warning.md b/src/current/_includes/v25.3/misc/beta-release-warning.md new file mode 100644 index 00000000000..c228f650d04 --- /dev/null +++ b/src/current/_includes/v25.3/misc/beta-release-warning.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_danger}} +Beta releases are intended for testing and experimentation only. Beta releases are not recommended for production use, as they can lead to data corruption, cluster unavailability, performance issues, etc. +{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/misc/beta-warning.md b/src/current/_includes/v25.3/misc/beta-warning.md new file mode 100644 index 00000000000..4a5b9e3c6ae --- /dev/null +++ b/src/current/_includes/v25.3/misc/beta-warning.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_danger}} +**This is a beta feature.** It is still undergoing testing. Please [file a GitHub issue]({% link {{ page.version.version }}/file-an-issue.md %}) with us if you identify a bug. +{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/misc/bulk-permission-note.md b/src/current/_includes/v25.3/misc/bulk-permission-note.md new file mode 100644 index 00000000000..cb008696def --- /dev/null +++ b/src/current/_includes/v25.3/misc/bulk-permission-note.md @@ -0,0 +1 @@ +We recommend using [cloud storage]({% link {{ page.version.version }}/use-cloud-storage.md %}). You also need to ensure that the permissions at your storage destination are configured for the operation. See [Storage Permissions]({% link {{ page.version.version }}/use-cloud-storage.md %}#storage-permissions) for a list of the necessary permissions that each bulk operation requires. \ No newline at end of file diff --git a/src/current/_includes/v25.3/misc/cert-auth-using-x509-subject.md b/src/current/_includes/v25.3/misc/cert-auth-using-x509-subject.md new file mode 100644 index 00000000000..281feb1986f --- /dev/null +++ b/src/current/_includes/v25.3/misc/cert-auth-using-x509-subject.md @@ -0,0 +1 @@ +If you manage your own Certificate Authority (CA) infrastructure, CockroachDB supports mapping between the Subject field of your [X.509 certificates](https://en.wikipedia.org/wiki/X.509) and SQL [roles]({% link {{ page.version.version }}/security-reference/authorization.md %}#roles). For more information, see [Certificate-based authentication using multiple values from the X.509 Subject field]({% link {{ page.version.version }}/certificate-based-authentication-using-the-x509-subject-field.md %}). diff --git a/src/current/_includes/v25.3/misc/chrome-localhost.md b/src/current/_includes/v25.3/misc/chrome-localhost.md new file mode 100644 index 00000000000..d794ff339d0 --- /dev/null +++ b/src/current/_includes/v25.3/misc/chrome-localhost.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_info}} +If you are using Google Chrome and get an error about not being able to reach `localhost` because its certificate has been revoked, go to chrome://flags/#allow-insecure-localhost, enable "Allow invalid certificates for resources loaded from localhost", and then restart the browser. Enabling this Chrome feature degrades security for all sites running on `localhost`, not just CockroachDB's DB Console, so be sure to enable the feature only temporarily. 
+{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/misc/csv-import-callout.md b/src/current/_includes/v25.3/misc/csv-import-callout.md new file mode 100644 index 00000000000..60555c5d0b6 --- /dev/null +++ b/src/current/_includes/v25.3/misc/csv-import-callout.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_info}} +The column order in your schema must match the column order in the file being imported. +{{site.data.alerts.end}} \ No newline at end of file diff --git a/src/current/_includes/v25.3/misc/customizing-the-savepoint-name.md b/src/current/_includes/v25.3/misc/customizing-the-savepoint-name.md new file mode 100644 index 00000000000..6a00f8f6d8c --- /dev/null +++ b/src/current/_includes/v25.3/misc/customizing-the-savepoint-name.md @@ -0,0 +1,5 @@ +Set the `force_savepoint_restart` [session variable]({% link {{ page.version.version }}/set-vars.md %}#supported-variables) to `true` to enable using a custom name for the [retry savepoint]({% link {{ page.version.version }}/advanced-client-side-transaction-retries.md %}#retry-savepoints). + +Once this variable is set, the [`SAVEPOINT`]({% link {{ page.version.version }}/savepoint.md %}) statement will accept any name for the retry savepoint, not just `cockroach_restart`. In addition, it causes every savepoint name to be equivalent to `cockroach_restart`, which disallows the use of [nested transactions]({% link {{ page.version.version }}/transactions.md %}#nested-transactions). + +This feature exists to support applications that want to use the [advanced client-side transaction retry protocol]({% link {{ page.version.version }}/advanced-client-side-transaction-retries.md %}), but cannot customize the name of savepoints to be `cockroach_restart`. For example, this may be necessary because you are using an ORM that requires its own names for savepoints. diff --git a/src/current/_includes/v25.3/misc/database-terms.md b/src/current/_includes/v25.3/misc/database-terms.md new file mode 100644 index 00000000000..78663985607 --- /dev/null +++ b/src/current/_includes/v25.3/misc/database-terms.md @@ -0,0 +1,30 @@ +### Consistency +The requirement that a transaction must change affected data only in allowed ways. CockroachDB uses "consistency" in both the sense of [ACID semantics](https://en.wikipedia.org/wiki/ACID) and the [CAP theorem](https://wikipedia.org/wiki/CAP_theorem), albeit less formally than either definition. + +### Isolation +The degree to which a transaction may be affected by other transactions running at the same time. CockroachDB provides the [`SERIALIZABLE`](https://wikipedia.org/wiki/Serializability) and `READ COMMITTED` isolation levels. For more information, see [Isolation levels]({% link {{ page.version.version }}/transactions.md %}#isolation-levels). + +### Consensus +The process of reaching agreement on whether a transaction is committed or aborted. CockroachDB uses the [Raft consensus protocol](#architecture-raft). In CockroachDB, when a range receives a write, a quorum of nodes containing replicas of the range acknowledge the write. This means your data is safely stored and a majority of nodes agree on the database's current state, even if some of the nodes are offline. + +When a write does not achieve consensus, forward progress halts to maintain consistency within the cluster. + +### Replication +The process of creating and distributing copies of data, as well as ensuring that those copies remain consistent. 
CockroachDB requires all writes to propagate to a [quorum](https://wikipedia.org/wiki/Quorum_%28distributed_computing%29) of copies of the data before being considered committed. This ensures the consistency of your data. + +### Transaction +A set of operations performed on a database that satisfies the requirements of [ACID semantics](https://en.wikipedia.org/wiki/ACID). This is a crucial feature for a consistent system to ensure developers can trust the data in their database. For more information about how transactions work in CockroachDB, see [Transaction Layer]({% link {{ page.version.version }}/architecture/transaction-layer.md %}). + +### Transaction contention +A [state of conflict]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#transaction-contention) that occurs when: + +- A [transaction]({% link {{ page.version.version }}/transactions.md %}) is unable to complete due to another concurrent or recent transaction attempting to write to the same data. This is also called *lock contention*. +- A transaction is [automatically retried]({% link {{ page.version.version }}/transactions.md %}#automatic-retries) because it could not be placed into a [serializable ordering]({% link {{ page.version.version }}/demo-serializable.md %}) among all of the currently executing transactions. This is also called a *serialization conflict*. If the automatic retry is not possible or fails, a [*transaction retry error*]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}) is emitted to the client, requiring a client application running under `SERIALIZABLE` isolation to [retry the transaction]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}#client-side-retry-handling). + +Take steps to [reduce transaction contention]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#reduce-transaction-contention) in the first place. + +### Multi-active availability +A consensus-based notion of high availability that lets each node in the cluster handle reads and writes for a subset of the stored data (on a per-range basis). This is in contrast to _active-passive replication_, in which the active node receives 100% of request traffic, and _active-active_ replication, in which all nodes accept requests but typically cannot guarantee that reads are both up-to-date and fast. + +### User +A SQL user is an identity capable of executing SQL statements and performing other cluster actions against CockroachDB clusters. SQL users must authenticate with an option permitted on the cluster (username/password, single sign-on (SSO), or certificate). Note that a SQL/cluster user is distinct from a CockroachDB {{ site.data.products.cloud }} organization user. diff --git a/src/current/_includes/v25.3/misc/debug-subcommands.md b/src/current/_includes/v25.3/misc/debug-subcommands.md new file mode 100644 index 00000000000..25feb08481b --- /dev/null +++ b/src/current/_includes/v25.3/misc/debug-subcommands.md @@ -0,0 +1,5 @@ +While the `cockroach debug` command has a few subcommands, users are expected to use only the [`zip`]({% link {{ page.version.version }}/cockroach-debug-zip.md %}), [`encryption-active-key`]({% link {{ page.version.version }}/cockroach-debug-encryption-active-key.md %}), [`merge-logs`]({% link {{ page.version.version }}/cockroach-debug-merge-logs.md %}), [`list-files`]({% link {{ page.version.version }}/cockroach-debug-list-files.md %}), [`tsdump`]({% link {{ page.version.version }}/cockroach-debug-tsdump.md %}), and [`ballast`]({% link {{ page.version.version }}/cockroach-debug-ballast.md %}) subcommands. 
+ +We recommend using the [`encryption-decrypt`]({% link {{ page.version.version }}/cockroach-debug-encryption-decrypt.md %}) and [`job-trace`]({% link {{ page.version.version }}/cockroach-debug-job-trace.md %}) subcommands only when directed by the [Cockroach Labs support team]({% link {{ page.version.version }}/support-resources.md %}). + +The other `debug` subcommands are useful only to Cockroach Labs. Output of `debug` commands may contain sensitive or secret information. diff --git a/src/current/_includes/v25.3/misc/delete-statistics.md b/src/current/_includes/v25.3/misc/delete-statistics.md new file mode 100644 index 00000000000..6954194fc75 --- /dev/null +++ b/src/current/_includes/v25.3/misc/delete-statistics.md @@ -0,0 +1,15 @@ +To delete statistics for all tables in all databases: + +{% include_cached copy-clipboard.html %} +~~~ sql +DELETE FROM system.table_statistics WHERE true; +~~~ + +To delete a named set of statistics (e.g., one named "users_stats"), run a query like the following: + +{% include_cached copy-clipboard.html %} +~~~ sql +DELETE FROM system.table_statistics WHERE name = 'users_stats'; +~~~ + +For more information about the `DELETE` statement, see [`DELETE`]({% link {{ page.version.version }}/delete.md %}). diff --git a/src/current/_includes/v25.3/misc/diagnostics-callout.html b/src/current/_includes/v25.3/misc/diagnostics-callout.html new file mode 100644 index 00000000000..a969a8cf152 --- /dev/null +++ b/src/current/_includes/v25.3/misc/diagnostics-callout.html @@ -0,0 +1 @@ +{{site.data.alerts.callout_info}}By default, each node of a CockroachDB cluster periodically shares anonymous usage details with Cockroach Labs. For an explanation of the details that get shared and how to opt out of reporting, see <a href="diagnostics-reporting.html">Diagnostics Reporting</a>.{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/misc/enterprise-features.md b/src/current/_includes/v25.3/misc/enterprise-features.md new file mode 100644 index 00000000000..5f9193e1ca9 --- /dev/null +++ b/src/current/_includes/v25.3/misc/enterprise-features.md @@ -0,0 +1,32 @@ +## Cluster optimization + +Feature | Description +--------+------------------------- +[Read Committed isolation]({% link {{ page.version.version }}/read-committed.md %}) | Achieve predictable query performance at high workload concurrencies, but without guaranteed transaction serializability. +[Follower Reads]({% link {{ page.version.version }}/follower-reads.md %}) | Reduce read latency in multi-region deployments by using the closest replica at the expense of reading slightly historical data. +[Multi-Region Capabilities]({% link {{ page.version.version }}/multiregion-overview.md %}) | Row-level control over where your data is stored to help you reduce read and write latency and meet regulatory requirements. +[PL/pgSQL]({% link {{ page.version.version }}/plpgsql.md %}) | Use a procedural language in [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}) and [stored procedures]({% link {{ page.version.version }}/stored-procedures.md %}) to improve performance and enable more complex queries. +[Node Map]({% link {{ page.version.version }}/enable-node-map.md %}) | Visualize the geographical distribution of a cluster by plotting its node localities on a world map. +[Generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-type) | Improve performance for prepared statements by enabling generic plans that eliminate most of the query latency attributed to planning. 
+[`VECTOR` type]({% link {{ page.version.version }}/vector.md %}) | Represent data points in multi-dimensional space, using fixed-length arrays of floating-point numbers. + +## Recovery and streaming + +Feature | Description +--------+------------------------- +[`BACKUP`]({% link {{ page.version.version }}/backup.md %}) and restore capabilities | Taking and restoring [full backups]({% link {{ page.version.version }}/take-full-and-incremental-backups.md %}), [incremental backups]({% link {{ page.version.version }}/take-full-and-incremental-backups.md %}), [backups with revision history]({% link {{ page.version.version }}/take-backups-with-revision-history-and-restore-from-a-point-in-time.md %}), [locality-aware backups]({% link {{ page.version.version }}/take-and-restore-locality-aware-backups.md %}), and [encrypted backups]({% link {{ page.version.version }}/take-and-restore-encrypted-backups.md %}). +[Changefeeds into a Configurable Sink]({% link {{ page.version.version }}/create-changefeed.md %}) | For every change in a configurable allowlist of tables, configure a changefeed to emit a record to a configurable sink: Apache Kafka, cloud storage, Google Cloud Pub/Sub, or a webhook sink. These records can be processed by downstream systems for reporting, caching, or full-text indexing. +[Change Data Capture Queries]({% link {{ page.version.version }}/cdc-queries.md %}) | Use `SELECT` queries to filter and modify change data before sending it to a changefeed's sink. +[Physical Cluster Replication]({% link {{ page.version.version }}/physical-cluster-replication-overview.md %}) | Send all data at the byte level from a primary cluster to an independent standby cluster. Existing data and ongoing changes on the active primary cluster, which is serving application data, replicate asynchronously to the passive standby cluster. + +## Security and IAM + +Feature | Description +--------+------------------------- +[Encryption at Rest]({% link {{ page.version.version }}/security-reference/encryption.md %}#encryption-at-rest-enterprise) | Enable automatic transparent encryption of a node's data on the local disk using AES in counter mode, with all key sizes allowed. This feature works together with CockroachDB's automatic encryption of data in transit. +[Column-level encryption]({% link {{ page.version.version }}/column-level-encryption.md %}) | Encrypt specific columns within a table. +[GSSAPI with Kerberos Authentication]({% link {{ page.version.version }}/gssapi_authentication.md %}) | Authenticate to your cluster using identities stored in an external enterprise directory system that supports Kerberos, such as Active Directory. +[Cluster Single Sign-on (SSO)]({% link {{ page.version.version }}/sso-sql.md %}) | Grant SQL access to a cluster using JSON Web Tokens (JWTs) issued by an external identity provider (IdP) or custom JWT issuer. +[Single Sign-on (SSO) for DB Console]({% link {{ page.version.version }}/sso-db-console.md %}) | Grant access to a cluster's DB Console interface using SSO through an IdP that supports OIDC. +[Role-based SQL Audit Logs]({% link {{ page.version.version }}/role-based-audit-logging.md %}) | Enable logging of queries being executed against your system by specific users or roles. 
+[Certificate-based authentication using multiple values from the X.509 Subject field]({% link {{ page.version.version }}/certificate-based-authentication-using-the-x509-subject-field.md %}) | Map SQL user [roles]({% link {{ page.version.version }}/security-reference/authorization.md %}#roles) to values in the Subject field of the [X.509 certificate](https://en.wikipedia.org/wiki/X.509) used for [TLS authentication]({% link {{ page.version.version }}/security-reference/transport-layer-security.md %}#what-is-transport-layer-security-tls). diff --git a/src/current/_includes/v25.3/misc/explore-benefits-see-also.md b/src/current/_includes/v25.3/misc/explore-benefits-see-also.md new file mode 100644 index 00000000000..2ad7178c808 --- /dev/null +++ b/src/current/_includes/v25.3/misc/explore-benefits-see-also.md @@ -0,0 +1,7 @@ +- [Replication & Rebalancing]({% link {{ page.version.version }}/demo-replication-and-rebalancing.md %}) +- [CockroachDB Resilience]({% link {{ page.version.version }}/demo-cockroachdb-resilience.md %}) +- [Low Latency Multi-Region Deployment]({% link {{ page.version.version }}/demo-low-latency-multi-region-deployment.md %}) +- [Serializable Transactions]({% link {{ page.version.version }}/demo-serializable.md %}) +- [Cross-Cloud Migration]({% link {{ page.version.version }}/demo-automatic-cloud-migration.md %}) +- [Orchestration]({% link {{ page.version.version }}/orchestrate-a-local-cluster-with-kubernetes-insecure.md %}) +- [JSON Support]({% link {{ page.version.version }}/demo-json-support.md %}) diff --git a/src/current/_includes/v25.3/misc/external-connection-kafka.md b/src/current/_includes/v25.3/misc/external-connection-kafka.md new file mode 100644 index 00000000000..2ffa2b599a1 --- /dev/null +++ b/src/current/_includes/v25.3/misc/external-connection-kafka.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_info}} +You can create an external connection to represent a Kafka sink URI. This allows you to specify the external connection's name in statements rather than the provider-specific URI. For detail on using external connections, see the [`CREATE EXTERNAL CONNECTION`]({% link {{ page.version.version }}/create-external-connection.md %}) page. +{{site.data.alerts.end}} \ No newline at end of file diff --git a/src/current/_includes/v25.3/misc/external-connection-note.md b/src/current/_includes/v25.3/misc/external-connection-note.md new file mode 100644 index 00000000000..f9bc7914ed8 --- /dev/null +++ b/src/current/_includes/v25.3/misc/external-connection-note.md @@ -0,0 +1 @@ +You can create an external connection to represent an external storage or sink URI. This allows you to specify the external connection's name in statements rather than the provider-specific URI. For detail on using external connections, see the [`CREATE EXTERNAL CONNECTION`]({% link {{ page.version.version }}/create-external-connection.md %}) page. \ No newline at end of file diff --git a/src/current/_includes/v25.3/misc/external-io-privilege.md b/src/current/_includes/v25.3/misc/external-io-privilege.md new file mode 100644 index 00000000000..c3f92f8e24e --- /dev/null +++ b/src/current/_includes/v25.3/misc/external-io-privilege.md @@ -0,0 +1 @@ +You can grant a user the `EXTERNALIOIMPLICITACCESS` [system-level privilege]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges) to interact with external resources that require implicit access. 
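+
+For example, a minimal sketch of granting the privilege; the user name is a placeholder:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+GRANT SYSTEM EXTERNALIOIMPLICITACCESS TO maxroach;
+~~~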
\ No newline at end of file diff --git a/src/current/_includes/v25.3/misc/force-index-selection.md b/src/current/_includes/v25.3/misc/force-index-selection.md new file mode 100644 index 00000000000..a2f70c98ee6 --- /dev/null +++ b/src/current/_includes/v25.3/misc/force-index-selection.md @@ -0,0 +1,157 @@ +By using the explicit index annotation, you can override [CockroachDB's index selection](https://www.cockroachlabs.com/blog/index-selection-cockroachdb-2/) and use a specific [index]({% link {{ page.version.version }}/indexes.md %}) when reading from a named table. + +{{site.data.alerts.callout_info}} +Index selection can impact [performance]({% link {{ page.version.version }}/performance-best-practices-overview.md %}), but does not change the result of a query. +{{site.data.alerts.end}} + +##### Force index scan + +To force a scan of a specific index: + +{% include_cached copy-clipboard.html %} +~~~ sql +SELECT * FROM table@my_idx; +~~~ + +This is equivalent to the longer expression: + +{% include_cached copy-clipboard.html %} +~~~ sql +SELECT * FROM table@{FORCE_INDEX=my_idx}; +~~~ + +##### Force reverse scan + +To force a reverse scan of a specific index: + +{% include_cached copy-clipboard.html %} +~~~ sql +SELECT * FROM table@{FORCE_INDEX=my_idx,DESC}; +~~~ + +Forcing a reverse scan can help with [performance tuning]({% link {{ page.version.version }}/performance-best-practices-overview.md %}). To choose an index and its scan direction: + +{% include_cached copy-clipboard.html %} +~~~ sql +SELECT * FROM table@{FORCE_INDEX=idx[,DIRECTION]}; +~~~ + +where the optional `DIRECTION` is either `ASC` (ascending) or `DESC` (descending). + +When a direction is specified, that scan direction is forced; otherwise the [cost-based optimizer]({% link {{ page.version.version }}/cost-based-optimizer.md %}) is free to choose the direction it calculates will result in the best performance. + +You can verify that the optimizer is choosing your desired scan direction using [`EXPLAIN (OPT)`]({% link {{ page.version.version }}/explain.md %}#opt-option). For example, given the table + +{% include_cached copy-clipboard.html %} +~~~ sql +CREATE TABLE kv (k INT PRIMARY KEY, v INT); +~~~ + +you can check the scan direction with: + +{% include_cached copy-clipboard.html %} +~~~ sql +EXPLAIN (opt) SELECT * FROM kv@{FORCE_INDEX=primary,DESC}; +~~~ + +~~~ + text ++-------------------------------------+ + scan kv,rev + └── flags: force-index=primary,rev +(2 rows) +~~~ + +##### Force inverted index scan + +To force a scan of any [inverted index]({% link {{ page.version.version }}/inverted-indexes.md %}) of the hinted table: + +{% include_cached copy-clipboard.html %} +~~~ sql +SELECT * FROM table@{FORCE_INVERTED_INDEX}; +~~~ + +The `FORCE_INVERTED_INDEX` hint does not allow specifying an inverted index. If no query plan can be generated, the query will result in the error: + +~~~ +ERROR: could not produce a query plan conforming to the FORCE_INVERTED_INDEX hint +~~~ + +##### Force partial index scan + +To force a [partial index scan]({% link {{ page.version.version }}/partial-indexes.md %}), your statement must have a `WHERE` clause that implies the partial index filter.
+ +{% include_cached copy-clipboard.html %} +~~~ sql +CREATE TABLE t ( + a INT, + INDEX idx (a) WHERE a > 0); +INSERT INTO t(a) VALUES (5); +SELECT * FROM t@idx WHERE a > 0; +~~~ + +~~~ +CREATE TABLE + +Time: 13ms total (execution 12ms / network 0ms) + +INSERT 1 + +Time: 22ms total (execution 21ms / network 0ms) + + a +----- + 5 +(1 row) + +Time: 1ms total (execution 1ms / network 0ms) +~~~ + +##### Force partial GIN index scan + +To force a [partial GIN index]({% link {{ page.version.version }}/inverted-indexes.md %}#partial-gin-indexes) scan, your statement must have a `WHERE` clause that: + +- Implies the partial index. +- Constrains the GIN index scan. + +{% include_cached copy-clipboard.html %} +~~~ sql +DROP TABLE t; +CREATE TABLE t ( + j JSON, + INVERTED INDEX idx (j) WHERE j->'a' = '1'); +INSERT INTO t(j) + VALUES ('{"a": 1}'), + ('{"a": 3, "b": 2}'), + ('{"a": 1, "b": 2}'); +SELECT * FROM t@idx WHERE j->'a' = '1' AND j->'b' = '2'; +~~~ + +~~~ +DROP TABLE + +Time: 68ms total (execution 22ms / network 45ms) + +CREATE TABLE + +Time: 10ms total (execution 10ms / network 0ms) + +INSERT 3 + +Time: 22ms total (execution 22ms / network 0ms) + + j +-------------------- + {"a": 1, "b": 2} +(1 row) + +Time: 1ms total (execution 1ms / network 0ms) +~~~ + +##### Prevent full scan + +{% include {{ page.version.version }}/sql/no-full-scan.md %} + +{{site.data.alerts.callout_success}} +For other ways to prevent full scans, refer to [Prevent the optimizer from planning full scans]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#prevent-the-optimizer-from-planning-full-scans). +{{site.data.alerts.end}} \ No newline at end of file diff --git a/src/current/_includes/v25.3/misc/gce-locations.md b/src/current/_includes/v25.3/misc/gce-locations.md new file mode 100644 index 00000000000..22122aae78d --- /dev/null +++ b/src/current/_includes/v25.3/misc/gce-locations.md @@ -0,0 +1,18 @@ +| Location | SQL Statement | +| ------ | ------ | +| us-east1 (South Carolina) | `INSERT into system.locations VALUES ('region', 'us-east1', 33.836082, -81.163727)` | +| us-east4 (N. 
Virginia) | `INSERT into system.locations VALUES ('region', 'us-east4', 37.478397, -76.453077)` | +| us-central1 (Iowa) | `INSERT into system.locations VALUES ('region', 'us-central1', 42.032974, -93.581543)` | +| us-west1 (Oregon) | `INSERT into system.locations VALUES ('region', 'us-west1', 43.804133, -120.554201)` | +| northamerica-northeast1 (Montreal) | `INSERT into system.locations VALUES ('region', 'northamerica-northeast1', 56.130366, -106.346771)` | +| europe-west1 (Belgium) | `INSERT into system.locations VALUES ('region', 'europe-west1', 50.44816, 3.81886)` | +| europe-west2 (London) | `INSERT into system.locations VALUES ('region', 'europe-west2', 51.507351, -0.127758)` | +| europe-west3 (Frankfurt) | `INSERT into system.locations VALUES ('region', 'europe-west3', 50.110922, 8.682127)` | +| europe-west4 (Netherlands) | `INSERT into system.locations VALUES ('region', 'europe-west4', 53.4386, 6.8355)` | +| europe-west6 (Zürich) | `INSERT into system.locations VALUES ('region', 'europe-west6', 47.3769, 8.5417)` | +| asia-east1 (Taiwan) | `INSERT into system.locations VALUES ('region', 'asia-east1', 24.0717, 120.5624)` | +| asia-northeast1 (Tokyo) | `INSERT into system.locations VALUES ('region', 'asia-northeast1', 35.689487, 139.691706)` | +| asia-southeast1 (Singapore) | `INSERT into system.locations VALUES ('region', 'asia-southeast1', 1.352083, 103.819836)` | +| australia-southeast1 (Sydney) | `INSERT into system.locations VALUES ('region', 'australia-southeast1', -33.86882, 151.209296)` | +| asia-south1 (Mumbai) | `INSERT into system.locations VALUES ('region', 'asia-south1', 19.075984, 72.877656)` | +| southamerica-east1 (São Paulo) | `INSERT into system.locations VALUES ('region', 'southamerica-east1', -23.55052, -46.633309)` | diff --git a/src/current/_includes/v25.3/misc/geojson_geometry_note.md b/src/current/_includes/v25.3/misc/geojson_geometry_note.md new file mode 100644 index 00000000000..a023f205c20 --- /dev/null +++ b/src/current/_includes/v25.3/misc/geojson_geometry_note.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_info}} +The screenshots in these examples were generated using [geojson.io](http://geojson.io), but they are designed to showcase the shapes, not the map. Representing `GEOMETRY` data in GeoJSON can lead to unexpected results if using geometries with [SRIDs]({% link {{ page.version.version }}/architecture/glossary.md %}#srid) other than 4326 (as shown below). +{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/misc/haproxy.md b/src/current/_includes/v25.3/misc/haproxy.md new file mode 100644 index 00000000000..c94bd654466 --- /dev/null +++ b/src/current/_includes/v25.3/misc/haproxy.md @@ -0,0 +1,39 @@ +By default, the generated configuration file is called `haproxy.cfg` and looks as follows, with the `server` addresses pre-populated correctly: + + ~~~ + global + maxconn 4096 + + defaults + mode tcp + # Timeout values should be configured for your specific use. + # See: https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#4-timeout%20connect + timeout connect 10s + timeout client 1m + timeout server 1m + # TCP keep-alive on client side. Server already enables them. 
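+ # clitcpka enables client-side TCP keep-alive probes, so idle client
+ # connections are not silently dropped by firewalls or NAT devices.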
+ option clitcpka + + listen psql + bind :26257 + mode tcp + balance roundrobin + option httpchk GET /health?ready=1 + server cockroach1 <node1 address>:26257 check port 8080 + server cockroach2 <node2 address>:26257 check port 8080 + server cockroach3 <node3 address>:26257 check port 8080 + ~~~ + + The file is preset with the minimal [configurations](http://cbonte.github.io/haproxy-dconv/1.7/configuration.html) needed to work with your running cluster: + + Field | Description + ------|------------ + `timeout connect`<br>
`timeout client`
`timeout server` | Timeout values that should be suitable for most deployments. + `bind` | The port that HAProxy listens on. This is the port clients will connect to and thus needs to be allowed by your network configuration.

This tutorial assumes HAProxy is running on a separate machine from CockroachDB nodes. If you run HAProxy on the same machine as a node (not recommended), you'll need to change this port, as `26257` is likely already being used by the CockroachDB node. + `balance` | The balancing algorithm. This is set to `roundrobin` to ensure that connections get rotated amongst nodes (connection 1 on node 1, connection 2 on node 2, etc.). Check the [HAProxy Configuration Manual](http://cbonte.github.io/haproxy-dconv/1.7/configuration.html#4-balance) for details about this and other balancing algorithms. + `option httpchk` | The HTTP endpoint that HAProxy uses to check node health. [`/health?ready=1`]({% link {{ page.version.version }}/monitoring-and-alerting.md %}#health-ready-1) ensures that HAProxy doesn't direct traffic to nodes that are live but not ready to receive requests. + `server` | For each included node, this field specifies the address the node advertises to other nodes in the cluster, i.e., the address passed in the [`--advertise-addr` flag]({% link {{ page.version.version }}/cockroach-start.md %}#networking) on node startup. Make sure hostnames are resolvable and IP addresses are routable from HAProxy. + + {{site.data.alerts.callout_info}} + For full details on these and other configuration settings, see the [HAProxy Configuration Manual](http://cbonte.github.io/haproxy-dconv/1.7/configuration.html). + {{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/misc/htpp-import-only.md b/src/current/_includes/v25.3/misc/htpp-import-only.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/src/current/_includes/v25.3/misc/import-perf.md b/src/current/_includes/v25.3/misc/import-perf.md new file mode 100644 index 00000000000..34bee9acdb4 --- /dev/null +++ b/src/current/_includes/v25.3/misc/import-perf.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_success}} +For best practices for optimizing import performance in CockroachDB, see [Import Performance Best Practices]({% link {{ page.version.version }}/import-performance-best-practices.md %}). +{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/misc/index-storage-parameters.md b/src/current/_includes/v25.3/misc/index-storage-parameters.md new file mode 100644 index 00000000000..e2aa6d76301 --- /dev/null +++ b/src/current/_includes/v25.3/misc/index-storage-parameters.md @@ -0,0 +1,14 @@ +| Parameter name | Description | Data type | Default value +|---------------------+----------------------|-----|------| +| `bucket_count` | The number of buckets into which a [hash-sharded index]({% link {{ page.version.version }}/hash-sharded-indexes.md %}) will split. | Integer | The value of the `sql.defaults.default_hash_sharded_index_bucket_count` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}). | +| `geometry_max_x` | The maximum X-value of the [spatial reference system]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial-reference-system) for the object(s) being covered. This only needs to be set if you are using a custom [SRID]({% link {{ page.version.version }}/architecture/glossary.md %}#srid). | | Derived from SRID bounds, else `(1 << 31) -1`. | +| `geometry_max_y` | The maximum Y-value of the [spatial reference system]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial-reference-system) for the object(s) being covered. This only needs to be set if you are using a custom [SRID]({% link {{ page.version.version }}/architecture/glossary.md %}#srid). 
| | Derived from SRID bounds, else `(1 << 31) -1`. | +| `geometry_min_x` | The minimum X-value of the [spatial reference system]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial-reference-system) for the object(s) being covered. This only needs to be set if the default bounds of the SRID are too large/small for the given data, or SRID = 0 and you wish to use a smaller range (unfortunately this is currently not exposed, but is viewable on ). By default, SRID = 0 assumes `[-min int32, max int32]` ranges. | | Derived from SRID bounds, else `-(1 << 31)`. | +| `geometry_min_y` | The minimum Y-value of the [spatial reference system]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial-reference-system) for the object(s) being covered. This only needs to be set if you are using a custom [SRID]({% link {{ page.version.version }}/architecture/glossary.md %}#srid). | | Derived from SRID bounds, else `-(1 << 31)`. | +| `s2_level_mod` | `s2_max_level` must be divisible by `s2_level_mod`. `s2_level_mod` must be between `1` and `3`. | Integer | `1` | +| `s2_max_cells` | The maximum number of S2 cells used in the covering. Provides a limit on how much work is done exploring the possible coverings. Allowed values: `1-30`. You may want to use higher values for odd-shaped regions such as skinny rectangles. Used in [spatial indexes]({% link {{ page.version.version }}/spatial-indexes.md %}). | Integer | `4` | +| `s2_max_level` | The maximum level of S2 cell used in the covering. Allowed values: `1-30`. Setting it to less than the default means that CockroachDB will be forced to generate coverings using larger cells. Used in [spatial indexes]({% link {{ page.version.version }}/spatial-indexes.md %}). | Integer | `30` | + +The following parameters are included for PostgreSQL compatibility and do not affect how CockroachDB runs: + +- `fillfactor` diff --git a/src/current/_includes/v25.3/misc/install-next-steps.html b/src/current/_includes/v25.3/misc/install-next-steps.html new file mode 100644 index 00000000000..32f77242643 --- /dev/null +++ b/src/current/_includes/v25.3/misc/install-next-steps.html @@ -0,0 +1,17 @@ + diff --git a/src/current/_includes/v25.3/misc/linux-binary-prereqs.md b/src/current/_includes/v25.3/misc/linux-binary-prereqs.md new file mode 100644 index 00000000000..541183fe71b --- /dev/null +++ b/src/current/_includes/v25.3/misc/linux-binary-prereqs.md @@ -0,0 +1 @@ +

The CockroachDB binary for Linux requires glibc, libncurses, and tzdata, which are found by default on nearly all Linux distributions, with Alpine as the notable exception.

diff --git a/src/current/_includes/v25.3/misc/logging-defaults.md b/src/current/_includes/v25.3/misc/logging-defaults.md new file mode 100644 index 00000000000..eabdd93755b --- /dev/null +++ b/src/current/_includes/v25.3/misc/logging-defaults.md @@ -0,0 +1,3 @@ +By default, this command logs messages to `stderr`. This includes events with `WARNING` [severity]({% link {{ page.version.version }}/logging.md %}#logging-levels-severities) and higher. + +If you need to troubleshoot this command's behavior, you can [customize its logging behavior]({% link {{ page.version.version }}/configure-logs.md %}). \ No newline at end of file diff --git a/src/current/_includes/v25.3/misc/logging-flags.md b/src/current/_includes/v25.3/misc/logging-flags.md new file mode 100644 index 00000000000..68106413559 --- /dev/null +++ b/src/current/_includes/v25.3/misc/logging-flags.md @@ -0,0 +1,12 @@ +Flag | Description +-----|------------ +`--log` | Configure logging parameters by specifying a YAML payload. For details, see [Configure logs]({% link {{ page.version.version }}/configure-logs.md %}#flag). If a YAML configuration is not specified, the [default configuration]({% link {{ page.version.version }}/configure-logs.md %}#default-logging-configuration) is used.

`--log-config-file` can also be used.

**Note:** The logging flags below cannot be combined with `--log`, but can be defined instead in the YAML payload. +`--log-config-file` | Configure logging parameters by specifying a path to a YAML file. For details, see [Configure logs]({% link {{ page.version.version }}/configure-logs.md %}#flag). If a YAML configuration is not specified, the [default configuration]({% link {{ page.version.version }}/configure-logs.md %}#default-logging-configuration) is used.

`--log` can also be used.

**Note:** The logging flags below cannot be combined with `--log-config-file`, but can be defined instead in the YAML file. +`--log-dir` | An alias for the [`--log`]({% link {{ page.version.version }}/configure-logs.md %}#flag) flag, for configuring the log directory where log files are stored and written to. Specifically, `--log-dir=XXX` is an alias for `--log='file-defaults: {dir: XXX}'`.

Setting `--log-dir` to a blank directory (`--log-dir=`) disables logging to files. Do not use `--log-dir=""`; this creates a new directory named `""` and stores log files in that directory. +`--log-group-max-size` | An alias for the [`--log`]({% link {{ page.version.version }}/configure-logs.md %}#flag) flag, for configuring the maximum size for a logging group (for example, `cockroach`, `cockroach-sql-audit`, `cockroach-auth`, `cockroach-sql-exec`, `cockroach-pebble`), after which the oldest log file is deleted. `--log-group-max-size=XXX` is an alias for `--log='file-defaults: {max-group-size: XXX}'`. Accepts a valid file size, such as `--log-group-max-size=1GiB`.

**Default:** `100MiB` +`--log-file-max-size` | An alias for [`--log`]({% link {{ page.version.version }}/configure-logs.md %}#flag), used to specify the maximum size that a log file can grow before a new log file is created. `--log-file-max-size=XXX` is an alias for `--log='file-defaults: {max-file-size: XXX}'`. Accepts a valid file size, such as `--log-file-max-size=2MiB`. **Requires** logging to files.

**Default:** `10MiB` +`--log-file-verbosity` | An alias for [`--log`]({% link {{ page.version.version }}/configure-logs.md %}#flag), used to specify the minimum [severity level]({% link {{ page.version.version }}/logging.md %}#logging-levels-severities) of messages that are logged. `--log-file-verbosity=XXX` is an alias for `--log='file-defaults: {filter: XXX}'`. When a severity is specified, such as `--log-file-verbosity=WARNING`, log messages that are below the specified severity level are not written to the target log file. **Requires** logging to files.

**Default:** `INFO` +`--logtostderr` | An alias for [`--log`]({% link {{ page.version.version }}/configure-logs.md %}#flag), to optionally output log messages at or above the configured [severity level]({% link {{ page.version.version }}/logging.md %}#logging-levels-severities) to the `stderr` sink. `--logtostderr=XXX` is an alias for `--log='sinks: {stderr: {filter: XXX}}'`. Accepts a valid [severity level]({% link {{ page.version.version }}/logging.md %}#logging-levels-severities). If no value is specified, by default messages related to server commands are logged to `stderr` at `INFO` severity and above, and messages related to client commands are logged to `stderr` at `WARNING` severity and above.

Setting `--logtostderr=NONE` disables logging to `stderr`.

**Default:** `UNKNOWN` +`--no-color` | An alias for the [`--log`]({% link {{ page.version.version }}/configure-logs.md %}#flag) flag, used to control whether log output to the `stderr` sink is colorized. `--no-color=XXX` is an alias for `--log='sinks: {stderr: {no-color: XXX}}'`. Accepts either `true` or `false`.<br>

When set to `false`, messages logged to `stderr` are colorized based on [severity level]({% link {{ page.version.version }}/logging.md %}#logging-levels-severities).

**Default:** `false` +`--redactable-logs` | An alias for the [`--log`]({% link {{ page.version.version }}/configure-logs.md %}#flag) flag, used to specify whether [redaction markers]({% link {{ page.version.version }}/configure-logs.md %}#redact-logs) are used in place of secret or sensitive information in log messages. `--redactable-logs=XXX` is an alias for `--log='file-defaults: {redactable: XXX}'`. Accepts `true` or `false`.<br>

**Default:** `false` +`--sql-audit-dir` | An alias for [`--log`]({% link {{ page.version.version }}/configure-logs.md %}#flag), used to optionally confine log output of the `SENSITIVE_ACCESS` [logging channel]({% link {{ page.version.version }}/logging-overview.md %}#logging-channels) to a separate directory. `--sql-audit-dir=XXX` is an alias for `--log='sinks: {file-groups: {sql-audit: {channels: SENSITIVE_ACCESS, dir: ...}}}'`.

Enabling `SENSITIVE_ACCESS` logs can negatively impact performance. As a result, we recommend using the `SENSITIVE_ACCESS` channel for security purposes only. For more information, refer to [Security and Audit Monitoring]({% link {{ page.version.version }}/logging-use-cases.md %}#security-and-audit-monitoring). diff --git a/src/current/_includes/v25.3/misc/movr-live-demo.md b/src/current/_includes/v25.3/misc/movr-live-demo.md new file mode 100644 index 00000000000..f8cfb24cb21 --- /dev/null +++ b/src/current/_includes/v25.3/misc/movr-live-demo.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_success}} +For a live demo of the deployed example application, see [https://movr.cloud](https://movr.cloud). +{{site.data.alerts.end}} \ No newline at end of file diff --git a/src/current/_includes/v25.3/misc/movr-schema.md b/src/current/_includes/v25.3/misc/movr-schema.md new file mode 100644 index 00000000000..e838bcf4572 --- /dev/null +++ b/src/current/_includes/v25.3/misc/movr-schema.md @@ -0,0 +1,12 @@ +The six tables in the `movr` database store user, vehicle, and ride data for MovR: + +Table | Description +--------|---------------------------- +`users` | People registered for the service. +`vehicles` | The pool of vehicles available for the service. +`rides` | When and where users have rented a vehicle. +`promo_codes` | Promotional codes for users. +`user_promo_codes` | Promotional codes in use by users. +`vehicle_location_histories` | Vehicle location history. + +Geo-partitioning schema diff --git a/src/current/_includes/v25.3/misc/movr-workflow.md b/src/current/_includes/v25.3/misc/movr-workflow.md new file mode 100644 index 00000000000..a682c099b70 --- /dev/null +++ b/src/current/_includes/v25.3/misc/movr-workflow.md @@ -0,0 +1,76 @@ +The workflow for MovR is as follows: + +1. A user loads the app and sees the 25 closest vehicles. + + For example: + + {% include_cached copy-clipboard.html %} + ~~~ sql + > SELECT id, city, status FROM vehicles WHERE city='amsterdam' LIMIT 25; + ~~~ + +1. The user signs up for the service. + + For example: + + {% include_cached copy-clipboard.html %} + ~~~ sql + > INSERT INTO users (id, name, address, city, credit_card) + VALUES ('66666666-6666-4400-8000-00000000000f', 'Mariah Lam', '88194 Angela Gardens Suite 60', 'amsterdam', '123245696'); + ~~~ + + {{site.data.alerts.callout_info}}Normally, universally unique identifiers (UUIDs) are generated automatically, but this example uses predetermined UUIDs so that they are easier to track across the statements that follow.{{site.data.alerts.end}} + +1. In some cases, the user adds their own vehicle to share. + + For example: + + {% include_cached copy-clipboard.html %} + ~~~ sql + > INSERT INTO vehicles (id, city, type, owner_id, creation_time, status, current_location, ext) + VALUES ('ffffffff-ffff-4400-8000-00000000000f', 'amsterdam', 'skateboard', '66666666-6666-4400-8000-00000000000f', current_timestamp(), 'available', '88194 Angela Gardens Suite 60', '{"color": "blue"}'); + ~~~ +1. More often, the user reserves a vehicle and starts a ride, applying a promo code, if available and valid.
+ + For example: + + {% include_cached copy-clipboard.html %} + ~~~ sql + > SELECT code FROM user_promo_codes WHERE user_id ='66666666-6666-4400-8000-00000000000f'; + ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ sql + > UPDATE vehicles SET status = 'in_use' WHERE id='bbbbbbbb-bbbb-4800-8000-00000000000b'; + ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ sql + > INSERT INTO rides (id, city, vehicle_city, rider_id, vehicle_id, start_address,end_address, start_time, end_time, revenue) + VALUES ('cd032f56-cf1a-4800-8000-00000000066f', 'amsterdam', 'amsterdam', '66666666-6666-4400-8000-00000000000f', 'bbbbbbbb-bbbb-4800-8000-00000000000b', '70458 Mary Crest', '', TIMESTAMP '2020-10-01 10:00:00.123456', NULL, 0.0); + ~~~ + +1. During the ride, MovR tracks the location of the vehicle. + + For example: + + {% include_cached copy-clipboard.html %} + ~~~ sql + > INSERT INTO vehicle_location_histories (city, ride_id, timestamp, lat, long) + VALUES ('amsterdam', 'cd032f56-cf1a-4800-8000-00000000066f', current_timestamp(), -101, 60); + ~~~ + +1. The user ends the ride and releases the vehicle. + + For example: + + {% include_cached copy-clipboard.html %} + ~~~ sql + > UPDATE vehicles SET status = 'available' WHERE id='bbbbbbbb-bbbb-4800-8000-00000000000b'; + ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ sql + > UPDATE rides SET end_address ='33862 Charles Junctions Apt. 49', end_time=TIMESTAMP '2020-10-01 10:30:00.123456', revenue=88.6 + WHERE id='cd032f56-cf1a-4800-8000-00000000066f'; + ~~~ diff --git a/src/current/_includes/v25.3/misc/multiregion-max-offset.md b/src/current/_includes/v25.3/misc/multiregion-max-offset.md new file mode 100644 index 00000000000..794ed12ca84 --- /dev/null +++ b/src/current/_includes/v25.3/misc/multiregion-max-offset.md @@ -0,0 +1 @@ +For new clusters using the [multi-region SQL abstractions]({% link {{ page.version.version }}/multiregion-overview.md %}), Cockroach Labs recommends lowering the [`--max-offset`]({% link {{ page.version.version }}/cockroach-start.md %}#flags-max-offset) setting to `250ms`. This setting is especially helpful for lowering the write latency of [global tables]({% link {{ page.version.version }}/table-localities.md %}#global-tables). Nodes can run with different values for `--max-offset`, but only for the purpose of updating the setting across the cluster using a rolling upgrade. diff --git a/src/current/_includes/v25.3/misc/note-egress-perimeter-cdc-backup.md b/src/current/_includes/v25.3/misc/note-egress-perimeter-cdc-backup.md new file mode 100644 index 00000000000..9b80cec66f6 --- /dev/null +++ b/src/current/_includes/v25.3/misc/note-egress-perimeter-cdc-backup.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_info}} +Cockroach Labs recommends enabling Egress Perimeter Controls on CockroachDB {{ site.data.products.advanced }} clusters to mitigate the risk of data exfiltration when accessing external resources, such as cloud storage for change data capture or backup and restore operations. See [Egress Perimeter Controls]({% link cockroachcloud/egress-perimeter-controls.md %}) for detail and setup instructions. +{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/misc/remove-user-callout.html b/src/current/_includes/v25.3/misc/remove-user-callout.html new file mode 100644 index 00000000000..925f83d779d --- /dev/null +++ b/src/current/_includes/v25.3/misc/remove-user-callout.html @@ -0,0 +1 @@ +Removing a user does not remove that user's privileges. 
Therefore, to prevent a future user with an identical username from inheriting an old user's privileges, it's important to revoke a user's privileges before or after removing the user. diff --git a/src/current/_includes/v25.3/misc/s3-compatible-warning.md b/src/current/_includes/v25.3/misc/s3-compatible-warning.md new file mode 100644 index 00000000000..5981d5b7609 --- /dev/null +++ b/src/current/_includes/v25.3/misc/s3-compatible-warning.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_danger}} +While Cockroach Labs actively tests Amazon S3, Google Cloud Storage, and Azure Storage, we **do not** test{% if page.name == "cloud-storage-authentication.md" %} S3-compatible services {% else %} [S3-compatible services]({% link {{ page.version.version }}/cloud-storage-authentication.md %}) {% endif %} (e.g., [MinIO](https://min.io/), [Red Hat Ceph](https://docs.ceph.com/en/pacific/radosgw/s3/)). +{{site.data.alerts.end}} \ No newline at end of file diff --git a/src/current/_includes/v25.3/misc/schema-change-stmt-note.md b/src/current/_includes/v25.3/misc/schema-change-stmt-note.md new file mode 100644 index 00000000000..792cd2b9e51 --- /dev/null +++ b/src/current/_includes/v25.3/misc/schema-change-stmt-note.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_info}} +The `{{ page.title }}` statement performs a schema change. For more information about how online schema changes work in CockroachDB, see [Online Schema Changes]({% link {{ page.version.version }}/online-schema-changes.md %}). +{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/misc/schema-change-view-job.md b/src/current/_includes/v25.3/misc/schema-change-view-job.md new file mode 100644 index 00000000000..37e46feb4d8 --- /dev/null +++ b/src/current/_includes/v25.3/misc/schema-change-view-job.md @@ -0,0 +1 @@ +This schema change statement is registered as a job. You can view long-running jobs with [`SHOW JOBS`]({% link {{ page.version.version }}/show-jobs.md %}). diff --git a/src/current/_includes/v25.3/misc/session-vars.md b/src/current/_includes/v25.3/misc/session-vars.md new file mode 100644 index 00000000000..ec0a2b1fcd7 --- /dev/null +++ b/src/current/_includes/v25.3/misc/session-vars.md @@ -0,0 +1,115 @@ +| Variable name | Description | Initial value | Modify with [`SET`]({% link {{ page.version.version }}/set-vars.md %})? | View with [`SHOW`]({% link {{ page.version.version }}/show-vars.md %})? | +|---|---|---|---|---| +| `always_distribute_full_scans` | When set to `on`, full table scans are always [distributed]({% link {{ page.version.version }}/architecture/sql-layer.md %}#distsql). | `off` | Yes | Yes | +| `application_name` | The current application name for statistics collection. | Empty string, or `cockroach` for sessions from the [built-in SQL client]({% link {{ page.version.version }}/cockroach-sql.md %}). | Yes | Yes | +| `autocommit_before_ddl` | When the [`autocommit_before_ddl` session setting]({% link {{page.version.version}}/set-vars.md %}#autocommit-before-ddl) is set to `on`, any schema change statement that is sent during an [explicit transaction]({% link {{page.version.version}}/transactions.md %}) will cause the transaction to [commit]({% link {{page.version.version}}/commit-transaction.md %}) before executing the schema change. This is useful because [CockroachDB does not fully support multiple schema changes in a single transaction]({% link {{ page.version.version }}/online-schema-changes.md %}#schema-changes-within-transactions). This setting is enabled by default.
To disable it for [all roles]({% link {{ page.version.version }}/alter-role.md %}#set-default-session-variable-values-for-all-users), issue the following statement: `ALTER ROLE ALL SET autocommit_before_ddl = false` | `on` | Yes | Yes | +| `bytea_output` | The [mode for conversions from `STRING` to `BYTES`]({% link {{ page.version.version }}/bytes.md %}#supported-conversions). | hex | Yes | Yes | +| `client_min_messages` | The severity level of notices displayed in the [SQL shell]({% link {{ page.version.version }}/cockroach-sql.md %}). Accepted values include `debug5`, `debug4`, `debug3`, `debug2`, `debug1`, `log`, `notice`, `warning`, and `error`. | `notice` | Yes | Yes | +| `copy_from_atomic_enabled` | If set to `on`, [`COPY FROM`]({% link {{ page.version.version }}/copy.md %}) statements are committed atomically, matching PostgreSQL behavior. If set to `off`, `COPY FROM` statements are segmented into batches of 100 rows unless issued within an explicit transaction, matching the CockroachDB behavior in versions prior to v22.2. | `on` | Yes | Yes | +| `copy_transaction_quality_of_service` | The default quality of service for [`COPY`]({% link {{ page.version.version }}/copy.md %}) statements in the current session. The supported options are `regular`, `critical`, and `background`. See [Set quality of service level]({% link {{ page.version.version }}/admission-control.md %}#copy-qos). | `background` | Yes | Yes | +| `cost_scans_with_default_col_size` | Whether to prevent the optimizer from considering column size when costing plans. | `false` | Yes | Yes | +| `crdb_version` | The version of CockroachDB. | CockroachDB OSS version | No | Yes | +| `database` | The [current database]({% link {{ page.version.version }}/sql-name-resolution.md %}#current-database). | Database in connection string, or empty if not specified. | Yes | Yes | +| `datestyle` | The input string format for [`DATE`]({% link {{ page.version.version }}/date.md %}) and [`TIMESTAMP`]({% link {{ page.version.version }}/timestamp.md %}) values. Accepted values include `ISO,MDY`, `ISO,DMY`, and `ISO,YMD`. | The value set by the `sql.defaults.datestyle` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) (`ISO,MDY`, by default). | Yes | Yes | +| `default_int_size` | The size, in bytes, of an [`INT`]({% link {{ page.version.version }}/int.md %}) type. | `8` | Yes | Yes | +| `default_text_search_config` | The dictionary used to normalize tokens and eliminate stop words when calling a [full-text search function]({% link {{ page.version.version }}/functions-and-operators.md %}#full-text-search-functions) without a configuration parameter. See [Full-Text Search]({% link {{ page.version.version }}/full-text-search.md %}). | `english` | Yes | Yes | +| `default_transaction_isolation` | The isolation level at which transactions in the session execute ([`SERIALIZABLE`]({% link {{ page.version.version }}/demo-serializable.md %}) or [`READ COMMITTED`]({% link {{ page.version.version }}/read-committed.md %})). See [Isolation levels]({% link {{ page.version.version }}/transactions.md %}#isolation-levels). | `SERIALIZABLE` | Yes | Yes | +| `default_transaction_priority` | The default transaction priority for the current session. The supported options are `low`, `normal`, and `high`. | `normal` | Yes | Yes | +| `default_transaction_quality_of_service` | The default transaction quality of service for the current session. The supported options are `regular`, `critical`, and `background`. 
See [Set quality of service level]({% link {{ page.version.version }}/admission-control.md %}#set-quality-of-service-level-for-a-session). | `regular` | Yes | Yes | +| `default_transaction_read_only` | The default transaction access mode for the current session.
If set to `on`, only read operations are allowed in transactions in the current session; if set to `off`, both read and write operations are allowed. See [`SET TRANSACTION`]({% link {{ page.version.version }}/set-transaction.md %}) for more details. | `off` | Yes | Yes | +| `default_transaction_use_follower_reads` | If set to `on`, all read-only transactions use [`AS OF SYSTEM TIME follower_read_timestamp()`]({% link {{ page.version.version }}/as-of-system-time.md %}) to allow the transaction to use follower reads.<br>
If set to `off`, read-only transactions will only use follower reads if an `AS OF SYSTEM TIME` clause is specified in the statement, with an interval of at least 4.8 seconds. | `off` | Yes | Yes | +| `disable_changefeed_replication` | When `true`, [changefeeds]({% link {{ page.version.version }}/changefeed-messages.md %}#filtering-changefeed-messages) will not emit messages for any changes (e.g., `INSERT`, `UPDATE`) issued to watched tables during that session. | `false` | Yes | Yes | +| `disallow_full_table_scans` | If set to `on`, queries on "large" tables with a row count greater than [`large_full_scan_rows`](#large-full-scan-rows) will not use full table or index scans. If no other query plan is possible, queries will return an error message. This setting does not apply to internal queries, which may plan full table or index scans without checking the session variable. | `off` | Yes | Yes | +| `distribute_group_by_row_count_threshold` | Minimum number of rows that a `GROUP BY` operation must process in order to be [distributed]({% link {{ page.version.version }}/architecture/sql-layer.md %}#distsql). | `1000` | Yes | Yes | +| `distribute_scan_row_count_threshold` | Minimum number of rows that a scan operation must process in order to be [distributed]({% link {{ page.version.version }}/architecture/sql-layer.md %}#distsql). This means that full table scans will not be distributed if they read fewer than this number of rows. To always distribute full table scans, set [`always_distribute_full_scans`](#always-distribute-full-scans). | `10000` | Yes | Yes | +| `distribute_sort_row_count_threshold` | Minimum number of rows that a sort operation must process in order to be [distributed]({% link {{ page.version.version }}/architecture/sql-layer.md %}#distsql). | `1000` | Yes | Yes | +| `distsql` | The query distribution mode for the session. By default, CockroachDB determines which queries are faster to execute if distributed across multiple nodes. Distribution preferences for `GROUP BY`, scan, and sort operations are set with [`distribute_group_by_row_count_threshold`](#distribute-group-by-row-count-threshold), [`distribute_scan_row_count_threshold.`](#distribute-scan-row-count-threshold) and [`distribute_sort_row_count_threshold.`](#distribute-sort-row-count-threshold), respectively. All other queries are run through the gateway node. | `auto` | Yes | Yes | +| `enable_auto_rehoming` | When enabled, the [home regions]({% link {{ page.version.version }}/alter-table.md %}#crdb_region) of rows in [`REGIONAL BY ROW`]({% link {{ page.version.version }}/alter-table.md %}#set-the-table-locality-to-regional-by-row) tables are automatically set to the region of the [gateway node]({% link {{ page.version.version }}/ui-sessions-page.md %}#session-details-gateway-node) from which any [`UPDATE`]({% link {{ page.version.version }}/update.md %}) or [`UPSERT`]({% link {{ page.version.version }}/upsert.md %}) statements that operate on those rows originate. | `off` | Yes | Yes | +| `enable_durable_locking_for_serializable` | Indicates whether CockroachDB replicates [`FOR UPDATE` and `FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}#lock-strengths) locks via [Raft]({% link {{ page.version.version }}/architecture/replication-layer.md %}#raft), allowing locks to be preserved when leases are transferred. Note that replicating `FOR UPDATE` and `FOR SHARE` locks will add latency to those statements. 
This setting only affects `SERIALIZABLE` transactions and matches the default `READ COMMITTED` behavior when enabled. | `off` | Yes | Yes | +| `enable_implicit_fk_locking_for_serializable` | Indicates whether CockroachDB uses [shared locks]({% link {{ page.version.version }}/select-for-update.md %}#lock-strengths) to perform [foreign key]({% link {{ page.version.version }}/foreign-key.md %}) checks. To take effect, the [`enable_shared_locking_for_serializable`](#enable-shared-locking-for-serializable) setting must also be enabled. This setting only affects `SERIALIZABLE` transactions and matches the default `READ COMMITTED` behavior when enabled. | `off` | Yes | Yes | +| `enable_implicit_select_for_update` | Indicates whether [`UPDATE`]({% link {{ page.version.version }}/update.md %}), [`UPSERT`]({% link {{ page.version.version }}/upsert.md %}), and [`DELETE`]({% link {{ page.version.version }}/delete.md %}) statements acquire locks using the `FOR UPDATE` locking mode during their initial row scan, which improves performance for contended workloads.

For more information about how `FOR UPDATE` locking works, see the documentation for [`SELECT FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}). | `on` | Yes | Yes | +| `enable_implicit_transaction_for_batch_statements` | Indicates whether multiple statements in a single query (a "batch statement") will all run in the same implicit transaction, which matches the PostgreSQL wire protocol. | `on` | Yes | Yes | +| `enable_insert_fast_path` | Indicates whether CockroachDB will use a specialized execution operator for inserting into a table. We recommend leaving this setting `on`. | `on` | Yes | Yes | +| `enable_shared_locking_for_serializable` | Indicates whether [shared locks]({% link {{ page.version.version }}/select-for-update.md %}#lock-strengths) are enabled for `SERIALIZABLE` transactions. When `off`, `SELECT` statements using `FOR SHARE` are still permitted under `SERIALIZABLE` isolation, but silently do not lock. | `off` | Yes | Yes | +| `enable_super_regions` | When enabled, you can define a super region: a set of [database regions]({% link {{ page.version.version }}/multiregion-overview.md %}#super-regions) on a multi-region cluster such that your [schema objects]({% link {{ page.version.version }}/schema-design-overview.md %}#database-schema-objects) will have all of their [replicas]({% link {{ page.version.version }}/architecture/overview.md %}#architecture-replica) stored _only_ in regions that are members of the super region. | `off` | Yes | Yes | +| `enable_zigzag_join` | Indicates whether the [cost-based optimizer]({% link {{ page.version.version }}/cost-based-optimizer.md %}) will plan certain queries using a [zigzag merge join algorithm]({% link {{ page.version.version }}/cost-based-optimizer.md %}#zigzag-joins), which searches for the desired intersection by jumping back and forth between the indexes based on the fact that after constraining indexes, they share an ordering. | `on` | Yes | Yes | +| `enforce_home_region` | If set to `on`, queries return an error and in some cases a suggested resolution if they cannot run entirely in their home region. This can occur if a query has no home region (for example, if it reads from different home regions in a [regional by row table]({% link {{ page.version.version }}/table-localities.md %}#regional-by-row-tables)) or a query's home region differs from the [gateway]({% link {{ page.version.version }}/architecture/life-of-a-distributed-transaction.md %}#gateway) region. Note that only tables with `ZONE` [survivability]({% link {{ page.version.version }}/multiregion-survival-goals.md %}#when-to-use-zone-vs-region-survival-goals) can be scanned without error when this is enabled. For more information about home regions, see [Table localities]({% link {{ page.version.version }}/multiregion-overview.md %}#table-localities).

This feature is in preview. It is subject to change. | `off` | Yes | Yes | +| `enforce_home_region_follower_reads_enabled` | If `on` while the [`enforce_home_region`]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-whether-queries-are-limited-to-a-single-region) setting is `on`, allows `enforce_home_region` to perform `AS OF SYSTEM TIME` [follower reads]({% link {{ page.version.version }}/follower-reads.md %}) to detect and report a query's [home region]({% link {{ page.version.version }}/multiregion-overview.md %}#table-localities), if any.

This feature is in preview. It is subject to change. | `off` | Yes | Yes | +| `expect_and_ignore_not_visible_columns_in_copy` | If `on`, [`COPY FROM`]({% link {{ page.version.version }}/copy.md %}) with no column specifiers will assume that hidden columns are in the copy data, but will ignore them when applying `COPY FROM`. | `off` | Yes | Yes | +| `extra_float_digits` | The number of digits displayed for floating-point values. Only values between `-15` and `3` are supported. | `0` | Yes | Yes | +| `force_savepoint_restart` | When set to `true`, allows the [`SAVEPOINT`]({% link {{ page.version.version }}/savepoint.md %}) statement to accept any name for a savepoint. | `off` | Yes | Yes | +| `foreign_key_cascades_limit` | Limits the number of [cascading operations]({% link {{ page.version.version }}/foreign-key.md %}#use-a-foreign-key-constraint-with-cascade) that run as part of a single query. | `10000` | Yes | Yes | +| `idle_in_session_timeout` | Automatically terminates sessions that idle past the specified threshold.

When set to `0`, the session will not time out. | The value set by the `sql.defaults.idle_in_session_timeout` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) (`0s`, by default). | Yes | Yes | +| `idle_in_transaction_session_timeout` | Automatically terminates sessions that are idle in a transaction past the specified threshold.<br>

When set to `0`, the session will not time out. | The value set by the `sql.defaults.idle_in_transaction_session_timeout` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) (`0s`, by default). | Yes | Yes | +| `index_recommendations_enabled` | If `true`, display recommendations to create indexes required to eliminate full table scans.<br>
For more details, see [Default statement plans]({% link {{ page.version.version }}/explain.md %}#default-statement-plans). | `true` | Yes | Yes | +| `inject_retry_errors_enabled` | If `true`, any statement executed inside of an explicit transaction (with the exception of [`SET`]({% link {{ page.version.version }}/set-vars.md %}) statements) will return a transaction retry error. If the client retries the transaction using the special [`cockroach_restart SAVEPOINT` name]({% link {{ page.version.version }}/savepoint.md %}#savepoints-for-client-side-transaction-retries), after the 3rd retry error, the transaction will proceed as normal. Otherwise, the errors will continue until `inject_retry_errors_enabled` is set to `false`. For more details, see [Test transaction retry logic]({% link {{ page.version.version }}/transaction-retry-error-example.md %}#test-transaction-retry-logic). | `false` | Yes | Yes | +| `intervalstyle` | The input string format for [`INTERVAL`]({% link {{ page.version.version }}/interval.md %}) values. Accepted values include `postgres`, `iso_8601`, and `sql_standard`. | The value set by the `sql.defaults.intervalstyle` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) (`postgres`, by default). | Yes | Yes | +| `is_superuser` | If `on` or `true`, the current user is a member of the [`admin` role]({% link {{ page.version.version }}/security-reference/authorization.md %}#admin-role). | User-dependent | No | Yes | +| `large_full_scan_rows` | Determines which tables are considered "large" such that `disallow_full_table_scans` rejects full table or index scans of "large" tables. The default value is `0`, which disallows all full table or index scans. | User-dependent | No | Yes | +| `legacy_varchar_typing` | If `on`, type checking and overload resolution for [`VARCHAR`]({% link {{ page.version.version }}/string.md %}#related-types) types ignore overloads that cause errors, allowing comparisons between `VARCHAR` and non-`STRING`-like placeholder values to execute successfully. If `off`, type checking of these comparisons is more strict and must be handled with explicit type casts. | `off` | Yes | Yes | +| `locality` | The location of the node.

For more information, see [Locality]({% link {{ page.version.version }}/cockroach-start.md %}#locality). | Node-dependent | No | Yes | +| `lock_timeout` | The amount of time a query can spend acquiring or waiting for a single [row-level lock]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#concurrency-control).
In CockroachDB, unlike in PostgreSQL, non-locking reads wait for conflicting locks to be released. As a result, the `lock_timeout` configuration applies to writes, and to locking and non-locking reads in read-write and read-only transactions.
If `lock_timeout = 0`, queries do not time out due to lock acquisitions. | The value set by the `sql.defaults.lock_timeout` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) (`0`, by default). | Yes | Yes | +| `multiple_active_portals_enabled` | Whether to enable the [multiple active portals]({% link {{ page.version.version }}/postgresql-compatibility.md %}#multiple-active-portals) pgwire feature. | `false` | Yes | Yes | +| `node_id` | The ID of the node currently connected to.<br>

This variable is particularly useful for verifying load balanced connections. | Node-dependent | No | Yes | +| `null_ordered_last` | Set the default ordering of `NULL`s. The default order is `NULL`s first for ascending order and `NULL`s last for descending order. | `false` | Yes | Yes | +| `optimizer_merge_joins_enabled` | If `on`, the optimizer will explore query plans with merge joins. | `on` | Yes | Yes | +| `optimizer_push_offset_into_index_join` | If `on`, the optimizer will attempt to push offset expressions into index join expressions to produce more efficient query plans. | `on` | Yes | Yes | +| `optimizer_use_forecasts` | If `on`, the optimizer uses forecasted statistics for query planning. | `on` | Yes | Yes | +| `optimizer_use_histograms` | If `on`, the optimizer uses collected histograms for cardinality estimation. | `on` | No | Yes | +| `optimizer_use_improved_multi_column_selectivity_estimate` | If `on`, the optimizer uses an improved selectivity estimate for multi-column predicates. | `on` | Yes | Yes | +| `optimizer_use_improved_zigzag_join_costing` | If `on`, the cost of [zigzag joins]({% link {{ page.version.version }}/cost-based-optimizer.md %}#zigzag-joins) is updated so they will be never be chosen over scans unless they produce fewer rows. To take effect, the [`enable_zigzag_join`](#enable-zigzag-join) setting must also be enabled. | `on` | Yes | Yes | +| `optimizer_use_lock_op_for_serializable` | If `on`, the optimizer uses a `Lock` operator to construct query plans for `SELECT` statements using the [`FOR UPDATE` and `FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) clauses. This setting only affects `SERIALIZABLE` transactions. `READ COMMITTED` transactions are evaluated with the `Lock` operator regardless of the setting. | `off` | Yes | Yes | +| `optimizer_use_multicol_stats` | If `on`, the optimizer uses collected multi-column statistics for cardinality estimation. | `on` | No | Yes | +| `optimizer_use_not_visible_indexes` | If `on`, the optimizer uses not visible indexes for planning. | `off` | No | Yes | +| `optimizer_use_virtual_computed_column_stats` | If `on`, the optimizer uses table statistics on [virtual computed columns]({% link {{ page.version.version }}/computed-columns.md %}#virtual-computed-columns). | `on` | Yes | Yes | +| `plan_cache_mode` | The type of plan that is cached in the [query plan cache]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache): `auto`, `force_generic_plan`, or `force_custom_plan`.

For more information, refer to [Query plan type]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-type). | `auto` | Yes | Yes | +| `plpgsql_use_strict_into` | If `on`, PL/pgSQL [`SELECT ... INTO` and `RETURNING ... INTO` statements]({% link {{ page.version.version }}/plpgsql.md %}#assign-a-result-to-a-variable) behave as though the `STRICT` option is specified. This causes the SQL statement to error if it does not return exactly one row. | `off` | Yes | Yes | +| `pg_trgm.similarity_threshold` | The threshold above which a [`%`]({% link {{ page.version.version }}/functions-and-operators.md %}#operators) string comparison returns `true`. The value must be between `0` and `1`. For more information, see [Trigram Indexes]({% link {{ page.version.version }}/trigram-indexes.md %}). | `0.3` | Yes | Yes | +| `prefer_lookup_joins_for_fks` | If `on`, the optimizer prefers [`lookup joins`]({% link {{ page.version.version }}/joins.md %}#lookup-joins) to [`merge joins`]({% link {{ page.version.version }}/joins.md %}#merge-joins) when performing [`foreign key`]({% link {{ page.version.version }}/foreign-key.md %}) checks. | `off` | Yes | Yes | +| `reorder_joins_limit` | Maximum number of joins that the optimizer will attempt to reorder when searching for an optimal query execution plan.<br>

For more information, see [Join reordering]({% link {{ page.version.version }}/cost-based-optimizer.md %}#join-reordering). | `8` | Yes | Yes | +| `require_explicit_primary_keys` | If `on`, CockroachDB throws an error for all tables created without an explicit primary key defined. | `off` | Yes | Yes | +| `search_path` | A list of schemas that will be searched to resolve unqualified table or function names.
For more details, see [SQL name resolution]({% link {{ page.version.version }}/sql-name-resolution.md %}). | `public` | Yes | Yes | +| `serial_normalization` | Specifies the default handling of [`SERIAL`]({% link {{ page.version.version }}/serial.md %}) in table definitions. Valid options include `'rowid'`, `'virtual_sequence'`, `sql_sequence`, `sql_sequence_cached`, and `unordered_rowid`.
If set to `'virtual_sequence'`, the `SERIAL` type auto-creates a sequence for [better compatibility with Hibernate sequences](https://forum.cockroachlabs.com/t/hibernate-sequence-generator-returns-negative-number-and-ignore-unique-rowid/1885).
If set to `sql_sequence_cached`, you can use the `sql.defaults.serial_sequences_cache_size` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) to control the number of values to cache in a user's session, with a default of 256.
If set to `unordered_rowid`, the `SERIAL` type generates a globally unique 64-bit integer (a combination of the insert timestamp and the ID of the node executing the statement) that does not have unique ordering. | `'rowid'` | Yes | Yes | +| `server_version` | The version of PostgreSQL that CockroachDB emulates. | Version-dependent | No | Yes | +| `server_version_num` | The version of PostgreSQL that CockroachDB emulates. | Version-dependent | Yes | Yes | +| `session_id` | The ID of the current session. | Session-dependent | No | Yes | +| `session_user` | The user connected for the current session. | User in connection string | No | Yes | +| `sql_safe_updates` | If `true`, the following potentially unsafe SQL statements are disallowed: [`DROP DATABASE`]({% link {{ page.version.version }}/drop-database.md %}) of a non-empty database and all dependent objects; [`DELETE`]({% link {{ page.version.version }}/delete.md %}) and [`UPDATE`]({% link {{ page.version.version }}/update.md %}) without a `WHERE` clause, unless a [`LIMIT`]({% link {{ page.version.version }}/limit-offset.md %}) clause is included; [`SELECT ... FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) and [`SELECT ... FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) without a `WHERE` or [`LIMIT`]({% link {{ page.version.version }}/limit-offset.md %}) clause; and [`ALTER TABLE ... DROP COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#drop-column).
For more details, refer to [Allow potentially unsafe SQL statements]({% link {{ page.version.version }}/cockroach-sql.md %}#allow-potentially-unsafe-sql-statements). | `true` for interactive sessions from the [built-in SQL client]({% link {{ page.version.version }}/cockroach-sql.md %}),
`false` for sessions from other clients | Yes | Yes | +| `statement_timeout` | The amount of time a statement can run before being stopped.
This value can be an `int` (e.g., `10`) and will be interpreted as milliseconds. It can also be an interval or string argument, where the string can be parsed as a valid interval (e.g., `'4s'`).
A value of `0` turns it off. | The value set by the `sql.defaults.statement_timeout` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) (`0s`, by default). | Yes | Yes | +| `stub_catalog_tables` | If `off`, querying an unimplemented, empty [`pg_catalog`]({% link {{ page.version.version }}/pg-catalog.md %}) table will result in an error, as is the case in v20.2 and earlier. If `on`, querying an unimplemented, empty `pg_catalog` table simply returns no rows. | `on` | Yes | Yes | +| `timezone` | The default time zone for the current session. | `UTC` | Yes | Yes | +| `tracing` | The trace recording state. | `off` | Yes | Yes | +| `transaction_isolation` | The isolation level at which the transaction executes ([`SERIALIZABLE`]({% link {{ page.version.version }}/demo-serializable.md %}) or [`READ COMMITTED`]({% link {{ page.version.version }}/read-committed.md %})). See [Isolation levels]({% link {{ page.version.version }}/transactions.md %}#isolation-levels). | `SERIALIZABLE` | Yes | Yes | +| `transaction_priority` | The priority of the current transaction. See [Transactions: Transaction priorities]({% link {{ page.version.version }}/transactions.md %}#transaction-priorities) for more details. This session variable was called `transaction priority` (with a space) in CockroachDB 1.x. It has been renamed for compatibility with PostgreSQL. | `NORMAL` | Yes | Yes | +| `transaction_read_only` | The access mode of the current transaction. See [`SET TRANSACTION`]({% link {{ page.version.version }}/set-transaction.md %}) for more details. | `off` | Yes | Yes | +| `transaction_rows_read_err` | The limit for the number of rows read by a SQL transaction. If this value is exceeded, the transaction will fail (or the event will be logged to `SQL_INTERNAL_PERF` for internal transactions). | `0` | Yes | Yes | +| `transaction_rows_read_log` | The threshold for the number of rows read by a SQL transaction. If this value is exceeded, the event will be logged to `SQL_PERF` (or `SQL_INTERNAL_PERF` for internal transactions). | `0` | Yes | Yes | +| `transaction_rows_written_err` | The limit for the number of rows written by a SQL transaction. If this value is exceeded, the transaction will fail (or the event will be logged to `SQL_INTERNAL_PERF` for internal transactions). | `0` | Yes | Yes | +| `transaction_rows_written_log` | The threshold for the number of rows written by a SQL transaction. If this value is exceeded, the event will be logged to `SQL_PERF` (or `SQL_INTERNAL_PERF` for internal transactions). | `0` | Yes | Yes | +| `transaction_status` | The state of the current transaction. See [Transactions]({% link {{ page.version.version }}/transactions.md %}) for more details. | `NoTxn` | No | Yes | +| `transaction_timeout` | Aborts an explicit [transaction]({% link {{ page.version.version }}/transactions.md %}) when it runs longer than the configured duration. Stored in milliseconds; can be expressed in milliseconds or as an [`INTERVAL`]({% link {{ page.version.version }}/interval.md %}). | `0` | Yes | Yes | +| `troubleshooting_mode_enabled` | When enabled, avoid performing additional work on queries, such as collecting and emitting telemetry data. This session variable is particularly useful when the cluster is experiencing issues, unavailability, or failure. | `off` | Yes | Yes | +| `use_declarative_schema_changer` | Whether to use the declarative schema changer for supported statements. | `on` | Yes | Yes | +| `vector_search_beam_size` | The size of the vector search beam, which determines how many vector partitions are considered during query execution. 
For details, refer to [Tune vector indexes]({% link {{ page.version.version }}/vector-indexes.md %}#tune-vector-indexes). | `32` | Yes | Yes | +| `vectorize` | The vectorized execution engine mode. Options include `on` and `off`. For more details, see [Configure vectorized execution for CockroachDB]({% link {{ page.version.version }}/vectorized-execution.md %}#configure-vectorized-execution). | `on` | Yes | Yes | +| `virtual_cluster_name` | The name of the virtual cluster that the SQL client is connected to. | Session-dependent | No | Yes | + +The following session variables are exposed only for backwards compatibility with earlier CockroachDB releases and have no impact on how CockroachDB runs: + +| Variable name | Initial value | Modify with [`SET`]({% link {{ page.version.version }}/set-vars.md %})? | View with [`SHOW`]({% link {{ page.version.version }}/show-vars.md %})? | +|---|---|---|---| +| `backslash_quote` | `safe_encoding` | No | Yes | +| `client_encoding` | `UTF8` | No | Yes | +| `default_tablespace` | | No | Yes | +| `enable_drop_enum_value` | `off` | Yes | Yes | +| `enable_seqscan` | `on` | Yes | Yes | +| `escape_string_warning` | `on` | No | Yes | +| `experimental_enable_hash_sharded_indexes` | `off` | Yes | Yes | +| `integer_datetimes` | `on` | No | Yes | +| `max_identifier_length` | `128` | No | Yes | +| `max_index_keys` | `32` | No | Yes | +| `row_security` | `off` | No | Yes | +| `server_encoding` | `UTF8` | Yes | Yes | +| `standard_conforming_strings` | `on` | No | Yes | +| `synchronize_seqscans` | `on` | No | Yes | +| `synchronous_commit` | `on` | Yes | Yes | diff --git a/src/current/_includes/v25.3/misc/sorting-delete-output.md b/src/current/_includes/v25.3/misc/sorting-delete-output.md new file mode 100644 index 00000000000..b48a138a279 --- /dev/null +++ b/src/current/_includes/v25.3/misc/sorting-delete-output.md @@ -0,0 +1,9 @@ +To sort the output of a `DELETE` statement, use: + +{% include_cached copy-clipboard.html %} +~~~ sql +> WITH a AS (DELETE ... RETURNING ...) + SELECT ... FROM a ORDER BY ... +~~~ + +For an example, see [Sort and return deleted rows]({% link {{ page.version.version }}/delete.md %}#sort-and-return-deleted-rows). diff --git a/src/current/_includes/v25.3/misc/source-privileges.md b/src/current/_includes/v25.3/misc/source-privileges.md new file mode 100644 index 00000000000..543801a7201 --- /dev/null +++ b/src/current/_includes/v25.3/misc/source-privileges.md @@ -0,0 +1,12 @@ +The source file URL does _not_ require the [`admin` role]({% link {{ page.version.version }}/security-reference/authorization.md %}#admin-role) in the following scenarios: + +- S3 and GS using `SPECIFIED` (and not `IMPLICIT`) credentials. Azure is always `SPECIFIED` by default. +- [Userfile]({% link {{ page.version.version }}/use-userfile-storage.md %}) + +The source file URL _does_ require the [`admin` role]({% link {{ page.version.version }}/security-reference/authorization.md %}#admin-role) in the following scenarios: + +- S3 or GS using `IMPLICIT` credentials +- Use of a [custom endpoint](https://docs.aws.amazon.com/sdk-for-go/api/aws/endpoints/) on S3 +- [Nodelocal]({% link {{ page.version.version }}/cockroach-nodelocal-upload.md %}), [HTTP]({% link {{ page.version.version }}/use-a-local-file-server.md %}), or [HTTPS]({% link {{ page.version.version }}/use-a-local-file-server.md %}) + +We recommend using [cloud storage]({% link {{ page.version.version }}/use-cloud-storage.md %}).
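+
+For illustration, a minimal, hypothetical `IMPORT INTO` that passes `SPECIFIED` S3 credentials inline, and therefore does not require the `admin` role (the table name, bucket, object, and key values are placeholders):
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+IMPORT INTO my_table
+    CSV DATA ('s3://my-bucket/data.csv?AWS_ACCESS_KEY_ID={ACCESS_KEY}&AWS_SECRET_ACCESS_KEY={SECRET_KEY}&AUTH=specified');
+~~~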
diff --git a/src/current/_includes/v25.3/misc/storage-class-glacier-incremental.md b/src/current/_includes/v25.3/misc/storage-class-glacier-incremental.md new file mode 100644 index 00000000000..9daebd72c14 --- /dev/null +++ b/src/current/_includes/v25.3/misc/storage-class-glacier-incremental.md @@ -0,0 +1,5 @@ +{{site.data.alerts.callout_danger}} +[Incremental backups]({% link {{ page.version.version }}/take-full-and-incremental-backups.md %}#incremental-backups) are **not** compatible with the [S3 Glacier Flexible Retrieval or Glacier Deep Archive storage classes](https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html#sc-glacier). Incremental backups require reading previous backups on an ad-hoc basis, which is not possible with backup files already in Glacier Flexible Retrieval or Glacier Deep Archive. This is because these storage classes do not allow immediate access to an S3 object without first [restoring the archived objects](https://docs.aws.amazon.com/AmazonS3/latest/userguide/restoring-objects.html) to their S3 bucket. + +Refer to [Incremental backups and storage classes]({% link {{ page.version.version }}/use-cloud-storage.md %}#incremental-backups-and-archive-storage-classes) for more detail. +{{site.data.alerts.end}} \ No newline at end of file diff --git a/src/current/_includes/v25.3/misc/storage-classes.md b/src/current/_includes/v25.3/misc/storage-classes.md new file mode 100644 index 00000000000..c4dafce941e --- /dev/null +++ b/src/current/_includes/v25.3/misc/storage-classes.md @@ -0,0 +1 @@ +Use the parameter to set one of the [storage classes](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html#AmazonS3-PutObject-request-header-StorageClass) listed in Amazon's documentation. For more general usage information, see Amazon's [Using Amazon S3 storage classes](https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html) documentation. diff --git a/src/current/_includes/v25.3/misc/table-storage-parameters.md b/src/current/_includes/v25.3/misc/table-storage-parameters.md new file mode 100644 index 00000000000..3ca7f601648 --- /dev/null +++ b/src/current/_includes/v25.3/misc/table-storage-parameters.md @@ -0,0 +1,15 @@ +| Parameter name | Description | Data type | Default value | +|------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------|-----------|---------------| +| `exclude_data_from_backup` | Exclude the data in this table from any future backups. | Boolean | `false` | +| `schema_locked` | Disallow [schema changes]({% link {{ page.version.version }}/online-schema-changes.md %}) on this table. Enabling `schema_locked` can help [improve performance of changefeeds]({% link {{ page.version.version }}/create-changefeed.md %}#disallow-schema-changes-on-tables-to-improve-changefeed-performance) running on this table. | Boolean | `false` | +| `sql_stats_automatic_collection_enabled` | Enable [automatic statistics collection]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-tables) for this table. | Boolean | `true` | +| `sql_stats_automatic_collection_min_stale_rows` | Minimum number of stale rows in this table that will trigger a statistics refresh. | Integer | 500 | +| `sql_stats_automatic_collection_fraction_stale_rows` | Fraction of stale rows in this table that will trigger a statistics refresh.
| Float | 0.2 | +| `sql_stats_forecasts_enabled` | Enable [forecasted statistics]({% link {{ page.version.version }}/show-statistics.md %}#display-forecasted-statistics) collection for this table. | Boolean | `true` | + +The following parameters are included for PostgreSQL compatibility and do not affect how CockroachDB runs: + +- `autovacuum_enabled` +- `fillfactor` + +For the storage parameters that affect how [Row-Level TTL]({% link {{ page.version.version }}/row-level-ttl.md %}) works, see [TTL storage parameters]({% link {{ page.version.version }}/row-level-ttl.md %}#ttl-storage-parameters). \ No newline at end of file diff --git a/src/current/_includes/v25.3/misc/tooling.md b/src/current/_includes/v25.3/misc/tooling.md new file mode 100644 index 00000000000..f587b47babf --- /dev/null +++ b/src/current/_includes/v25.3/misc/tooling.md @@ -0,0 +1,107 @@ +## Support levels + +Cockroach Labs has partnered with open-source projects, vendors, and individuals to offer the following levels of support with third-party tools: + +- **Full support** indicates that Cockroach Labs is committed to maintaining compatibility with the vast majority of the tool's features. CockroachDB is regularly tested against the latest version documented in the table below. +- **Partial support** indicates that Cockroach Labs is working towards full support for the tool. The primary features of the tool are compatible with CockroachDB (e.g., connecting and basic database operations), but full integration may require additional steps, lack support for all features, or exhibit unexpected behavior. +- **Partner supported** indicates that Cockroach Labs has a partnership with a third-party vendor that provides support for the CockroachDB integration with their tool. + +{{site.data.alerts.callout_danger}} +Tools, drivers, or frameworks are considered **unsupported** if: + +- The tool, driver, or framework is not listed on this page. +- The version of a supported tool, driver, or framework is not listed on this page. + +If you encounter issues when using unsupported tools, drivers, or frameworks, contact the maintainer directly. + +Cockroach Labs provides "best effort" support for tools, drivers, and frameworks that are not officially supported. This means that while we will do our best to assist you, we may not be able to fully troubleshoot errors in your deployment. + +Customers should contact their account team before moving production workloads that use unsupported drivers to CockroachDB. +{{site.data.alerts.end}} + +{{site.data.alerts.callout_info}} +Unless explicitly stated, support for a [driver](#drivers) or [data access framework](#data-access-frameworks-e-g-orms) does not include [automatic, client-side transaction retry handling]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}#client-side-retry-handling). For client-side transaction retry handling samples, see [Example Apps]({% link {{ page.version.version }}/example-apps.md %}). +{{site.data.alerts.end}} + +If you encounter problems using CockroachDB with any of the tools listed on this page, please [open an issue](https://github.com/cockroachdb/cockroach/issues/new) with details to help us make progress toward better support. + +For a list of tools supported by the CockroachDB community, see [Third-Party Tools Supported by the Community]({% link {{ page.version.version }}/community-tooling.md %}).
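+
+As a sketch of what client-side retry handling involves (the adapters called out below automate it for you), the SQL-level protocol wraps a transaction in a named savepoint and retries from it when CockroachDB returns a retryable error (SQLSTATE `40001`); the `UPSERT` and the `accounts` table are placeholders:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+BEGIN;
+SAVEPOINT cockroach_restart;
+UPSERT INTO accounts (id, balance) VALUES (1, 500.00);
+RELEASE SAVEPOINT cockroach_restart;
+COMMIT;
+-- On a 40001 error, issue ROLLBACK TO SAVEPOINT cockroach_restart
+-- and re-run the statements instead of starting a new transaction.
+~~~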
+
+## Drivers
+
+| Language | Driver | Latest tested version | Support level | CockroachDB adapter | Tutorial |
+|----------+--------+-----------------------+---------------------+---------------------+----------|
+| C | [libpq](http://www.postgresql.org/docs/13/static/libpq.html) | PostgreSQL 13 | Partial | N/A | N/A |
+| C# (.NET) | [Npgsql](https://www.nuget.org/packages/Npgsql/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/npgsql.go ||var npgsqlSupportedTag = "v||"\n\n %} | Full | N/A | [Build a C# App with CockroachDB (Npgsql)](build-a-csharp-app-with-cockroachdb.html) |
+| Go | [pgx](https://github.com/jackc/pgx/releases)<br><br>[pq](https://github.com/lib/pq) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/pgx.go ||var supportedPGXTag = "||"\n\n %}<br>(use latest version of CockroachDB adapter)<br>{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/libpq.go ||var libPQSupportedTag = "||"\n\n %} | Full<br><br>Full | [`crdbpgx`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbpgx)<br>(includes client-side transaction retry handling)<br>N/A | [Build a Go App with CockroachDB (pgx)](build-a-go-app-with-cockroachdb.html)<br><br>[Build a Go App with CockroachDB (pq)](build-a-go-app-with-cockroachdb-pq.html) |
+| Java | [JDBC](https://jdbc.postgresql.org/download/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/pgjdbc.go ||var supportedPGJDBCTag = "||"\n\n %} | Full | N/A | [Build a Java App with CockroachDB (JDBC)](build-a-java-app-with-cockroachdb.html) |
+| JavaScript | [pg](https://www.npmjs.com/package/pg) | 8.2.1 | Full | N/A | [Build a Node.js App with CockroachDB (pg)](build-a-nodejs-app-with-cockroachdb.html) |
+| Python | [psycopg3](https://www.psycopg.org/psycopg3/docs/)<br><br>[psycopg2](https://www.psycopg.org/docs/install.html)<br><br>[asyncpg](https://magicstack.github.io/asyncpg/current/index.html) | 3.0.16<br><br>2.8.6<br><br>{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/asyncpg.go || var asyncpgSupportedTag = "||"\n\n %} | Full<br><br>Full<br><br>Partial | N/A<br><br>N/A<br><br>N/A | [Build a Python App with CockroachDB (psycopg3)](build-a-python-app-with-cockroachdb-psycopg3.html)<br><br>[Build a Python App with CockroachDB (psycopg2)](build-a-python-app-with-cockroachdb.html)<br><br>[Build a Python App with CockroachDB (asyncpg)](build-a-python-app-with-cockroachdb-asyncpg.html) |
+| Ruby | [pg](https://rubygems.org/gems/pg) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/ruby_pg.go ||var rubyPGVersion = "||"\n\n %} | Full | N/A | [Build a Ruby App with CockroachDB (pg)](build-a-ruby-app-with-cockroachdb.html) |
+| Rust | [rust-postgres](https://github.com/sfackler/rust-postgres) | 0.19.2 | Partial | N/A | [Build a Rust App with CockroachDB]({% link {{ page.version.version }}/build-a-rust-app-with-cockroachdb.md %}) |
+
+## Data access frameworks (e.g., ORMs)
+
+| Language | Framework | Latest tested version | Support level | CockroachDB adapter | Tutorial |
+|----------+-----------+-----------------------+---------------+---------------------+----------|
+| Go | [GORM](https://github.com/jinzhu/gorm/releases)<br><br>[go-pg](https://github.com/go-pg/pg)<br>[upper/db](https://github.com/upper/db) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/gorm.go ||var gormSupportedTag = "||"\n\n %}<br><br>{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/gopg.go ||var gopgSupportedTag = "||"\n\n %}<br>v4 | Full<br><br>Full<br>Full | [`crdbgorm`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbgorm)<br>(includes client-side transaction retry handling)<br>N/A<br>N/A | [Build a Go App with CockroachDB (GORM)](build-a-go-app-with-cockroachdb-gorm.html)<br><br>N/A<br>[Build a Go App with CockroachDB (upper/db)](build-a-go-app-with-cockroachdb-upperdb.html) |
+| Java | [Hibernate](https://hibernate.org/orm/)<br>(including [Hibernate Spatial](https://docs.jboss.org/hibernate/orm/current/userguide/html_single/Hibernate_User_Guide.html#spatial))<br>[jOOQ](https://www.jooq.org/)<br>[MyBatis](https://mybatis.org/mybatis-3/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/hibernate.go ||var supportedHibernateTag = "||"\n\n %} (must be at least 5.4.19)<br><br>3.13.2 (must be at least 3.13.0)<br>3.5.5 | Full<br><br>Full<br>Full | N/A<br><br>N/A<br>N/A | [Build a Java App with CockroachDB (Hibernate)](build-a-java-app-with-cockroachdb-hibernate.html)<br><br>[Build a Java App with CockroachDB (jOOQ)](build-a-java-app-with-cockroachdb-jooq.html)<br>[Build a Spring App with CockroachDB (MyBatis)]({% link {{ page.version.version }}/build-a-spring-app-with-cockroachdb-mybatis.md %}) |
+| JavaScript/TypeScript | [Sequelize](https://www.npmjs.com/package/sequelize)<br><br>[Knex.js](https://knexjs.org/)<br>[Prisma](https://prisma.io)<br>[TypeORM](https://www.npmjs.com/package/typeorm) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/sequelize.go ||var supportedSequelizeCockroachDBRelease = "||"\n\n %}<br>(use latest version of CockroachDB adapter)<br>{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/knex.go ||const supportedKnexTag = "||"\n\n %}<br>3.14.0<br>0.3.17 {% comment %}{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/typeorm.go ||const supportedTypeORMRelease = "||"\n %}{% endcomment %} | Full<br><br>Full<br>Full<br>Full | [`sequelize-cockroachdb`](https://www.npmjs.com/package/sequelize-cockroachdb)<br><br>N/A<br>N/A<br>N/A | [Build a Node.js App with CockroachDB (Sequelize)](build-a-nodejs-app-with-cockroachdb-sequelize.html)<br><br>[Build a Node.js App with CockroachDB (Knex.js)](build-a-nodejs-app-with-cockroachdb-knexjs.html)<br>[Build a Node.js App with CockroachDB (Prisma)](build-a-nodejs-app-with-cockroachdb-prisma.html)<br>[Build a TypeScript App with CockroachDB (TypeORM)](build-a-typescript-app-with-cockroachdb.html) |
+| Ruby | [ActiveRecord](https://rubygems.org/gems/activerecord)<br>[RGeo/RGeo-ActiveRecord](https://github.com/cockroachdb/activerecord-cockroachdb-adapter#working-with-spatial-data) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/activerecord.go ||var supportedRailsVersion = "||"\nvar %}<br>(use latest version of CockroachDB adapter) | Full | [`activerecord-cockroachdb-adapter`](https://rubygems.org/gems/activerecord-cockroachdb-adapter)<br>(includes client-side transaction retry handling) | [Build a Ruby App with CockroachDB (ActiveRecord)](build-a-ruby-app-with-cockroachdb-activerecord.html) |
+| Python | [Django](https://pypi.org/project/Django/)<br>(including [GeoDjango](https://docs.djangoproject.com/en/3.1/ref/contrib/gis/))<br>[peewee](https://github.com/coleifer/peewee/)<br>[SQLAlchemy](https://www.sqlalchemy.org/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/django.go ||var djangoSupportedTag = "cockroach-||"\nvar %}<br>(use latest version of CockroachDB adapter)<br><br>3.13.3<br>0.7.13<br>1.4.17<br>(use latest version of CockroachDB adapter) | Full<br><br>Full<br>Full<br>Full | [`django-cockroachdb`](https://pypi.org/project/django-cockroachdb/)<br><br>N/A<br>N/A<br>[`sqlalchemy-cockroachdb`](https://pypi.org/project/sqlalchemy-cockroachdb)<br>(includes client-side transaction retry handling) | [Build a Python App with CockroachDB (Django)](build-a-python-app-with-cockroachdb-django.html)<br><br>N/A (See [peewee docs](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#cockroach-database).)<br>[Build a Python App with CockroachDB (SQLAlchemy)](build-a-python-app-with-cockroachdb-sqlalchemy.html) |
+
+## Application frameworks
+
+| Framework | Data access | Latest tested version | Support level | Tutorial |
+|-----------+-------------+-----------------------+---------------+----------|
+| Spring | [JDBC]({% link {{ page.version.version }}/build-a-spring-app-with-cockroachdb-jdbc.md %})<br>[JPA (Hibernate)](build-a-spring-app-with-cockroachdb-jpa.html)<br>[MyBatis]({% link {{ page.version.version }}/build-a-spring-app-with-cockroachdb-mybatis.md %}) | See individual Java ORM or [driver](#drivers) for data access version support. | See individual Java ORM or [driver](#drivers) for data access support level. | [Build a Spring App with CockroachDB (JDBC)]({% link {{ page.version.version }}/build-a-spring-app-with-cockroachdb-jdbc.md %})<br>[Build a Spring App with CockroachDB (JPA)](build-a-spring-app-with-cockroachdb-jpa.html)<br>[Build a Spring App with CockroachDB (MyBatis)]({% link {{ page.version.version }}/build-a-spring-app-with-cockroachdb-mybatis.md %}) |
+
+## Graphical user interfaces (GUIs)
+
+| GUI | Latest tested version | Support level | Tutorial |
+|-----+-----------------------+---------------+----------|
+| [DBeaver](https://dbeaver.com/) | 5.2.3 | Full | [Visualize CockroachDB Schemas with DBeaver]({% link {{ page.version.version }}/dbeaver.md %})
+
+## Integrated development environments (IDEs)
+
+| IDE | Latest tested version | Support level | Tutorial |
+|-----+-----------------------+---------------+----------|
+| [DataGrip](https://www.jetbrains.com/datagrip/) | 2024.1 | Full | N/A
+| [IntelliJ IDEA](https://www.jetbrains.com/idea/) | 2024.1 | Full | [Use IntelliJ IDEA with CockroachDB]({% link {{ page.version.version }}/intellij-idea.md %})
+
+## Enhanced data security tools
+
+| Tool | Support level | Integration |
+|-----+---------------+----------|
+| [Satori](https://satoricyber.com/) | Partner supported | [Satori Integration]({% link {{ page.version.version }}/satori-integration.md %}) |
+| [HashiCorp Vault](https://www.vaultproject.io/) | Partner supported | [HashiCorp Vault Integration]({% link {{ page.version.version }}/hashicorp-integration.md %}) |
+
+## Schema migration tools
+
+| Tool | Latest tested version | Support level | Tutorial |
+|-----+------------------------+----------------+----------|
+| [Alembic](https://alembic.sqlalchemy.org/en/latest/) | 1.7 | Full | [Migrate CockroachDB Schemas with Alembic]({% link {{ page.version.version }}/alembic.md %})
+| [Flyway](https://flywaydb.org/documentation/commandline/#download-and-installation) | 7.1.0 | Full | [Migrate CockroachDB Schemas with Flyway]({% link {{ page.version.version }}/flyway.md %})
+| [Liquibase](https://www.liquibase.org/download) | 4.2.0 | Full | [Migrate CockroachDB Schemas with Liquibase]({% link {{ page.version.version }}/liquibase.md %})
+| [Prisma](https://prisma.io) | 3.14.0 | Full | [Build a Node.js App with CockroachDB (Prisma)](build-a-nodejs-app-with-cockroachdb-prisma.html)
+
+## Data migration tools
+
+| Tool | Latest tested version | Support level | Documentation |
+|-----+------------------------+----------------+----------|
+| [AWS DMS](https://aws.amazon.com/dms/) | 3.4.6 | Full | [Migrate with AWS Database Migration Service (DMS)](aws-dms.html)
+| [Qlik Replicate](https://www.qlik.com/us/products/qlik-replicate) | November 2022 | Full | [Migrate and Replicate Data with Qlik Replicate]({% link {{ page.version.version }}/qlik.md %})
+| [Striim](https://www.striim.com) | 4.1.2 | Full | [Migrate and Replicate Data with Striim]({% link {{ page.version.version }}/striim.md %})
+| [Oracle GoldenGate](https://www.oracle.com/integration/goldengate/) | 21.3 | Partial | [Migrate and Replicate Data with Oracle GoldenGate]({% link {{ page.version.version }}/goldengate.md %})
+| [Debezium](https://debezium.io/) | 2.4 | Full | [Migrate Data with Debezium]({% link {{ page.version.version }}/debezium.md %})
+
+## Provisioning tools
+
+| Tool | Latest tested version | Support level | Documentation |
+|------+-----------------------+---------------+---------------|
+| [Terraform](https://terraform.io/) | 1.3.2 | Partial | [Terraform provider for CockroachDB Cloud](https://github.com/cockroachdb/terraform-provider-cockroach#get-started) |
+
+## Other tools
+
+| Tool | Latest tested version | Support level | Tutorial |
+|-----+------------------------+---------------+----------|
+| [Flowable](https://github.com/flowable/flowable-engine) | 6.4.2 | Full | [Getting Started with Flowable and CockroachDB (external)](https://blog.flowable.org/2019/07/11/getting-started-with-flowable-and-cockroachdb/)
diff --git a/src/current/_includes/v25.3/misc/userfile.md b/src/current/_includes/v25.3/misc/userfile.md
new file mode 100644
index 00000000000..dbeb640a84b
--- /dev/null
+++ b/src/current/_includes/v25.3/misc/userfile.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_info}}
+ CockroachDB now supports uploading files to a [user-scoped file storage]({% link {{ page.version.version }}/use-userfile-storage.md %}) using a SQL connection. We recommend using `userfile` instead of `nodelocal`, as it is user-scoped and more secure.
+{{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.3/multi-dimensional-metrics-table.md b/src/current/_includes/v25.3/multi-dimensional-metrics-table.md
new file mode 100644
index 00000000000..d24c38b2cb9
--- /dev/null
+++ b/src/current/_includes/v25.3/multi-dimensional-metrics-table.md
@@ -0,0 +1,28 @@
+{% assign version = page.version.version | replace: ".", "" %}
+{% assign metrics = site.data[version].metrics.multi-dimensional-metrics | where_exp: "metrics", "metrics.feature contains feature" | sort: "multi_dimensional_metric_id" %}
+{% comment %} Fetch multi-dimensional-metrics for given feature. {% endcomment %}
+
+Following is a list of the metrics that have multi-dimensional metrics:
+
+<table>
+    <thead>
+        <tr>
+            <th>CockroachDB Metric Name</th>
+            <th>{% if feature == "ldr" or feature == "detailed-latency" %}Description{% else %}Description When Aggregated{% endif %}</th>
+            <th>Type</th>
+            <th>Unit</th>
+        </tr>
+    </thead>
+    <tbody>
+    {% for m in metrics %} {% comment %} Iterate through the metrics. {% endcomment %}
+    {% assign metrics-list = site.data[version].metrics.metrics-list | where: "metric", m.multi_dimensional_metric_id %}
+    {% comment %} Get the row from the metrics-list with the given child_metric_id. {% endcomment %}
+        <tr>
+            <td><code>{{ m.multi_dimensional_metric_id }}</code></td>
+            <td>{% if metrics-list[0].description == null %}{{ m.description }}{% else %}{{ metrics-list[0].description }}{% endif %}</td>
+            <td>{% if metrics-list[0].type == null %}{{ m.type }}{% else %}{{ metrics-list[0].type }}{% endif %}</td>
+            <td>{% if metrics-list[0].unit == null %}{{ m.unit }}{% else %}{{ metrics-list[0].unit }}{% endif %}</td>
+        </tr>
+    {% endfor %} {% comment %} metrics {% endcomment %}
+    </tbody>
+</table>
\ No newline at end of file diff --git a/src/current/_includes/v25.3/orchestration/apply-custom-resource.md b/src/current/_includes/v25.3/orchestration/apply-custom-resource.md new file mode 100644 index 00000000000..e7aacf41a1e --- /dev/null +++ b/src/current/_includes/v25.3/orchestration/apply-custom-resource.md @@ -0,0 +1,6 @@ +Apply the new settings to the cluster: + +{% include_cached copy-clipboard.html %} +~~~ shell +$ kubectl apply -f example.yaml +~~~ \ No newline at end of file diff --git a/src/current/_includes/v25.3/orchestration/apply-helm-values.md b/src/current/_includes/v25.3/orchestration/apply-helm-values.md new file mode 100644 index 00000000000..90f9c8783f8 --- /dev/null +++ b/src/current/_includes/v25.3/orchestration/apply-helm-values.md @@ -0,0 +1,6 @@ +Apply the custom values to override the default Helm chart [values](https://github.com/cockroachdb/helm-charts/blob/master/cockroachdb/values.yaml): + +{% include_cached copy-clipboard.html %} +~~~ shell +$ helm upgrade {release-name} --values {custom-values}.yaml cockroachdb/cockroachdb +~~~ \ No newline at end of file diff --git a/src/current/_includes/v25.3/orchestration/apply-statefulset-manifest.md b/src/current/_includes/v25.3/orchestration/apply-statefulset-manifest.md new file mode 100644 index 00000000000..0236903c497 --- /dev/null +++ b/src/current/_includes/v25.3/orchestration/apply-statefulset-manifest.md @@ -0,0 +1,6 @@ +Apply the new settings to the cluster: + +{% include_cached copy-clipboard.html %} +~~~ shell +$ kubectl apply -f {statefulset-manifest}.yaml +~~~ \ No newline at end of file diff --git a/src/current/_includes/v25.3/orchestration/kubernetes-basic-sql.md b/src/current/_includes/v25.3/orchestration/kubernetes-basic-sql.md new file mode 100644 index 00000000000..341c9bca23b --- /dev/null +++ b/src/current/_includes/v25.3/orchestration/kubernetes-basic-sql.md @@ -0,0 +1,44 @@ +1. Run some basic [CockroachDB SQL statements]({% link {{ page.version.version }}/learn-cockroachdb-sql.md %}): + + {% include_cached copy-clipboard.html %} + ~~~ sql + > CREATE DATABASE bank; + ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ sql + > CREATE TABLE bank.accounts (id INT PRIMARY KEY, balance DECIMAL); + ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ sql + > INSERT INTO bank.accounts VALUES (1, 1000.50); + ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ sql + > SELECT * FROM bank.accounts; + ~~~ + + ~~~ + id | balance + +----+---------+ + 1 | 1000.50 + (1 row) + ~~~ + +1. [Create a user with a password]({% link {{ page.version.version }}/create-user.md %}#create-a-user-with-a-password): + + {% include_cached copy-clipboard.html %} + ~~~ sql + > CREATE USER roach WITH PASSWORD 'Q7gc8rEdS'; + ~~~ + + You will need this username and password to access the DB Console later. + +1. Exit the SQL shell and pod: + + {% include_cached copy-clipboard.html %} + ~~~ sql + > \q + ~~~ \ No newline at end of file diff --git a/src/current/_includes/v25.3/orchestration/kubernetes-cockroach-cert.md b/src/current/_includes/v25.3/orchestration/kubernetes-cockroach-cert.md new file mode 100644 index 00000000000..12fa4d9783f --- /dev/null +++ b/src/current/_includes/v25.3/orchestration/kubernetes-cockroach-cert.md @@ -0,0 +1,90 @@ +{{site.data.alerts.callout_info}} +The below steps use [`cockroach cert` commands]({% link {{ page.version.version }}/cockroach-cert.md %}) to quickly generate and sign the CockroachDB node and client certificates. 
Read our [Authentication]({% link {{ page.version.version }}/authentication.md %}#using-digital-certificates-with-cockroachdb) docs to learn about other methods of signing certificates. +{{site.data.alerts.end}} + +1. Create two directories: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ mkdir certs my-safe-directory + ~~~ + + Directory | Description + ----------|------------ + `certs` | You'll generate your CA certificate and all node and client certificates and keys in this directory. + `my-safe-directory` | You'll generate your CA key in this directory and then reference the key when generating node and client certificates. + +1. Create the CA certificate and key pair: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach cert create-ca \ + --certs-dir=certs \ + --ca-key=my-safe-directory/ca.key + ~~~ + +1. Create a client certificate and key pair for the root user: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach cert create-client \ + root \ + --certs-dir=certs \ + --ca-key=my-safe-directory/ca.key + ~~~ + +1. Upload the client certificate and key to the Kubernetes cluster as a secret: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl create secret \ + generic cockroachdb.client.root \ + --from-file=certs + ~~~ + + ~~~ + secret/cockroachdb.client.root created + ~~~ + +1. Create the certificate and key pair for your CockroachDB nodes: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach cert create-node \ + localhost 127.0.0.1 \ + cockroachdb-public \ + cockroachdb-public.default \ + cockroachdb-public.default.svc.cluster.local \ + *.cockroachdb \ + *.cockroachdb.default \ + *.cockroachdb.default.svc.cluster.local \ + --certs-dir=certs \ + --ca-key=my-safe-directory/ca.key + ~~~ + +1. Upload the node certificate and key to the Kubernetes cluster as a secret: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl create secret \ + generic cockroachdb.node \ + --from-file=certs + ~~~ + + ~~~ + secret/cockroachdb.node created + ~~~ + +1. Check that the secrets were created on the cluster: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl get secrets + ~~~ + + ~~~ + NAME TYPE DATA AGE + cockroachdb.client.root Opaque 3 41m + cockroachdb.node Opaque 5 14s + default-token-6qjdb kubernetes.io/service-account-token 3 4m + ~~~ \ No newline at end of file diff --git a/src/current/_includes/v25.3/orchestration/kubernetes-expand-disk-helm.md b/src/current/_includes/v25.3/orchestration/kubernetes-expand-disk-helm.md new file mode 100644 index 00000000000..4ec3d2f171f --- /dev/null +++ b/src/current/_includes/v25.3/orchestration/kubernetes-expand-disk-helm.md @@ -0,0 +1,118 @@ +You can expand certain [types of persistent volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#types-of-persistent-volumes +) (including GCE Persistent Disk and Amazon Elastic Block Store) by editing their persistent volume claims. + +{{site.data.alerts.callout_info}} +These steps assume you followed the tutorial [Deploy CockroachDB on Kubernetes](deploy-cockroachdb-with-kubernetes.html?filters=helm). +{{site.data.alerts.end}} + +1. 
Get the persistent volume claims for the volumes: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl get pvc + ~~~ + + ~~~ + NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE + datadir-my-release-cockroachdb-0 Bound pvc-75dadd4c-01a1-11ea-b065-42010a8e00cb 100Gi RWO standard 17m + datadir-my-release-cockroachdb-1 Bound pvc-75e143ca-01a1-11ea-b065-42010a8e00cb 100Gi RWO standard 17m + datadir-my-release-cockroachdb-2 Bound pvc-75ef409a-01a1-11ea-b065-42010a8e00cb 100Gi RWO standard 17m + ~~~ + +1. In order to expand a persistent volume claim, `AllowVolumeExpansion` in its storage class must be `true`. Examine the storage class: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl describe storageclass standard + ~~~ + + ~~~ + Name: standard + IsDefaultClass: Yes + Annotations: storageclass.kubernetes.io/is-default-class=true + Provisioner: kubernetes.io/gce-pd + Parameters: type=pd-standard + AllowVolumeExpansion: False + MountOptions: + ReclaimPolicy: Delete + VolumeBindingMode: Immediate + Events: + ~~~ + + If necessary, edit the storage class: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl patch storageclass standard -p '{"allowVolumeExpansion": true}' + ~~~ + + ~~~ + storageclass.storage.k8s.io/standard patched + ~~~ + +1. Edit one of the persistent volume claims to request more space: + + {{site.data.alerts.callout_info}} + The requested `storage` value must be larger than the previous value. You cannot use this method to decrease the disk size. + {{site.data.alerts.end}} + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl patch pvc datadir-my-release-cockroachdb-0 -p '{"spec": {"resources": {"requests": {"storage": "200Gi"}}}}' + ~~~ + + ~~~ + persistentvolumeclaim/datadir-my-release-cockroachdb-0 patched + ~~~ + +1. Check the capacity of the persistent volume claim: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl get pvc datadir-my-release-cockroachdb-0 + ~~~ + + ~~~ + NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE + datadir-my-release-cockroachdb-0 Bound pvc-75dadd4c-01a1-11ea-b065-42010a8e00cb 100Gi RWO standard 18m + ~~~ + + If the PVC capacity has not changed, this may be because `AllowVolumeExpansion` was initially set to `false` or because the [volume has a file system](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#resizing-an-in-use-persistentvolumeclaim) that has to be expanded. You will need to start or restart a pod in order to have it reflect the new capacity. + + {{site.data.alerts.callout_success}} + Running `kubectl get pv` will display the persistent volumes with their *requested* capacity and not their actual capacity. This can be misleading, so it's best to use `kubectl get pvc`. + {{site.data.alerts.end}} + +1. Examine the persistent volume claim. If the volume has a file system, you will see a `FileSystemResizePending` condition with an accompanying message: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl describe pvc datadir-my-release-cockroachdb-0 + ~~~ + + ~~~ + Waiting for user to (re-)start a pod to finish file system resize of volume on node. + ~~~ + +1. Delete the corresponding pod to restart it: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl delete pod my-release-cockroachdb-0 + ~~~ + + The `FileSystemResizePending` condition and message will be removed. + +1. 
View the updated persistent volume claim: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl get pvc datadir-my-release-cockroachdb-0 + ~~~ + + ~~~ + NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE + datadir-my-release-cockroachdb-0 Bound pvc-75dadd4c-01a1-11ea-b065-42010a8e00cb 200Gi RWO standard 20m + ~~~ + +1. The CockroachDB cluster needs to be expanded one node at a time. Repeat steps 3 - 6 to increase the capacities of the remaining volumes by the same amount. \ No newline at end of file diff --git a/src/current/_includes/v25.3/orchestration/kubernetes-expand-disk-manual.md b/src/current/_includes/v25.3/orchestration/kubernetes-expand-disk-manual.md new file mode 100644 index 00000000000..e6cf4bbbddb --- /dev/null +++ b/src/current/_includes/v25.3/orchestration/kubernetes-expand-disk-manual.md @@ -0,0 +1,118 @@ +You can expand certain [types of persistent volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#types-of-persistent-volumes +) (including GCE Persistent Disk and Amazon Elastic Block Store) by editing their persistent volume claims. + +{{site.data.alerts.callout_info}} +These steps assume you followed the tutorial [Deploy CockroachDB on Kubernetes](deploy-cockroachdb-with-kubernetes.html?filters=manual). +{{site.data.alerts.end}} + +1. Get the persistent volume claims for the volumes: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl get pvc + ~~~ + + ~~~ + NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE + datadir-cockroachdb-0 Bound pvc-75dadd4c-01a1-11ea-b065-42010a8e00cb 100Gi RWO standard 17m + datadir-cockroachdb-1 Bound pvc-75e143ca-01a1-11ea-b065-42010a8e00cb 100Gi RWO standard 17m + datadir-cockroachdb-2 Bound pvc-75ef409a-01a1-11ea-b065-42010a8e00cb 100Gi RWO standard 17m + ~~~ + +1. In order to expand a persistent volume claim, `AllowVolumeExpansion` in its storage class must be `true`. Examine the storage class: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl describe storageclass standard + ~~~ + + ~~~ + Name: standard + IsDefaultClass: Yes + Annotations: storageclass.kubernetes.io/is-default-class=true + Provisioner: kubernetes.io/gce-pd + Parameters: type=pd-standard + AllowVolumeExpansion: False + MountOptions: + ReclaimPolicy: Delete + VolumeBindingMode: Immediate + Events: + ~~~ + + If necessary, edit the storage class: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl patch storageclass standard -p '{"allowVolumeExpansion": true}' + ~~~ + + ~~~ + storageclass.storage.k8s.io/standard patched + ~~~ + +1. Edit one of the persistent volume claims to request more space: + + {{site.data.alerts.callout_info}} + The requested `storage` value must be larger than the previous value. You cannot use this method to decrease the disk size. + {{site.data.alerts.end}} + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl patch pvc datadir-cockroachdb-0 -p '{"spec": {"resources": {"requests": {"storage": "200Gi"}}}}' + ~~~ + + ~~~ + persistentvolumeclaim/datadir-cockroachdb-0 patched + ~~~ + +1. 
Check the capacity of the persistent volume claim: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl get pvc datadir-cockroachdb-0 + ~~~ + + ~~~ + NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE + datadir-cockroachdb-0 Bound pvc-75dadd4c-01a1-11ea-b065-42010a8e00cb 100Gi RWO standard 18m + ~~~ + + If the PVC capacity has not changed, this may be because `AllowVolumeExpansion` was initially set to `false` or because the [volume has a file system](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#resizing-an-in-use-persistentvolumeclaim) that has to be expanded. You will need to start or restart a pod in order to have it reflect the new capacity. + + {{site.data.alerts.callout_success}} + Running `kubectl get pv` will display the persistent volumes with their *requested* capacity and not their actual capacity. This can be misleading, so it's best to use `kubectl get pvc`. + {{site.data.alerts.end}} + +1. Examine the persistent volume claim. If the volume has a file system, you will see a `FileSystemResizePending` condition with an accompanying message: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl describe pvc datadir-cockroachdb-0 + ~~~ + + ~~~ + Waiting for user to (re-)start a pod to finish file system resize of volume on node. + ~~~ + +1. Delete the corresponding pod to restart it: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl delete pod cockroachdb-0 + ~~~ + + The `FileSystemResizePending` condition and message will be removed. + +1. View the updated persistent volume claim: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl get pvc datadir-cockroachdb-0 + ~~~ + + ~~~ + NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE + datadir-cockroachdb-0 Bound pvc-75dadd4c-01a1-11ea-b065-42010a8e00cb 200Gi RWO standard 20m + ~~~ + +1. The CockroachDB cluster needs to be expanded one node at a time. Repeat steps 3 - 6 to increase the capacities of the remaining volumes by the same amount. \ No newline at end of file diff --git a/src/current/_includes/v25.3/orchestration/kubernetes-limitations.md b/src/current/_includes/v25.3/orchestration/kubernetes-limitations.md new file mode 100644 index 00000000000..5e9784c28d1 --- /dev/null +++ b/src/current/_includes/v25.3/orchestration/kubernetes-limitations.md @@ -0,0 +1,37 @@ +#### Kubernetes version + +To deploy CockroachDB {{page.version.version}}, Kubernetes 1.18 or higher is required. Cockroach Labs strongly recommends that you use a Kubernetes version that is [eligible for patch support by the Kubernetes project](https://kubernetes.io/releases/). + +#### Kubernetes Operator + +- The CockroachDB Kubernetes Operator currently deploys clusters in a single region. For multi-region deployments using manual configs, see [Orchestrate CockroachDB Across Multiple Kubernetes Clusters]({% link {{ page.version.version }}/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md %}). + +- Using the Operator, you can give a new cluster an arbitrary number of [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/). However, a cluster's labels cannot be modified after it is deployed. To track the status of this limitation, refer to [#993](https://github.com/cockroachdb/cockroach-operator/issues/993) in the Operator project's issue tracker. + +{% unless page.name == "orchestrate-cockroachdb-with-kubernetes-multi-cluster.md" %} +#### Helm version + +The CockroachDB Helm chart requires Helm 3.0 or higher. 
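+
+A quick way to confirm which Helm version is installed locally (a hypothetical sanity check before installing or upgrading the chart):
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+$ helm version --short
+~~~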
If you attempt to use an incompatible Helm version, an error like the following occurs: + +~~~ shell +Error: UPGRADE FAILED: template: cockroachdb/templates/tests/client.yaml:6:14: executing "cockroachdb/templates/tests/client.yaml" at <.Values.networkPolicy.enabled>: nil pointer evaluating interface {}.enabled +~~~ + +The CockroachDB Helm chart is currently not under active development, and no new features are planned. However, Cockroach Labs remains committed to fully supporting the Helm chart by addressing defects, providing security patches, and addressing breaking changes due to deprecations in Kubernetes APIs. + +A deprecation notice for the Helm chart will be provided to customers a minimum of 6 months in advance of actual deprecation. +{% endunless %} + +#### Network + +Server Name Indication (SNI) is an extension to the TLS protocol which allows a client to indicate which hostname it is attempting to connect to at the start of the TLS handshake. The server can present multiple certificates on the same IP address and TCP port number, and one server can serve multiple secure websites or API services even if they use different certificates. + +Due to its order of operations, the PostgreSQL wire protocol's implementation of TLS is not compatible with SNI-based routing in the Kubernetes ingress controller. Instead, use a TCP load balancer for CockroachDB that is not shared with other services. + +#### Resources + +When starting Kubernetes, select machines with at least **4 vCPUs** and **16 GiB** of memory, and provision at least **2 vCPUs** and **8 GiB** of memory to CockroachDB per pod. These minimum settings are used by default in this deployment guide, and are appropriate for testing purposes only. On a production deployment, you should adjust the resource settings for your workload. For details, see [Resource management]({% link {{ page.version.version }}/configure-cockroachdb-kubernetes.md %}#memory-and-cpu). + +#### Storage + +Kubernetes deployments use external persistent volumes that are often replicated by the provider. CockroachDB replicates data automatically, and this redundant layer of replication can impact performance. Using [local volumes](https://kubernetes.io/docs/concepts/storage/volumes/#local) may improve performance. diff --git a/src/current/_includes/v25.3/orchestration/kubernetes-remove-nodes-helm.md b/src/current/_includes/v25.3/orchestration/kubernetes-remove-nodes-helm.md new file mode 100644 index 00000000000..c6b3215bb74 --- /dev/null +++ b/src/current/_includes/v25.3/orchestration/kubernetes-remove-nodes-helm.md @@ -0,0 +1,126 @@ +Before removing a node from your cluster, you must first decommission the node. This lets the node finish in-flight requests, reject any new requests, and transfer all range replicas and range leases off the node. + +{{site.data.alerts.callout_danger}} +If you remove nodes without first telling CockroachDB to decommission them, you may cause data or even cluster unavailability. For more details about how this works and what to consider before removing nodes, see [Prepare for graceful shutdown](node-shutdown.html?filters=decommission#prepare-for-graceful-shutdown). +{{site.data.alerts.end}} + +1. Use the [`cockroach node status`]({% link {{ page.version.version }}/cockroach-node.md %}) command to get the internal IDs of nodes.
For example, if you followed the steps in [Deploy CockroachDB with Kubernetes]({% link {{ page.version.version }}/deploy-cockroachdb-with-kubernetes.md %}#step-3-use-the-built-in-sql-client) to launch a secure client pod, get a shell into the `cockroachdb-client-secure` pod: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl exec -it cockroachdb-client-secure \ + -- ./cockroach node status \ + --certs-dir=/cockroach-certs \ + --host=my-release-cockroachdb-public + ~~~ + + ~~~ + id | address | build | started_at | updated_at | is_available | is_live + +----+---------------------------------------------------------------------------------+--------+----------------------------------+----------------------------------+--------------+---------+ + 1 | my-release-cockroachdb-0.my-release-cockroachdb.default.svc.cluster.local:26257 | {{page.release_info.version}} | 2018-11-29 16:04:36.486082+00:00 | 2018-11-29 18:24:24.587454+00:00 | true | true + 2 | my-release-cockroachdb-2.my-release-cockroachdb.default.svc.cluster.local:26257 | {{page.release_info.version}} | 2018-11-29 16:55:03.880406+00:00 | 2018-11-29 18:24:23.469302+00:00 | true | true + 3 | my-release-cockroachdb-1.my-release-cockroachdb.default.svc.cluster.local:26257 | {{page.release_info.version}} | 2018-11-29 16:04:41.383588+00:00 | 2018-11-29 18:24:25.030175+00:00 | true | true + 4 | my-release-cockroachdb-3.my-release-cockroachdb.default.svc.cluster.local:26257 | {{page.release_info.version}} | 2018-11-29 17:31:19.990784+00:00 | 2018-11-29 18:24:26.041686+00:00 | true | true + (4 rows) + ~~~ + + The pod uses the `root` client certificate created earlier to initialize the cluster, so there's no CSR approval required. + +1. Use the [`cockroach node decommission`]({% link {{ page.version.version }}/cockroach-node.md %}) command to decommission the node with the highest number in its address, specifying its ID (in this example, node ID `4` because its address is `my-release-cockroachdb-3`): + + {{site.data.alerts.callout_info}} + You must decommission the node with the highest number in its address. Kubernetes will remove the pod for the node with the highest number in its address when you reduce the replica count. + {{site.data.alerts.end}} + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl exec -it cockroachdb-client-secure \ + -- ./cockroach node decommission 4 \ + --certs-dir=/cockroach-certs \ + --host=my-release-cockroachdb-public + ~~~ + + You'll then see the decommissioning status print to `stderr` as it changes: + + ~~~ + id | is_live | replicas | is_decommissioning | membership | is_draining + -----+---------+----------+--------------------+-----------------+-------------- + 4 | true | 73 | true | decommissioning | false + ~~~ + + Once the node has been fully decommissioned, you'll see a confirmation: + + ~~~ + id | is_live | replicas | is_decommissioning | membership | is_draining + -----+---------+----------+--------------------+-----------------+-------------- + 4 | true | 0 | true | decommissioning | false + (1 row) + + No more data reported on target nodes. Please verify cluster health before removing the nodes. + ~~~ + +1. Once the node has been decommissioned, scale down your StatefulSet: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ helm upgrade \ + my-release \ + cockroachdb/cockroachdb \ + --set statefulset.replicas=3 \ + --reuse-values + ~~~ + +1. 
Verify that the pod was successfully removed: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl get pods + ~~~ + + ~~~ + NAME READY STATUS RESTARTS AGE + my-release-cockroachdb-0 1/1 Running 0 51m + my-release-cockroachdb-1 1/1 Running 0 47m + my-release-cockroachdb-2 1/1 Running 0 3m + cockroachdb-client-secure 1/1 Running 0 15m + ... + ~~~ + +1. You should also remove the persistent volume that was mounted to the pod. Get the persistent volume claims for the volumes: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl get pvc + ~~~ + + ~~~ + NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE + datadir-my-release-cockroachdb-0 Bound pvc-75dadd4c-01a1-11ea-b065-42010a8e00cb 100Gi RWO standard 17m + datadir-my-release-cockroachdb-1 Bound pvc-75e143ca-01a1-11ea-b065-42010a8e00cb 100Gi RWO standard 17m + datadir-my-release-cockroachdb-2 Bound pvc-75ef409a-01a1-11ea-b065-42010a8e00cb 100Gi RWO standard 17m + datadir-my-release-cockroachdb-3 Bound pvc-75e561ba-01a1-11ea-b065-42010a8e00cb 100Gi RWO standard 17m + ~~~ + +1. Verify that the PVC with the highest number in its name is no longer mounted to a pod: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl describe pvc datadir-my-release-cockroachdb-3 + ~~~ + + ~~~ + Name: datadir-my-release-cockroachdb-3 + ... + Mounted By: + ~~~ + +1. Remove the persistent volume by deleting the PVC: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl delete pvc datadir-my-release-cockroachdb-3 + ~~~ + + ~~~ + persistentvolumeclaim "datadir-my-release-cockroachdb-3" deleted + ~~~ \ No newline at end of file diff --git a/src/current/_includes/v25.3/orchestration/kubernetes-remove-nodes-insecure.md b/src/current/_includes/v25.3/orchestration/kubernetes-remove-nodes-insecure.md new file mode 100644 index 00000000000..af48c9c6c30 --- /dev/null +++ b/src/current/_includes/v25.3/orchestration/kubernetes-remove-nodes-insecure.md @@ -0,0 +1,140 @@ +To safely remove a node from your cluster, you must first decommission the node and only then adjust the `spec.replicas` value of your StatefulSet configuration to permanently remove it. This sequence is important because the decommissioning process lets a node finish in-flight requests, rejects any new requests, and transfers all range replicas and range leases off the node. + +{{site.data.alerts.callout_danger}} +If you remove nodes without first telling CockroachDB to decommission them, you may cause data or even cluster unavailability. For more details about how this works and what to consider before removing nodes, see [Prepare for graceful shutdown](node-shutdown.html?filters=decommission#prepare-for-graceful-shutdown). +{{site.data.alerts.end}} + +1. Launch a temporary interactive pod and use the `cockroach node status` command to get the internal IDs of nodes: + +
+ <section class="filter-content" markdown="1" data-scope="manual">
+ + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl run cockroachdb -it \ + --image=cockroachdb/cockroach:{{page.release_info.version}} \ + --rm \ + --restart=Never \ + -- node status \ + --insecure \ + --host=cockroachdb-public + ~~~ + + ~~~ + id | address | build | started_at | updated_at | is_available | is_live + +----+---------------------------------------------------------------------------------+--------+----------------------------------+----------------------------------+--------------+---------+ + 1 | cockroachdb-0.cockroachdb.default.svc.cluster.local:26257 | {{page.release_info.version}} | 2018-11-29 16:04:36.486082+00:00 | 2018-11-29 18:24:24.587454+00:00 | true | true + 2 | cockroachdb-2.cockroachdb.default.svc.cluster.local:26257 | {{page.release_info.version}} | 2018-11-29 16:55:03.880406+00:00 | 2018-11-29 18:24:23.469302+00:00 | true | true + 3 | cockroachdb-1.cockroachdb.default.svc.cluster.local:26257 | {{page.release_info.version}} | 2018-11-29 16:04:41.383588+00:00 | 2018-11-29 18:24:25.030175+00:00 | true | true + 4 | cockroachdb-3.cockroachdb.default.svc.cluster.local:26257 | {{page.release_info.version}} | 2018-11-29 17:31:19.990784+00:00 | 2018-11-29 18:24:26.041686+00:00 | true | true + (4 rows) + ~~~ +
+ </section>
+ +
+ <section class="filter-content" markdown="1" data-scope="helm">
+ + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl run cockroachdb -it \ + --image=cockroachdb/cockroach:{{page.release_info.version}} \ + --rm \ + --restart=Never \ + -- node status \ + --insecure \ + --host=my-release-cockroachdb-public + ~~~ + + ~~~ + id | address | build | started_at | updated_at | is_available | is_live + +----+---------------------------------------------------------------------------------+--------+----------------------------------+----------------------------------+--------------+---------+ + 1 | my-release-cockroachdb-0.my-release-cockroachdb.default.svc.cluster.local:26257 | {{page.release_info.version}} | 2018-11-29 16:04:36.486082+00:00 | 2018-11-29 18:24:24.587454+00:00 | true | true + 2 | my-release-cockroachdb-2.my-release-cockroachdb.default.svc.cluster.local:26257 | {{page.release_info.version}} | 2018-11-29 16:55:03.880406+00:00 | 2018-11-29 18:24:23.469302+00:00 | true | true + 3 | my-release-cockroachdb-1.my-release-cockroachdb.default.svc.cluster.local:26257 | {{page.release_info.version}} | 2018-11-29 16:04:41.383588+00:00 | 2018-11-29 18:24:25.030175+00:00 | true | true + 4 | my-release-cockroachdb-3.my-release-cockroachdb.default.svc.cluster.local:26257 | {{page.release_info.version}} | 2018-11-29 17:31:19.990784+00:00 | 2018-11-29 18:24:26.041686+00:00 | true | true + (4 rows) + ~~~ +
+ </section>
+ +1. Note the ID of the node with the highest number in its address (in this case, the address including `cockroachdb-3`) and use the [`cockroach node decommission`]({% link {{ page.version.version }}/cockroach-node.md %}) command to decommission it: + + {{site.data.alerts.callout_info}} + It's important to decommission the node with the highest number in its address because, when you reduce the replica count, Kubernetes will remove the pod for that node. + {{site.data.alerts.end}} +
+ <section class="filter-content" markdown="1" data-scope="manual">
+ + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl run cockroachdb -it \ + --image=cockroachdb/cockroach:{{page.release_info.version}} \ + --rm \ + --restart=Never \ + -- node decommission \ + --insecure \ + --host=cockroachdb-public + ~~~ +
+ </section>
+ +
+ <section class="filter-content" markdown="1" data-scope="helm">
+ + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl run cockroachdb -it \ + --image=cockroachdb/cockroach:{{page.release_info.version}} \ + --rm \ + --restart=Never \ + -- node decommission \ + --insecure \ + --host=my-release-cockroachdb-public + ~~~ +
+ </section>
+ + You'll then see the decommissioning status print to `stderr` as it changes: + + ~~~ + id | is_live | replicas | is_decommissioning | membership | is_draining + -----+---------+----------+--------------------+-----------------+-------------- + 4 | true | 73 | true | decommissioning | false + ~~~ + + Once the node has been fully decommissioned, you'll see a confirmation: + + ~~~ + id | is_live | replicas | is_decommissioning | membership | is_draining + -----+---------+----------+--------------------+-----------------+-------------- + 4 | true | 0 | true | decommissioning | false + (1 row) + + No more data reported on target nodes. Please verify cluster health before removing the nodes. + ~~~ + +1. Once the node has been decommissioned, remove a pod from your StatefulSet: +
+ <section class="filter-content" markdown="1" data-scope="manual">
+ + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl scale statefulset cockroachdb --replicas=3 + ~~~ + + ~~~ + statefulset "cockroachdb" scaled + ~~~ +
+ </section>
+ +
+ <section class="filter-content" markdown="1" data-scope="helm">
+ + {% include_cached copy-clipboard.html %} + ~~~ shell + $ helm upgrade \ + my-release \ + cockroachdb/cockroachdb \ + --set statefulset.replicas=3 \ + --reuse-values + ~~~ +
+ </section>
diff --git a/src/current/_includes/v25.3/orchestration/kubernetes-remove-nodes-manual.md b/src/current/_includes/v25.3/orchestration/kubernetes-remove-nodes-manual.md new file mode 100644 index 00000000000..753c030bf70 --- /dev/null +++ b/src/current/_includes/v25.3/orchestration/kubernetes-remove-nodes-manual.md @@ -0,0 +1,126 @@ +Before removing a node from your cluster, you must first decommission the node. This lets a node finish in-flight requests, rejects any new requests, and transfers all range replicas and range leases off the node. + +{{site.data.alerts.callout_danger}} +If you remove nodes without first telling CockroachDB to decommission them, you may cause data or even cluster unavailability. For more details about how this works and what to consider before removing nodes, see [Prepare for graceful shutdown](node-shutdown.html?filters=decommission#prepare-for-graceful-shutdown). +{{site.data.alerts.end}} + +1. Use the [`cockroach node status`]({% link {{ page.version.version }}/cockroach-node.md %}) command to get the internal IDs of nodes. For example, if you followed the steps in [Deploy CockroachDB with Kubernetes]({% link {{ page.version.version }}/deploy-cockroachdb-with-kubernetes.md %}#step-3-use-the-built-in-sql-client) to launch a secure client pod, get a shell into the `cockroachdb-client-secure` pod: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl exec -it cockroachdb-client-secure \ + -- ./cockroach node status \ + --certs-dir=/cockroach-certs \ + --host=cockroachdb-public + ~~~ + + ~~~ + id | address | build | started_at | updated_at | is_available | is_live + +----+---------------------------------------------------------------------------------+--------+----------------------------------+----------------------------------+--------------+---------+ + 1 | cockroachdb-0.cockroachdb.default.svc.cluster.local:26257 | {{page.release_info.version}} | 2018-11-29 16:04:36.486082+00:00 | 2018-11-29 18:24:24.587454+00:00 | true | true + 2 | cockroachdb-2.cockroachdb.default.svc.cluster.local:26257 | {{page.release_info.version}} | 2018-11-29 16:55:03.880406+00:00 | 2018-11-29 18:24:23.469302+00:00 | true | true + 3 | cockroachdb-1.cockroachdb.default.svc.cluster.local:26257 | {{page.release_info.version}} | 2018-11-29 16:04:41.383588+00:00 | 2018-11-29 18:24:25.030175+00:00 | true | true + 4 | cockroachdb-3.cockroachdb.default.svc.cluster.local:26257 | {{page.release_info.version}} | 2018-11-29 17:31:19.990784+00:00 | 2018-11-29 18:24:26.041686+00:00 | true | true + (4 rows) + ~~~ + + The pod uses the `root` client certificate created earlier to initialize the cluster, so there's no CSR approval required. + +1. Use the [`cockroach node decommission`]({% link {{ page.version.version }}/cockroach-node.md %}) command to decommission the node with the highest number in its address, specifying its ID (in this example, node ID `4` because its address is `cockroachdb-3`): + + {{site.data.alerts.callout_info}} + You must decommission the node with the highest number in its address. Kubernetes will remove the pod for the node with the highest number in its address when you reduce the replica count. 
{{site.data.alerts.end}} + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl exec -it cockroachdb-client-secure \ + -- ./cockroach node decommission 4 \ + --certs-dir=/cockroach-certs \ + --host=cockroachdb-public + ~~~ + + You'll then see the decommissioning status print to `stderr` as it changes: + + ~~~ + id | is_live | replicas | is_decommissioning | membership | is_draining + -----+---------+----------+--------------------+-----------------+-------------- + 4 | true | 73 | true | decommissioning | false + ~~~ + + Once the node has been fully decommissioned, you'll see a confirmation: + + ~~~ + id | is_live | replicas | is_decommissioning | membership | is_draining + -----+---------+----------+--------------------+-----------------+-------------- + 4 | true | 0 | true | decommissioning | false + (1 row) + + No more data reported on target nodes. Please verify cluster health before removing the nodes. + ~~~ + +1. Once the node has been decommissioned, scale down your StatefulSet: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl scale statefulset cockroachdb --replicas=3 + ~~~ + + ~~~ + statefulset.apps/cockroachdb scaled + ~~~ + +1. Verify that the pod was successfully removed: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl get pods + ~~~ + + ~~~ + NAME READY STATUS RESTARTS AGE + cockroachdb-0 1/1 Running 0 51m + cockroachdb-1 1/1 Running 0 47m + cockroachdb-2 1/1 Running 0 3m + cockroachdb-client-secure 1/1 Running 0 15m + ... + ~~~ + +1. You should also remove the persistent volume that was mounted to the pod. Get the persistent volume claims for the volumes: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl get pvc + ~~~ + + ~~~ + NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE + datadir-cockroachdb-0 Bound pvc-75dadd4c-01a1-11ea-b065-42010a8e00cb 100Gi RWO standard 17m + datadir-cockroachdb-1 Bound pvc-75e143ca-01a1-11ea-b065-42010a8e00cb 100Gi RWO standard 17m + datadir-cockroachdb-2 Bound pvc-75ef409a-01a1-11ea-b065-42010a8e00cb 100Gi RWO standard 17m + datadir-cockroachdb-3 Bound pvc-75e561ba-01a1-11ea-b065-42010a8e00cb 100Gi RWO standard 17m + ~~~ + +1. Verify that the PVC with the highest number in its name is no longer mounted to a pod: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl describe pvc datadir-cockroachdb-3 + ~~~ + + ~~~ + Name: datadir-cockroachdb-3 + ... + Mounted By: <none> + ~~~ + +1. Remove the persistent volume by deleting the PVC: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl delete pvc datadir-cockroachdb-3 + ~~~ + + ~~~ + persistentvolumeclaim "datadir-cockroachdb-3" deleted + ~~~ \ No newline at end of file diff --git a/src/current/_includes/v25.3/orchestration/kubernetes-scale-cluster-helm.md b/src/current/_includes/v25.3/orchestration/kubernetes-scale-cluster-helm.md new file mode 100644 index 00000000000..474a87c1077 --- /dev/null +++ b/src/current/_includes/v25.3/orchestration/kubernetes-scale-cluster-helm.md @@ -0,0 +1,118 @@ +Before scaling CockroachDB, ensure that your Kubernetes cluster has enough worker nodes to host the number of pods you want to add. This ensures that no two pods are placed on the same worker node, as recommended in our [production guidance]({% link {{ page.version.version }}/recommended-production-settings.md %}#topology). + +For example, if you want to scale from 3 CockroachDB nodes to 4, your Kubernetes cluster should have at least 4 worker nodes.
You can verify the size of your Kubernetes cluster by running `kubectl get nodes`. + +1. Edit your StatefulSet configuration to add another pod for the new CockroachDB node: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ helm upgrade \ + my-release \ + cockroachdb/cockroachdb \ + --set statefulset.replicas=4 \ + --reuse-values + ~~~ + + ~~~ + Release "my-release" has been upgraded. Happy Helming! + LAST DEPLOYED: Tue May 14 14:06:43 2019 + NAMESPACE: default + STATUS: DEPLOYED + + RESOURCES: + ==> v1beta1/PodDisruptionBudget + NAME AGE + my-release-cockroachdb-budget 51m + + ==> v1/Pod(related) + + NAME READY STATUS RESTARTS AGE + my-release-cockroachdb-0 1/1 Running 0 38m + my-release-cockroachdb-1 1/1 Running 0 39m + my-release-cockroachdb-2 1/1 Running 0 39m + my-release-cockroachdb-3 0/1 Pending 0 0s + my-release-cockroachdb-init-nwjkh 0/1 Completed 0 39m + + ... + ~~~ + +1. Get the name of the `Pending` CSR for the new pod: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl get csr + ~~~ + + ~~~ + NAME AGE REQUESTOR CONDITION + default.client.root 1h system:serviceaccount:default:default Approved,Issued + default.node.my-release-cockroachdb-0 1h system:serviceaccount:default:default Approved,Issued + default.node.my-release-cockroachdb-1 1h system:serviceaccount:default:default Approved,Issued + default.node.my-release-cockroachdb-2 1h system:serviceaccount:default:default Approved,Issued + default.node.my-release-cockroachdb-3 2m system:serviceaccount:default:default Pending + node-csr-0Xmb4UTVAWMEnUeGbW4KX1oL4XV_LADpkwjrPtQjlZ4 1h kubelet Approved,Issued + node-csr-NiN8oDsLhxn0uwLTWa0RWpMUgJYnwcFxB984mwjjYsY 1h kubelet Approved,Issued + node-csr-aU78SxyU69pDK57aj6txnevr7X-8M3XgX9mTK0Hso6o 1h kubelet Approved,Issued + ... + ~~~ + + If you do not see a `Pending` CSR, wait a minute and try again. + +1. Examine the CSR for the new pod: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl describe csr default.node.my-release-cockroachdb-3 + ~~~ + + ~~~ + Name: default.node.my-release-cockroachdb-3 + Labels: <none> + Annotations: <none> + CreationTimestamp: Thu, 09 Nov 2017 13:39:37 -0500 + Requesting User: system:serviceaccount:default:default + Status: Pending + Subject: + Common Name: node + Serial Number: + Organization: Cockroach + Subject Alternative Names: + DNS Names: localhost + my-release-cockroachdb-3.my-release-cockroachdb.default.svc.cluster.local + my-release-cockroachdb-3.my-release-cockroachdb + my-release-cockroachdb-public + my-release-cockroachdb-public.default.svc.cluster.local + IP Addresses: 127.0.0.1 + 10.48.1.6 + Events: <none> + ~~~ + +1. If everything looks correct, approve the CSR for the new pod: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl certificate approve default.node.my-release-cockroachdb-3 + ~~~ + + ~~~ + certificatesigningrequest.certificates.k8s.io/default.node.my-release-cockroachdb-3 approved + ~~~ + +1. Verify that the new pod started successfully: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl get pods + ~~~ + + ~~~ + NAME READY STATUS RESTARTS AGE + my-release-cockroachdb-0 1/1 Running 0 51m + my-release-cockroachdb-1 1/1 Running 0 47m + my-release-cockroachdb-2 1/1 Running 0 3m + my-release-cockroachdb-3 1/1 Running 0 1m + cockroachdb-client-secure 1/1 Running 0 15m + ... + ~~~ + +1.
You can also open the [**Node List**]({% link {{ page.version.version }}/ui-cluster-overview-page.md %}#node-list) in the DB Console to ensure that the fourth node successfully joined the cluster. \ No newline at end of file diff --git a/src/current/_includes/v25.3/orchestration/kubernetes-scale-cluster-manual.md b/src/current/_includes/v25.3/orchestration/kubernetes-scale-cluster-manual.md new file mode 100644 index 00000000000..050c6a252da --- /dev/null +++ b/src/current/_includes/v25.3/orchestration/kubernetes-scale-cluster-manual.md @@ -0,0 +1,51 @@ +Before scaling up CockroachDB, note the following [topology recommendations]({% link {{ page.version.version }}/recommended-production-settings.md %}#topology): + +- Each CockroachDB node (running in its own pod) should run on a separate Kubernetes worker node. +- Each availability zone should have the same number of CockroachDB nodes. + +If your cluster has 3 CockroachDB nodes distributed across 3 availability zones (as in our [deployment example](deploy-cockroachdb-with-kubernetes.html?filters=manual)), we recommend scaling up by a multiple of 3 to retain an even distribution of nodes. You should therefore scale up to a minimum of 6 CockroachDB nodes, with 2 nodes in each zone. + +1. Run `kubectl get nodes` to list the worker nodes in your Kubernetes cluster. There should be at least as many worker nodes as pods you plan to add. This ensures that no more than one pod will be placed on each worker node. + +1. Add worker nodes if necessary: + - On GKE, [resize your cluster](https://cloud.google.com/kubernetes-engine/docs/how-to/resizing-a-cluster). If you deployed a [regional cluster](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-a-regional-cluster) as we recommended, you will use `--num-nodes` to specify the desired number of worker nodes in each zone. For example: + + {% include_cached copy-clipboard.html %} + ~~~ shell + gcloud container clusters resize {cluster-name} --region {region-name} --num-nodes 2 + ~~~ + - On EKS, resize your [Worker Node Group](https://eksctl.io/usage/managing-nodegroups/#scaling). + - On GCE, resize your [Managed Instance Group](https://cloud.google.com/compute/docs/instance-groups/). + - On AWS, resize your [Auto Scaling Group](https://docs.aws.amazon.com/autoscaling/latest/userguide/as-manual-scaling.html). + +1. Edit your StatefulSet configuration to add pods for each new CockroachDB node: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl scale statefulset cockroachdb --replicas=6 + ~~~ + + ~~~ + statefulset.apps/cockroachdb scaled + ~~~ + +1. Verify that the new pods started successfully: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl get pods + ~~~ + + ~~~ + NAME READY STATUS RESTARTS AGE + cockroachdb-0 1/1 Running 0 51m + cockroachdb-1 1/1 Running 0 47m + cockroachdb-2 1/1 Running 0 3m + cockroachdb-3 1/1 Running 0 1m + cockroachdb-4 1/1 Running 0 1m + cockroachdb-5 1/1 Running 0 1m + cockroachdb-client-secure 1/1 Running 0 15m + ... + ~~~ + +1.
\ No newline at end of file diff --git a/src/current/_includes/v25.3/orchestration/kubernetes-simulate-failure.md b/src/current/_includes/v25.3/orchestration/kubernetes-simulate-failure.md new file mode 100644 index 00000000000..6df9b394177 --- /dev/null +++ b/src/current/_includes/v25.3/orchestration/kubernetes-simulate-failure.md @@ -0,0 +1,91 @@ +Based on the `replicas: 3` line in the StatefulSet configuration, Kubernetes ensures that three pods/nodes are running at all times. When a pod/node fails, Kubernetes automatically creates another pod/node with the same network identity and persistent storage. + +To see this in action: + +1. Terminate one of the CockroachDB nodes: + +
+ + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl delete pod cockroachdb-2 + ~~~ + + ~~~ + pod "cockroachdb-2" deleted + ~~~ + +
+ +
+ + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl delete pod cockroachdb-2 + ~~~ + + ~~~ + pod "cockroachdb-2" deleted + ~~~ + +
+ +
+ + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl delete pod my-release-cockroachdb-2 + ~~~ + + ~~~ + pod "my-release-cockroachdb-2" deleted + ~~~ + +
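+ +1. Optionally, watch Kubernetes replace the pod in real time. The `--watch` flag streams pod status changes to your terminal; press Ctrl+C when you are done watching: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl get pods --watch + ~~~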
+ + +1. In the DB Console, the **Cluster Overview** will soon show one node as **Suspect**. As Kubernetes auto-restarts the node, watch it become healthy again. + +1. Back in the terminal, verify that the pod was automatically restarted: + +
+ + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl get pod cockroachdb-2 + ~~~ + + ~~~ + NAME READY STATUS RESTARTS AGE + cockroachdb-2 1/1 Running 0 12s + ~~~ + +
+ +
+ + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl get pod cockroachdb-2 + ~~~ + + ~~~ + NAME READY STATUS RESTARTS AGE + cockroachdb-2 1/1 Running 0 12s + ~~~ + +
+ +
+ + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl get pod my-release-cockroachdb-2 + ~~~ + + ~~~ + NAME READY STATUS RESTARTS AGE + my-release-cockroachdb-2 1/1 Running 0 44s + ~~~ + +
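+ + Note that `RESTARTS` is `0` and `AGE` was reset: deleting the pod caused Kubernetes to schedule an entirely new pod with the same network identity, rather than restarting a container in place. To see how the replacement pod was scheduled, you can inspect its events, substituting the pod name from your deployment: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl describe pod cockroachdb-2 + ~~~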
diff --git a/src/current/_includes/v25.3/orchestration/kubernetes-stop-cluster.md b/src/current/_includes/v25.3/orchestration/kubernetes-stop-cluster.md new file mode 100644 index 00000000000..58d79611e6d --- /dev/null +++ b/src/current/_includes/v25.3/orchestration/kubernetes-stop-cluster.md @@ -0,0 +1,141 @@ +To shut down the CockroachDB cluster: + +
+{% capture latest_operator_version %}{% include_cached latest_operator_version.md %}{% endcapture %} + +1. Delete the previously created custom resource: + + {% include_cached copy-clipboard.html %} + ~~~ shell + kubectl delete -f example.yaml + ~~~ + +1. Remove the Operator: + + {% include_cached copy-clipboard.html %} + ~~~ shell + kubectl delete -f https://raw.githubusercontent.com/cockroachdb/cockroach-operator/v{{ latest_operator_version }}/install/operator.yaml + ~~~ + + This will delete the CockroachDB cluster being run by the Operator. It intentionally does **not** delete: + - The persistent volumes that were attached to the pods, to avoid the risk of data loss. Before deleting a cluster's persistent volumes, be sure to back them up. For more information, refer to [Delete a cluster's persistent volumes](#delete-a-clusters-persistent-volumes) below. + - Any secrets you may have created. For more information on managing secrets, refer to [Managing Secrets Using `kubectl`](https://kubernetes.io/docs/tasks/configmap-secret/managing-secret-using-kubectl) in the Kubernetes project's documentation. + +
+ +
+ +This procedure shuts down the CockroachDB cluster and deletes the resources you just created, including the logs and Prometheus and Alertmanager resources. It intentionally does **not** delete: + +- The persistent volumes that were attached to the pods, to avoid the risk of data loss. Before deleting a cluster's persistent volumes, be sure to back them up. For more information, refer to [Delete a cluster's persistent volumes](#delete-a-clusters-persistent-volumes) below. +- Any secrets you may have created. For more information on managing secrets, refer to [Managing Secrets Using `kubectl`](https://kubernetes.io/docs/tasks/configmap-secret/managing-secret-using-kubectl) in the Kubernetes project's documentation. + +{{site.data.alerts.callout_danger}} +To avoid the risk of data loss, do **not** use the `--all` flag with `kubectl delete`. +{{site.data.alerts.end}} + +1. Delete the resources associated with the `cockroachdb` label, including the logs and Prometheus and Alertmanager resources. This command is long; be sure to copy and run it in its entirety. + + {% include_cached copy-clipboard.html %} + ~~~ shell + kubectl delete \ + pods,statefulsets,services,poddisruptionbudget,jobs,rolebinding,clusterrolebinding,role,clusterrole,serviceaccount,alertmanager,prometheus,prometheusrule,serviceMonitor \ + -l app=cockroachdb + ~~~ + + ~~~ + pod "cockroachdb-0" deleted + pod "cockroachdb-1" deleted + pod "cockroachdb-2" deleted + statefulset.apps "alertmanager-cockroachdb" deleted + statefulset.apps "prometheus-cockroachdb" deleted + service "alertmanager-cockroachdb" deleted + service "cockroachdb" deleted + service "cockroachdb-public" deleted + poddisruptionbudget.policy "cockroachdb-budget" deleted + job.batch "cluster-init-secure" deleted + rolebinding.rbac.authorization.k8s.io "cockroachdb" deleted + clusterrolebinding.rbac.authorization.k8s.io "cockroachdb" deleted + clusterrolebinding.rbac.authorization.k8s.io "prometheus" deleted + role.rbac.authorization.k8s.io "cockroachdb" deleted + clusterrole.rbac.authorization.k8s.io "cockroachdb" deleted + clusterrole.rbac.authorization.k8s.io "prometheus" deleted + serviceaccount "cockroachdb" deleted + serviceaccount "prometheus" deleted + alertmanager.monitoring.coreos.com "cockroachdb" deleted + prometheus.monitoring.coreos.com "cockroachdb" deleted + prometheusrule.monitoring.coreos.com "prometheus-cockroachdb-rules" deleted + servicemonitor.monitoring.coreos.com "cockroachdb" deleted + ~~~ + +1. Delete the pod created for `cockroach` client commands, if you didn't do so earlier: + + {% include_cached copy-clipboard.html %} + ~~~ shell + kubectl delete pod cockroachdb-client-secure + ~~~ + + ~~~ + pod "cockroachdb-client-secure" deleted + ~~~ + +{% capture get_issuers_command %}{% include_cached copy-clipboard.html %} + ~~~ shell + kubectl get issuer + ~~~ + ~~~ shell + kubectl delete issuer {issuer_name} + ~~~ +{% endcapture %} + +{% capture get_csrs_command %}{% include_cached copy-clipboard.html %} + ~~~ shell + kubectl get csr + ~~~ + ~~~ shell + kubectl delete csr default.client.root default.{node_name} + ~~~ +{% endcapture %} + +1. Delete the cluster's cryptographic resources.
    - If your cluster's certificates are managed using cert-manager (recommended but not default), get the names of the cluster's issuers and delete them: {{ get_issuers_command }}
    - If your cluster uses self-signed certificates (the default), get the names of any CSRs for the cluster, then delete them: {{ get_csrs_command }}
+ +
+ +
+1. Uninstall the release: + + {% include_cached copy-clipboard.html %} + ~~~ shell + helm uninstall my-release + ~~~ + + ~~~ + release "my-release" deleted + ~~~ + +1. Delete the pod created for `cockroach` client commands, if you didn't do so earlier: + + {% include_cached copy-clipboard.html %} + ~~~ shell + kubectl delete pod cockroachdb-client-secure + ~~~ + + ~~~ + pod "cockroachdb-client-secure" deleted + ~~~ + +1. Delete the cluster's cryptographic resources. +
    - If your cluster's certificates are managed using cert-manager (recommended but not default), get the names of the cluster's issuers and delete them: {{ get_issuers_command }}
    - If your cluster uses self-signed certificates (the default), get the names of any CSRs for the cluster, then delete them: {{ get_csrs_command }}
+ +
+ + ### Delete a cluster's persistent volumes + +If you need to free up the storage used by CockroachDB, you can optionally delete the persistent volumes that were attached to the pods, after first backing up your data. + +{{site.data.alerts.callout_danger}} +Before you delete a cluster's persistent volumes, be sure you have a backup copy of your data. Data **cannot** be recovered once the persistent volumes are deleted. For more information, see the [Kubernetes documentation](https://kubernetes.io/docs/tasks/run-application/delete-stateful-set/#persistent-volumes). +{{site.data.alerts.end}} + +Refer to the Kubernetes project's documentation for more information and recommendations. diff --git a/src/current/_includes/v25.3/orchestration/local-start-kubernetes.md b/src/current/_includes/v25.3/orchestration/local-start-kubernetes.md new file mode 100644 index 00000000000..7a62cd98fcc --- /dev/null +++ b/src/current/_includes/v25.3/orchestration/local-start-kubernetes.md @@ -0,0 +1,22 @@ +## Before you begin + +Before getting started, it's helpful to review some Kubernetes-specific terminology: + +Feature | Description +--------|------------ +[minikube](http://kubernetes.io/docs/getting-started-guides/minikube/) | A tool commonly used to run a Kubernetes cluster on a local workstation. +[pod](http://kubernetes.io/docs/user-guide/pods/) | A pod is a group of one or more containers managed by Kubernetes. In this tutorial, all pods run on your local workstation. Each pod contains a single container that runs a single CockroachDB node. You'll start with 3 pods and grow to 4. +[StatefulSet](http://kubernetes.io/docs/concepts/abstractions/controllers/statefulsets/) | A StatefulSet is a group of pods treated as stateful units, where each pod has distinguishable network identity and always binds back to the same persistent storage on restart. +[persistent volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) | A persistent volume is storage mounted in a pod and available to its containers. The lifetime of a persistent volume is decoupled from the lifetime of the pod that's using it, ensuring that each CockroachDB node binds back to the same storage on restart.<br><br>When using `minikube`, persistent volumes are external temporary directories that endure until they are manually deleted or until the entire Kubernetes cluster is deleted. +[persistent volume claim](http://kubernetes.io/docs/user-guide/persistent-volumes/#persistentvolumeclaims) | When a pod is created, it requests a persistent volume claim to claim durable storage for its node. + +## Step 1. Start Kubernetes + +1. Follow the [Minikube documentation](https://kubernetes.io/docs/tasks/tools/install-minikube/) to install the latest version of `minikube`, a hypervisor, and the `kubectl` command-line tool. + +1. Start a local Kubernetes cluster: + + {% include_cached copy-clipboard.html %} + ~~~ shell + minikube start + ~~~ diff --git a/src/current/_includes/v25.3/orchestration/monitor-cluster.md b/src/current/_includes/v25.3/orchestration/monitor-cluster.md new file mode 100644 index 00000000000..171dbb24914 --- /dev/null +++ b/src/current/_includes/v25.3/orchestration/monitor-cluster.md @@ -0,0 +1,106 @@ +To access the cluster's [DB Console]({% link {{ page.version.version }}/ui-overview.md %}): + +{% if page.secure == true %} + +1. On secure clusters, [certain pages of the DB Console]({% link {{ page.version.version }}/ui-overview.md %}#db-console-access) can only be accessed by `admin` users. + + Get a shell into the pod and start the CockroachDB [built-in SQL client]({% link {{ page.version.version }}/cockroach-sql.md %}): + +
+ + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl exec -it cockroachdb-client-secure \ + -- ./cockroach sql \ + --certs-dir=/cockroach/cockroach-certs \ + --host=cockroachdb-public + ~~~ + +
+ +
+ + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl exec -it cockroachdb-client-secure \ + -- ./cockroach sql \ + --certs-dir=/cockroach-certs \ + --host=cockroachdb-public + ~~~ + +
+ +
+ + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl exec -it cockroachdb-client-secure \ + -- ./cockroach sql \ + --certs-dir=/cockroach-certs \ + --host=my-release-cockroachdb-public + ~~~
+ +1. Assign `roach` to the `admin` role (you only need to do this once): + + {% include_cached copy-clipboard.html %} + ~~~ sql + > GRANT admin TO roach; + ~~~ + +1. Exit the SQL shell and pod: + + {% include_cached copy-clipboard.html %} + ~~~ sql + > \q + ~~~ + +{% endif %} + +1. In a new terminal window, port-forward from your local machine to the `cockroachdb-public` service: + +
+ + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl port-forward service/cockroachdb-public 8080 + ~~~ + +
+ +
+ + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl port-forward service/cockroachdb-public 8080 + ~~~ + +
+ +
+ + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl port-forward service/my-release-cockroachdb-public 8080 + ~~~ + +
+ + ~~~ + Forwarding from 127.0.0.1:8080 -> 8080 + ~~~ + + {{site.data.alerts.callout_info}}The port-forward command must be run on the same machine as the web browser in which you want to view the DB Console. If you have been running these commands from a cloud instance or other non-local shell, you will not be able to view the UI without configuring kubectl locally and running the above port-forward command on your local machine.{{site.data.alerts.end}} + +{% if page.secure == true %} + +1. Go to https://localhost:8080 and log in with the username and password you created earlier. + + {% include {{ page.version.version }}/misc/chrome-localhost.md %} + +{% else %} + +1. Go to http://localhost:8080. + +{% endif %} + +1. In the UI, verify that the cluster is running as expected: + - View the [Node List]({% link {{ page.version.version }}/ui-cluster-overview-page.md %}#node-list) to ensure that all nodes successfully joined the cluster. + - Click the **Databases** tab on the left to verify that `bank` is listed. diff --git a/src/current/_includes/v25.3/orchestration/operator-check-namespace.md b/src/current/_includes/v25.3/orchestration/operator-check-namespace.md new file mode 100644 index 00000000000..bc37c6e1681 --- /dev/null +++ b/src/current/_includes/v25.3/orchestration/operator-check-namespace.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_info}} +All `kubectl` steps should be performed in the [namespace where you installed the Operator]({% link {{ page.version.version }}/deploy-cockroachdb-with-kubernetes.md %}#install-the-operator). By default, this is `cockroach-operator-system`. +{{site.data.alerts.end}} \ No newline at end of file diff --git a/src/current/_includes/v25.3/orchestration/start-cockroachdb-helm-insecure.md b/src/current/_includes/v25.3/orchestration/start-cockroachdb-helm-insecure.md new file mode 100644 index 00000000000..db3916f2fa9 --- /dev/null +++ b/src/current/_includes/v25.3/orchestration/start-cockroachdb-helm-insecure.md @@ -0,0 +1,111 @@ +1. [Install the Helm client](https://helm.sh/docs/intro/install) (version 3.0 or higher) and add the `cockroachdb` chart repository: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ helm repo add cockroachdb https://charts.cockroachdb.com/ + ~~~ + + ~~~ + "cockroachdb" has been added to your repositories + ~~~ + +1. Update your Helm chart repositories to ensure that you're using the [latest CockroachDB chart](https://github.com/cockroachdb/helm-charts/blob/master/cockroachdb/Chart.yaml): + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ helm repo update + ~~~ + +1. Modify our Helm chart's [`values.yaml`](https://github.com/cockroachdb/helm-charts/blob/master/cockroachdb/values.yaml) parameters for your deployment scenario. + + Create a `my-values.yaml` file to override the defaults in `values.yaml`, substituting your own values in this example based on the guidelines below. + + {% include_cached copy-clipboard.html %} + ~~~ + statefulset: + resources: + limits: + memory: "8Gi" + requests: + memory: "8Gi" + conf: + cache: "2Gi" + max-sql-memory: "2Gi" + ~~~ + + 1. To avoid running out of memory when CockroachDB is not the only pod on a Kubernetes node, you *must* set memory limits explicitly. This is because CockroachDB does not detect the amount of memory allocated to its pod when run in Kubernetes. We recommend setting `conf.cache` and `conf.max-sql-memory` each to 1/4 of the `memory` allocation specified in `statefulset.resources.requests` and `statefulset.resources.limits`. 
+ + {{site.data.alerts.callout_success}} + For example, if you are allocating 8Gi of `memory` to each CockroachDB node, allocate 2Gi to `cache` and 2Gi to `max-sql-memory`. + {{site.data.alerts.end}} + +1. For an insecure deployment, set `tls.enabled` to `false`. For clarity, this example also includes the configuration from the previous steps. + + {% include_cached copy-clipboard.html %} + ~~~ + statefulset: + resources: + limits: + memory: "8Gi" + requests: + memory: "8Gi" + conf: + cache: "2Gi" + max-sql-memory: "2Gi" + tls: + enabled: false + ~~~ + + 1. You may want to modify `storage.persistentVolume.size` and `storage.persistentVolume.storageClass` for your use case. This chart defaults to 100Gi of disk space per pod. For more details on customizing disks for performance, see [these instructions]({% link {{ page.version.version }}/kubernetes-performance.md %}#disk-type). + + {{site.data.alerts.callout_info}} + If necessary, you can [expand disk size](/docs/{{ page.version.version }}/configure-cockroachdb-kubernetes.html?filters=helm#expand-disk-size) after the cluster is live. + {{site.data.alerts.end}} + +1. Install the CockroachDB Helm chart. + + Provide a "release" name to identify and track this particular deployment of the chart, and override the default values with those in `my-values.yaml`. + + {{site.data.alerts.callout_info}} + This tutorial uses `my-release` as the release name. If you use a different value, be sure to adjust the release name in subsequent commands. + {{site.data.alerts.end}} + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ helm install my-release --values my-values.yaml cockroachdb/cockroachdb + ~~~ + + Behind the scenes, this command uses our `cockroachdb-statefulset.yaml` file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it, where each pod has distinguishable network identity and always binds back to the same persistent storage on restart. + +1. Confirm that CockroachDB cluster initialization has completed successfully, with the pods for CockroachDB showing `1/1` under `READY` and the pod for initialization showing `COMPLETED` under `STATUS`: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl get pods + ~~~ + + ~~~ + NAME READY STATUS RESTARTS AGE + my-release-cockroachdb-0 1/1 Running 0 8m + my-release-cockroachdb-1 1/1 Running 0 8m + my-release-cockroachdb-2 1/1 Running 0 8m + my-release-cockroachdb-init-hxzsc 0/1 Completed 0 1h + ~~~ + +1. Confirm that the persistent volumes and corresponding claims were created successfully for all three pods: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl get pv + ~~~ + + ~~~ + NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE + pvc-71019b3a-fc67-11e8-a606-080027ba45e5 100Gi RWO Delete Bound default/datadir-my-release-cockroachdb-0 standard 11m + pvc-7108e172-fc67-11e8-a606-080027ba45e5 100Gi RWO Delete Bound default/datadir-my-release-cockroachdb-1 standard 11m + pvc-710dcb66-fc67-11e8-a606-080027ba45e5 100Gi RWO Delete Bound default/datadir-my-release-cockroachdb-2 standard 11m + ~~~ + +{{site.data.alerts.callout_success}} +The StatefulSet configuration sets all CockroachDB nodes to log to `stderr`, so if you ever need access to logs for a pod, use `kubectl logs <podname>` rather than checking the log on the persistent volume.
+{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/orchestration/start-cockroachdb-helm-secure.md b/src/current/_includes/v25.3/orchestration/start-cockroachdb-helm-secure.md new file mode 100644 index 00000000000..a2f3ebc52b8 --- /dev/null +++ b/src/current/_includes/v25.3/orchestration/start-cockroachdb-helm-secure.md @@ -0,0 +1,110 @@ +The CockroachDB Helm chart is compatible with all Kubernetes versions that are [supported by the Kubernetes project](https://kubernetes.io/releases/) when cert-manager is used for mTLS. + +The CockroachDB Helm chart is currently not under active development, and no new features are planned. However, Cockroach Labs remains committed to fully supporting the Helm chart by addressing defects, providing security patches, and addressing breaking changes due to deprecations in Kubernetes APIs. + +A deprecation notice for the Helm chart will be provided to customers a minimum of 6 months in advance of actual deprecation. + +{{site.data.alerts.callout_danger}} +If you are running a secure Helm deployment on Kubernetes 1.22 and later, you must migrate away from using the Kubernetes CA for cluster authentication. The recommended approach is to use `cert-manager` for certificate management. For details, refer to [Deploy cert-manager for mTLS](secure-cockroachdb-kubernetes.html?filters=helm#deploy-cert-manager-for-mtls). +{{site.data.alerts.end}} + +1. [Install the Helm client](https://helm.sh/docs/intro/install) (version 3.0 or higher) and add the `cockroachdb` chart repository: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ helm repo add cockroachdb https://charts.cockroachdb.com/ + ~~~ + + ~~~ + "cockroachdb" has been added to your repositories + ~~~ + +1. Update your Helm chart repositories to ensure that you're using the [latest CockroachDB chart](https://github.com/cockroachdb/helm-charts/blob/master/cockroachdb/Chart.yaml): + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ helm repo update + ~~~ + +1. The cluster configuration is set in the Helm chart's [values file](https://github.com/cockroachdb/helm-charts/blob/master/cockroachdb/values.yaml). + + {{site.data.alerts.callout_info}} + By default, the Helm chart specifies CPU and memory resources that are appropriate for the virtual machines used in this deployment example. On a production cluster, you should substitute values that are appropriate for your machines and workload. For details on configuring your deployment, see [Configure the Cluster](configure-cockroachdb-kubernetes.html?filters=helm). + {{site.data.alerts.end}} + + Before deploying, modify some parameters in our Helm chart's [values file](https://github.com/cockroachdb/helm-charts/blob/master/cockroachdb/values.yaml): + + 1. Create a local YAML file (e.g., `my-values.yaml`) to specify your custom values. These will be used to override the defaults in `values.yaml`. + + 1. To avoid running out of memory when CockroachDB is not the only pod on a Kubernetes node, you *must* set memory limits explicitly. This is because CockroachDB does not detect the amount of memory allocated to its pod when run in Kubernetes. We recommend setting `conf.cache` and `conf.max-sql-memory` each to 1/4 of the `memory` allocation specified in `statefulset.resources.requests` and `statefulset.resources.limits`. + + {{site.data.alerts.callout_success}} + For example, if you are allocating 8Gi of `memory` to each CockroachDB node, allocate 2Gi to `cache` and 2Gi to `max-sql-memory`. 
+ {{site.data.alerts.end}} + + {% include_cached copy-clipboard.html %} + ~~~ yaml + conf: + cache: "2Gi" + max-sql-memory: "2Gi" + ~~~ + + The Helm chart defaults to a secure deployment by automatically setting `tls.enabled` to `true`. + + {{site.data.alerts.callout_info}} + By default, the Helm chart will generate and sign 1 client and 1 node certificate to secure the cluster. To authenticate using your own CA, see [Certificate management](/docs/{{ page.version.version }}/secure-cockroachdb-kubernetes.html?filters=helm#use-a-custom-ca). + {{site.data.alerts.end}} + + Refer to the [CockroachDB Helm chart's `values.yaml` template](https://github.com/cockroachdb/helm-charts/blob/master/cockroachdb/values.yaml). + +1. Install the CockroachDB Helm chart, specifying your custom values file. + + Provide a "release" name to identify and track this particular deployment of the chart, and override the default values with those in `my-values.yaml`. + + {{site.data.alerts.callout_info}} + This tutorial uses `my-release` as the release name. If you use a different value, be sure to adjust the release name in subsequent commands. + {{site.data.alerts.end}} + + {{site.data.alerts.callout_danger}} + To allow the CockroachDB pods to successfully deploy, do not set the [`--wait` flag](https://helm.sh/docs/intro/using_helm/#helpful-options-for-installupgraderollback) when using Helm commands. + {{site.data.alerts.end}} + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ helm install my-release --values {custom-values}.yaml cockroachdb/cockroachdb + ~~~ + + Behind the scenes, this command uses our `cockroachdb-statefulset.yaml` file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it, where each pod has distinguishable network identity and always binds back to the same persistent storage on restart. + +1. Confirm that CockroachDB cluster initialization has completed successfully, with the pods for CockroachDB showing `1/1` under `READY` and the pod for initialization showing `COMPLETED` under `STATUS`: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl get pods + ~~~ + + ~~~ + NAME READY STATUS RESTARTS AGE + my-release-cockroachdb-0 1/1 Running 0 8m + my-release-cockroachdb-1 1/1 Running 0 8m + my-release-cockroachdb-2 1/1 Running 0 8m + my-release-cockroachdb-init-hxzsc 0/1 Completed 0 1h + ~~~ + +1. Confirm that the persistent volumes and corresponding claims were created successfully for all three pods: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl get pv + ~~~ + + ~~~ + NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE + pvc-71019b3a-fc67-11e8-a606-080027ba45e5 100Gi RWO Delete Bound default/datadir-my-release-cockroachdb-0 standard 11m + pvc-7108e172-fc67-11e8-a606-080027ba45e5 100Gi RWO Delete Bound default/datadir-my-release-cockroachdb-1 standard 11m + pvc-710dcb66-fc67-11e8-a606-080027ba45e5 100Gi RWO Delete Bound default/datadir-my-release-cockroachdb-2 standard 11m + ~~~ + +{{site.data.alerts.callout_success}} +The StatefulSet configuration sets all CockroachDB nodes to log to `stderr`, so if you ever need access to logs for a pod, use `kubectl logs <podname>` rather than checking the log on the persistent volume.
+{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/orchestration/start-cockroachdb-insecure.md b/src/current/_includes/v25.3/orchestration/start-cockroachdb-insecure.md new file mode 100644 index 00000000000..3406d48edbb --- /dev/null +++ b/src/current/_includes/v25.3/orchestration/start-cockroachdb-insecure.md @@ -0,0 +1,114 @@ +1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it. + + Download [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml): + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cockroachdb-statefulset.yaml + ~~~ + + {{site.data.alerts.callout_info}} + By default, this manifest specifies CPU and memory resources that are appropriate for the virtual machines used in this deployment example. On a production cluster, you should substitute values that are appropriate for your machines and workload. For details on configuring your deployment, see [Resource management](configure-cockroachdb-kubernetes.html?filters=manual). + {{site.data.alerts.end}} + + Use the file to create the StatefulSet and start the cluster: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl create -f cockroachdb-statefulset.yaml + ~~~ + + ~~~ + service/cockroachdb-public created + service/cockroachdb created + poddisruptionbudget.policy/cockroachdb-budget created + statefulset.apps/cockroachdb created + ~~~ + + Alternatively, if you'd rather start with a configuration file that has been customized for performance: + + 1. Download our [performance version of `cockroachdb-statefulset-insecure.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml): + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml + ~~~ + + 1. Modify the file wherever there is a `TODO` comment. + + 1. Use the file to create the StatefulSet and start the cluster: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl create -f cockroachdb-statefulset-insecure.yaml + ~~~ + +1. Confirm that three pods are `Running` successfully. Note that they will not + be considered `Ready` until after the cluster has been initialized: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl get pods + ~~~ + + ~~~ + NAME READY STATUS RESTARTS AGE + cockroachdb-0 0/1 Running 0 2m + cockroachdb-1 0/1 Running 0 2m + cockroachdb-2 0/1 Running 0 2m + ~~~ + +1. Confirm that the persistent volumes and corresponding claims were created successfully for all three pods: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl get persistentvolumes + ~~~ + + ~~~ + NAME CAPACITY ACCESSMODES RECLAIMPOLICY STATUS CLAIM REASON AGE + pvc-52f51ecf-8bd5-11e6-a4f4-42010a800002 1Gi RWO Delete Bound default/datadir-cockroachdb-0 26s + pvc-52fd3a39-8bd5-11e6-a4f4-42010a800002 1Gi RWO Delete Bound default/datadir-cockroachdb-1 27s + pvc-5315efda-8bd5-11e6-a4f4-42010a800002 1Gi RWO Delete Bound default/datadir-cockroachdb-2 27s + ~~~ + +1. 
Use our [`cluster-init.yaml`](https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl create \ + -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml + ~~~ + + ~~~ + job.batch/cluster-init created + ~~~ + +1. Confirm that cluster initialization has completed successfully. The job should be considered successful and the Kubernetes pods should soon be considered `Ready`: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl get job cluster-init + ~~~ + + ~~~ + NAME COMPLETIONS DURATION AGE + cluster-init 1/1 7s 27s + ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl get pods + ~~~ + + ~~~ + NAME READY STATUS RESTARTS AGE + cluster-init-cqf8l 0/1 Completed 0 56s + cockroachdb-0 1/1 Running 0 7m51s + cockroachdb-1 1/1 Running 0 7m51s + cockroachdb-2 1/1 Running 0 7m51s + ~~~ + +{{site.data.alerts.callout_success}} +The StatefulSet configuration sets all CockroachDB nodes to log to `stderr`, so if you ever need access to a pod/node's logs to troubleshoot, use `kubectl logs <podname>` rather than checking the log on the persistent volume. +{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/orchestration/start-cockroachdb-local-helm-insecure.md b/src/current/_includes/v25.3/orchestration/start-cockroachdb-local-helm-insecure.md new file mode 100644 index 00000000000..9a820070312 --- /dev/null +++ b/src/current/_includes/v25.3/orchestration/start-cockroachdb-local-helm-insecure.md @@ -0,0 +1,126 @@ +1. [Install the Helm client](https://helm.sh/docs/intro/install) (version 3.0 or higher) and add the `cockroachdb` chart repository: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ helm repo add cockroachdb https://charts.cockroachdb.com/ + ~~~ + + ~~~ + "cockroachdb" has been added to your repositories + ~~~ + +1. Update your Helm chart repositories to ensure that you're using the [latest CockroachDB chart](https://github.com/cockroachdb/helm-charts/blob/master/cockroachdb/Chart.yaml): + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ helm repo update + ~~~ + +1. The cluster configuration is set in the Helm chart's [values file](https://github.com/cockroachdb/helm-charts/blob/master/cockroachdb/values.yaml). + + {{site.data.alerts.callout_info}} + By default, the Helm chart specifies CPU and memory resources that are appropriate for the virtual machines used in this deployment example. On a production cluster, you should substitute values that are appropriate for your machines and workload. For details on configuring your deployment, see [Configure the Cluster](configure-cockroachdb-kubernetes.html?filters=helm). + {{site.data.alerts.end}} + + Before deploying, modify some parameters in our Helm chart's [values file](https://github.com/cockroachdb/helm-charts/blob/master/cockroachdb/values.yaml): + + 1. Create a local YAML file (e.g., `my-values.yaml`) to specify your custom values. These will be used to override the defaults in `values.yaml`. + + 1. To avoid running out of memory when CockroachDB is not the only pod on a Kubernetes node, you *must* set memory limits explicitly. This is because CockroachDB does not detect the amount of memory allocated to its pod when run in Kubernetes.
We recommend setting `conf.cache` and `conf.max-sql-memory` each to 1/4 of the `memory` allocation specified in `statefulset.resources.requests` and `statefulset.resources.limits`. + + {{site.data.alerts.callout_success}} + For example, if you are allocating 8Gi of `memory` to each CockroachDB node, allocate 2Gi to `cache` and 2Gi to `max-sql-memory`. + {{site.data.alerts.end}} + + {% include_cached copy-clipboard.html %} + ~~~ yaml + conf: + cache: "2Gi" + max-sql-memory: "2Gi" + ~~~ + + The Helm chart defaults to a secure deployment by automatically setting `tls.enabled` to `true`. For an insecure deployment, set `tls.enabled` to `false`: + + {% include_cached copy-clipboard.html %} + ~~~ yaml + tls: + enabled: false + ~~~ + + Your values file should look similar to: + + {% include_cached copy-clipboard.html %} + ~~~ yaml + conf: + cache: "2Gi" + max-sql-memory: "2Gi" + tls: + enabled: false + ~~~ + + Refer to the [CockroachDB Helm chart's `values.yaml` template](https://github.com/cockroachdb/helm-charts/blob/master/cockroachdb/values.yaml). + +1. Install the CockroachDB Helm chart, specifying your custom values file. + + Provide a "release" name to identify and track this particular deployment of the chart, and override the default values with those in `my-values.yaml`. + + {{site.data.alerts.callout_info}} + This tutorial uses `my-release` as the release name. If you use a different value, be sure to adjust the release name in subsequent commands. + {{site.data.alerts.end}} + + {{site.data.alerts.callout_danger}} + To allow the CockroachDB pods to successfully deploy, do not set the [`--wait` flag](https://helm.sh/docs/intro/using_helm/#helpful-options-for-installupgraderollback) when using Helm commands. + {{site.data.alerts.end}} + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ helm install my-release --values {custom-values}.yaml cockroachdb/cockroachdb + ~~~ + + Behind the scenes, this command uses our `cockroachdb-statefulset.yaml` file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it, where each pod has distinguishable network identity and always binds back to the same persistent storage on restart. + +1. Confirm that CockroachDB cluster initialization has completed successfully, with the pods for CockroachDB showing `1/1` under `READY` and the pod for initialization showing `COMPLETED` under `STATUS`: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl get pods + ~~~ + + ~~~ + NAME READY STATUS RESTARTS AGE + my-release-cockroachdb-0 1/1 Running 0 8m + my-release-cockroachdb-1 1/1 Running 0 8m + my-release-cockroachdb-2 1/1 Running 0 8m + my-release-cockroachdb-init-hxzsc 0/1 Completed 0 1h + ~~~ + +1.
Confirm that the persistent volumes and corresponding claims were created successfully for all three pods: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl get pv + ~~~ + + ~~~ + NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE + pvc-71019b3a-fc67-11e8-a606-080027ba45e5 100Gi RWO Delete Bound default/datadir-my-release-cockroachdb-0 standard 11m + pvc-7108e172-fc67-11e8-a606-080027ba45e5 100Gi RWO Delete Bound default/datadir-my-release-cockroachdb-1 standard 11m + pvc-710dcb66-fc67-11e8-a606-080027ba45e5 100Gi RWO Delete Bound default/datadir-my-release-cockroachdb-2 standard 11m + ~~~ + +{{site.data.alerts.callout_success}} +The StatefulSet configuration sets all CockroachDB nodes to log to `stderr`, so if you ever need access to a pod/node's logs to troubleshoot, use `kubectl logs <podname>` rather than checking the log on the persistent volume. +{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/orchestration/start-cockroachdb-local-insecure.md b/src/current/_includes/v25.3/orchestration/start-cockroachdb-local-insecure.md new file mode 100644 index 00000000000..552cb3cd25f --- /dev/null +++ b/src/current/_includes/v25.3/orchestration/start-cockroachdb-local-insecure.md @@ -0,0 +1,83 @@ +1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl create -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cockroachdb-statefulset.yaml + ~~~ + + ~~~ + service/cockroachdb-public created + service/cockroachdb created + poddisruptionbudget.policy/cockroachdb-budget created + statefulset.apps/cockroachdb created + ~~~ + +1. Confirm that three pods are `Running` successfully. Note that they will not + be considered `Ready` until after the cluster has been initialized: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl get pods + ~~~ + + ~~~ + NAME READY STATUS RESTARTS AGE + cockroachdb-0 0/1 Running 0 2m + cockroachdb-1 0/1 Running 0 2m + cockroachdb-2 0/1 Running 0 2m + ~~~ + +1. Confirm that the persistent volumes and corresponding claims were created successfully for all three pods: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl get pv + ~~~ + + ~~~ + NAME CAPACITY ACCESSMODES RECLAIMPOLICY STATUS CLAIM REASON AGE + pvc-52f51ecf-8bd5-11e6-a4f4-42010a800002 1Gi RWO Delete Bound default/datadir-cockroachdb-0 26s + pvc-52fd3a39-8bd5-11e6-a4f4-42010a800002 1Gi RWO Delete Bound default/datadir-cockroachdb-1 27s + pvc-5315efda-8bd5-11e6-a4f4-42010a800002 1Gi RWO Delete Bound default/datadir-cockroachdb-2 27s + ~~~ + +1. Use our [`cluster-init.yaml`](https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl create \ + -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml + ~~~ + + ~~~ + job.batch/cluster-init created + ~~~ + +1. Confirm that cluster initialization has completed successfully.
The job should be considered successful and the Kubernetes pods should soon be considered `Ready`: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl get job cluster-init + ~~~ + + ~~~ + NAME COMPLETIONS DURATION AGE + cluster-init 1/1 7s 27s + ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl get pods + ~~~ + + ~~~ + NAME READY STATUS RESTARTS AGE + cluster-init-cqf8l 0/1 Completed 0 56s + cockroachdb-0 1/1 Running 0 7m51s + cockroachdb-1 1/1 Running 0 7m51s + cockroachdb-2 1/1 Running 0 7m51s + ~~~ + +{{site.data.alerts.callout_success}} +The StatefulSet configuration sets all CockroachDB nodes to log to `stderr`, so if you ever need access to a pod/node's logs to troubleshoot, use `kubectl logs <podname>` rather than checking the log on the persistent volume. +{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/orchestration/start-cockroachdb-operator-secure.md b/src/current/_includes/v25.3/orchestration/start-cockroachdb-operator-secure.md new file mode 100644 index 00000000000..5cbc1c49af9 --- /dev/null +++ b/src/current/_includes/v25.3/orchestration/start-cockroachdb-operator-secure.md @@ -0,0 +1,110 @@ +### Install the Operator + +{% capture latest_operator_version %}{% include_cached latest_operator_version.md %}{% endcapture %} +{% capture apply_default_operator_manifest_command %}{% include_cached copy-clipboard.html %} + ~~~ shell + kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach-operator/v{{ latest_operator_version }}/install/operator.yaml + ~~~ + ~~~ + clusterrole.rbac.authorization.k8s.io/cockroach-database-role created + serviceaccount/cockroach-database-sa created + clusterrolebinding.rbac.authorization.k8s.io/cockroach-database-rolebinding created + role.rbac.authorization.k8s.io/cockroach-operator-role created + clusterrolebinding.rbac.authorization.k8s.io/cockroach-operator-rolebinding created + clusterrole.rbac.authorization.k8s.io/cockroach-operator-role created + serviceaccount/cockroach-operator-sa created + rolebinding.rbac.authorization.k8s.io/cockroach-operator-default created + deployment.apps/cockroach-operator created + ~~~ +{% endcapture %} +{% capture download_operator_manifest_command %}{% include_cached copy-clipboard.html %} + ~~~ shell + curl -O https://raw.githubusercontent.com/cockroachdb/cockroach-operator/v{{ latest_operator_version }}/install/operator.yaml + ~~~ +{% endcapture %} +{% capture apply_local_operator_manifest_command %}{% include_cached copy-clipboard.html %} + ~~~ shell + kubectl apply -f operator.yaml + ~~~ +{% endcapture %} + +1. Apply the [custom resource definition (CRD)](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#customresourcedefinitions) for the Operator: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach-operator/v{{ latest_operator_version }}/install/crds.yaml + ~~~ + + ~~~ + customresourcedefinition.apiextensions.k8s.io/crdbclusters.crdb.cockroachlabs.com created + ~~~ + +1. By default, the Operator is configured to install in the `cockroach-operator-system` namespace and to manage CockroachDB instances for all namespaces on the cluster.
  - To use these defaults, apply the Operator manifest without modifying it: {{ apply_default_operator_manifest_command }}
  - To change these defaults:
    1. Download the Operator manifest: {{ download_operator_manifest_command }}
    2. To use a custom namespace, replace all instances of `namespace: cockroach-operator-system` with your desired namespace.
    3. To limit the namespaces that will be monitored, set the `WATCH_NAMESPACE` environment variable in the Deployment pod spec, as in the sketch after this list. This can be set to a single namespace or a comma-delimited set of namespaces. When set, only those `CrdbCluster` resources in the supplied namespace(s) will be reconciled.
    4. Apply your local version of the Operator manifest to the cluster: {{ apply_local_operator_manifest_command }}
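
    For illustration, here is a minimal sketch of what the `WATCH_NAMESPACE` setting might look like in the Operator Deployment's pod spec. The namespace values are placeholders, and the container name should be checked against your downloaded `operator.yaml`:

    {% include_cached copy-clipboard.html %}
    ~~~ yaml
    spec:
      containers:
      - name: cockroach-operator
        env:
        # Only CrdbCluster resources in these namespaces will be reconciled.
        - name: WATCH_NAMESPACE
          value: "example-namespace-1,example-namespace-2"
    ~~~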
+ +1. Set your current namespace to the one used by the Operator. For example, to use the Operator's default namespace: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl config set-context --current --namespace=cockroach-operator-system + ~~~ + +1. Validate that the Operator is running: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl get pods + ~~~ + + ~~~ + NAME READY STATUS RESTARTS AGE + cockroach-operator-6f7b86ffc4-9ppkv 1/1 Running 0 54s + ~~~ + +### Initialize the cluster + +{{site.data.alerts.callout_info}} +After a cluster managed by the Kubernetes operator is initialized, its Kubernetes labels cannot be modified. For more details, refer to [Best practices](#best-practices). +{{site.data.alerts.end}} + +1. Download `example.yaml`, a custom resource that tells the Operator how to configure the Kubernetes cluster. + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach-operator/v{{ latest_operator_version }}/examples/example.yaml + ~~~ + + By default, this custom resource specifies CPU and memory resources that are appropriate for the virtual machines used in this deployment example. On a production cluster, you should substitute values that are appropriate for your machines and workload. For details on configuring your deployment, see [Configure the Cluster](configure-cockroachdb-kubernetes.html). + + {{site.data.alerts.callout_info}} + By default, the Operator will generate and sign 1 client and 1 node certificate to secure the cluster. This means that if you do not provide a CA, a `cockroach`-generated CA is used. If you want to authenticate using your own CA, [specify the generated secrets in the custom resource](secure-cockroachdb-kubernetes.html#use-a-custom-ca) **before** proceeding to the next step. + {{site.data.alerts.end}} + +1. Apply `example.yaml`: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl apply -f example.yaml + ~~~ + + The Operator will create a StatefulSet and initialize the nodes as a cluster. + + ~~~ + crdbcluster.crdb.cockroachlabs.com/cockroachdb created + ~~~ + +1. Check that the pods were created: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl get pods + ~~~ + + ~~~ + NAME READY STATUS RESTARTS AGE + cockroach-operator-6f7b86ffc4-9t9zb 1/1 Running 0 3m22s + cockroachdb-0 1/1 Running 0 2m31s + cockroachdb-1 1/1 Running 0 102s + cockroachdb-2 1/1 Running 0 46s + ~~~ + + Each pod should have `READY` status soon after being created. diff --git a/src/current/_includes/v25.3/orchestration/start-cockroachdb-secure.md b/src/current/_includes/v25.3/orchestration/start-cockroachdb-secure.md new file mode 100644 index 00000000000..972cabc2d8e --- /dev/null +++ b/src/current/_includes/v25.3/orchestration/start-cockroachdb-secure.md @@ -0,0 +1,108 @@ +### Configure the cluster + +1. Download and modify our [StatefulSet configuration](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml): + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml + ~~~ + +1. Update `secretName` with the name of the corresponding node secret. + + The secret names depend on your method for generating secrets. 
For example, if you follow the [steps using `cockroach cert`](#create-certificates) below, use this secret name: + + {% include_cached copy-clipboard.html %} + ~~~ yaml + secret: + secretName: cockroachdb.node + ~~~ + +1. The StatefulSet configuration deploys CockroachDB into the `default` namespace. To use a different namespace, search for `kind: RoleBinding` and change its `subjects.namespace` property to the name of the namespace. Otherwise, a `failed to read secrets` error occurs when you attempt to follow the steps in [Initialize the cluster](#initialize-the-cluster). + +{{site.data.alerts.callout_info}} +By default, this manifest specifies CPU and memory resources that are appropriate for the virtual machines used in this deployment example. On a production cluster, you should substitute values that are appropriate for your machines and workload. For details on configuring your deployment, see [Configure the Cluster](configure-cockroachdb-kubernetes.html?filters=manual). +{{site.data.alerts.end}} + +### Create certificates + +{{site.data.alerts.callout_success}} +The StatefulSet configuration sets all CockroachDB nodes to log to `stderr`, so if you ever need access to a pod/node's logs to troubleshoot, use `kubectl logs {pod-name}` rather than checking the log on the persistent volume. +{{site.data.alerts.end}} + +{% include {{ page.version.version }}/orchestration/kubernetes-cockroach-cert.md %} + +### Initialize the cluster + +1. Use the config file you downloaded to create the StatefulSet that automatically creates 3 pods, each running a CockroachDB node: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl create -f cockroachdb-statefulset.yaml + ~~~ + + ~~~ + serviceaccount/cockroachdb created + role.rbac.authorization.k8s.io/cockroachdb created + rolebinding.rbac.authorization.k8s.io/cockroachdb created + service/cockroachdb-public created + service/cockroachdb created + poddisruptionbudget.policy/cockroachdb-budget created + statefulset.apps/cockroachdb created + ~~~ + +1. Initialize the CockroachDB cluster: + + 1. Confirm that three pods are `Running` successfully. Note that they will not be considered `Ready` until after the cluster has been initialized: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl get pods + ~~~ + + ~~~ + NAME READY STATUS RESTARTS AGE + cockroachdb-0 0/1 Running 0 2m + cockroachdb-1 0/1 Running 0 2m + cockroachdb-2 0/1 Running 0 2m + ~~~ + + 1. Confirm that the persistent volumes and corresponding claims were created successfully for all three pods: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl get pv + ~~~ + + ~~~ + NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE + pvc-9e435563-fb2e-11e9-a65c-42010a8e0fca 100Gi RWO Delete Bound default/datadir-cockroachdb-0 standard 51m + pvc-9e47d820-fb2e-11e9-a65c-42010a8e0fca 100Gi RWO Delete Bound default/datadir-cockroachdb-1 standard 51m + pvc-9e4f57f0-fb2e-11e9-a65c-42010a8e0fca 100Gi RWO Delete Bound default/datadir-cockroachdb-2 standard 51m + ~~~ + + 1. Run `cockroach init` on one of the pods to complete the node startup process and have them join together as a cluster: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl exec -it cockroachdb-0 \ + -- /cockroach/cockroach init \ + --certs-dir=/cockroach/cockroach-certs + ~~~ + + ~~~ + Cluster successfully initialized + ~~~ + + 1. Confirm that cluster initialization has completed successfully.
The job should be considered successful and the Kubernetes pods should soon be considered `Ready`: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl get pods + ~~~ + + ~~~ + NAME READY STATUS RESTARTS AGE + cockroachdb-0 1/1 Running 0 3m + cockroachdb-1 1/1 Running 0 3m + cockroachdb-2 1/1 Running 0 3m + ~~~ \ No newline at end of file diff --git a/src/current/_includes/v25.3/orchestration/start-kubernetes.md b/src/current/_includes/v25.3/orchestration/start-kubernetes.md new file mode 100644 index 00000000000..a9a24eb7948 --- /dev/null +++ b/src/current/_includes/v25.3/orchestration/start-kubernetes.md @@ -0,0 +1,98 @@ +You can use the hosted [Google Kubernetes Engine (GKE)](#hosted-gke) service or the hosted [Amazon Elastic Kubernetes Service (EKS)](#hosted-eks) to quickly start Kubernetes. + +{{site.data.alerts.callout_info}} +GKE or EKS are not required to run CockroachDB on Kubernetes. A manual GCE or AWS cluster with the [minimum recommended Kubernetes version](#kubernetes-version) and at least 3 pods, each presenting [sufficient resources](#resources) to start a CockroachDB node, can also be used. +{{site.data.alerts.end}} + +### Hosted GKE + +1. Complete the **Before You Begin** steps described in the [Google Kubernetes Engine Quickstart](https://cloud.google.com/kubernetes-engine/docs/quickstart) documentation. + + This includes installing `gcloud`, which is used to create and delete Kubernetes Engine clusters, and `kubectl`, which is the command-line tool used to manage Kubernetes from your workstation. + + {{site.data.alerts.callout_success}} + The documentation offers the choice of using Google's Cloud Shell product or using a local shell on your machine. Choose to use a local shell if you want to be able to view the DB Console using the steps in this guide. + {{site.data.alerts.end}} + +1. From your local workstation, start the Kubernetes cluster, specifying one of the available [regions](https://cloud.google.com/compute/docs/regions-zones#available) (e.g., `us-east1`): + + {{site.data.alerts.callout_success}} + Since this region can differ from your default `gcloud` region, be sure to include the `--region` flag to run `gcloud` commands against this cluster. + {{site.data.alerts.end}} + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ gcloud container clusters create cockroachdb --machine-type n2-standard-4 --region {region-name} --num-nodes 1 + ~~~ + + ~~~ + Creating cluster cockroachdb...done. + ~~~ + + This creates GKE instances and joins them into a single Kubernetes cluster named `cockroachdb`. The `--region` flag specifies a [regional three-zone cluster](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-a-regional-cluster), and `--num-nodes` specifies one Kubernetes worker node in each zone. + + The `--machine-type` flag tells the node pool to use the [`n2-standard-4`](https://cloud.google.com/compute/docs/machine-types#standard_machine_types) machine type (4 vCPUs, 16 GB memory), which meets our [recommended CPU and memory configuration]({% link {{ page.version.version }}/recommended-production-settings.md %}#basic-hardware-recommendations). + + The process can take a few minutes, so do not move on to the next step until you see a `Creating cluster cockroachdb...done` message and details about your cluster. + +1. 
Get the email address associated with your Google Cloud account: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ gcloud info | grep Account + ~~~ + + ~~~ + Account: [your.google.cloud.email@example.org] + ~~~ + + {{site.data.alerts.callout_danger}} + This command returns your email address in all lowercase. However, in the next step, you must enter the address using the accurate capitalization. For example, if your address is YourName@example.com, you must use YourName@example.com and not yourname@example.com. + {{site.data.alerts.end}} + +1. [Create the RBAC roles](https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control#prerequisites_for_using_role-based_access_control) CockroachDB needs for running on GKE, using the address from the previous step: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl create clusterrolebinding $USER-cluster-admin-binding \ + --clusterrole=cluster-admin \ + --user={your.google.cloud.email@example.org} + ~~~ + + ~~~ + clusterrolebinding.rbac.authorization.k8s.io/your.username-cluster-admin-binding created + ~~~ + +### Hosted EKS + +1. Complete the steps described in the [EKS Getting Started](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-eksctl.html) documentation. + + This includes installing and configuring the AWS CLI and `eksctl`, which is the command-line tool used to create and delete Kubernetes clusters on EKS, and `kubectl`, which is the command-line tool used to manage Kubernetes from your workstation. + + {{site.data.alerts.callout_info}} + If you are running [EKS-Anywhere](https://aws.amazon.com/eks/eks-anywhere/), CockroachDB requires that you [configure your default storage class](https://kubernetes.io/docs/tasks/administer-cluster/change-default-storage-class/) to auto-provision persistent volumes. Alternatively, you can define a custom storage configuration as required by your install pattern. + {{site.data.alerts.end}} + +1. From your local workstation, start the Kubernetes cluster: + + {{site.data.alerts.callout_success}} + To ensure that all 3 nodes can be placed into a different availability zone, you may want to first [confirm that at least 3 zones are available in the region](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#availability-zones-describe) for your account. + {{site.data.alerts.end}} + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ eksctl create cluster \ + --name cockroachdb \ + --nodegroup-name standard-workers \ + --node-type m5.xlarge \ + --nodes 3 \ + --nodes-min 1 \ + --nodes-max 4 \ + --node-ami auto + ~~~ + + This creates EKS instances and joins them into a single Kubernetes cluster named `cockroachdb`. The `--node-type` flag tells the node pool to use the [`m5.xlarge`](https://aws.amazon.com/ec2/instance-types/) instance type (4 vCPUs, 16 GB memory), which meets our [recommended CPU and memory configuration]({% link {{ page.version.version }}/recommended-production-settings.md %}#basic-hardware-recommendations). + + Cluster provisioning usually takes between 10 and 15 minutes. Do not move on to the next step until you see a message like `[✔] EKS cluster "cockroachdb" in "us-east-1" region is ready` and details about your cluster. + +1. Open the [AWS CloudFormation console](https://console.aws.amazon.com/cloudformation/home) to verify that the stacks `eksctl-cockroachdb-cluster` and `eksctl-cockroachdb-nodegroup-standard-workers` were successfully created. 
Be sure that your region is selected in the console. \ No newline at end of file diff --git a/src/current/_includes/v25.3/orchestration/test-cluster-insecure.md b/src/current/_includes/v25.3/orchestration/test-cluster-insecure.md new file mode 100644 index 00000000000..3c94e27b70a --- /dev/null +++ b/src/current/_includes/v25.3/orchestration/test-cluster-insecure.md @@ -0,0 +1,76 @@ +1. Launch a temporary interactive pod and start the [built-in SQL client]({% link {{ page.version.version }}/cockroach-sql.md %}) inside it: + +
+ + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl run cockroachdb -it \ + --image=cockroachdb/cockroach:{{page.release_info.version}} \ + --rm \ + --restart=Never \ + -- sql \ + --insecure \ + --host=cockroachdb-public + ~~~ + +
+ +
+ + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl run cockroachdb -it \ + --image=cockroachdb/cockroach:{{page.release_info.version}} \ + --rm \ + --restart=Never \ + -- sql \ + --insecure \ + --host=my-release-cockroachdb-public + ~~~ + +
+ +1. Run some basic [CockroachDB SQL statements]({% link {{ page.version.version }}/learn-cockroachdb-sql.md %}): + + {% include_cached copy-clipboard.html %} + ~~~ sql + > CREATE DATABASE bank; + ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ sql + > CREATE TABLE bank.accounts ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + balance DECIMAL + ); + ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ sql + > INSERT INTO bank.accounts (balance) + VALUES + (1000.50), (20000), (380), (500), (55000); + ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ sql + > SELECT * FROM bank.accounts; + ~~~ + + ~~~ + id | balance + +--------------------------------------+---------+ + 6f123370-c48c-41ff-b384-2c185590af2b | 380 + 990c9148-1ea0-4861-9da7-fd0e65b0a7da | 1000.50 + ac31c671-40bf-4a7b-8bee-452cff8a4026 | 500 + d58afd93-5be9-42ba-b2e2-dc00dcedf409 | 20000 + e6d8f696-87f5-4d3c-a377-8e152fdc27f7 | 55000 + (5 rows) + ~~~ + +1. Exit the SQL shell and delete the temporary pod: + + {% include_cached copy-clipboard.html %} + ~~~ sql + > \q + ~~~ diff --git a/src/current/_includes/v25.3/orchestration/test-cluster-secure.md b/src/current/_includes/v25.3/orchestration/test-cluster-secure.md new file mode 100644 index 00000000000..f255d8d62fc --- /dev/null +++ b/src/current/_includes/v25.3/orchestration/test-cluster-secure.md @@ -0,0 +1,145 @@ +To use the CockroachDB SQL client, first launch a secure pod running the `cockroach` binary. + +
+ +{% capture latest_operator_version %}{% include_cached latest_operator_version.md %}{% endcapture %} + +{% include_cached copy-clipboard.html %} +~~~ shell +$ kubectl create \ +-f https://raw.githubusercontent.com/cockroachdb/cockroach-operator/v{{ latest_operator_version }}/examples/client-secure-operator.yaml +~~~ + +1. Get a shell into the pod and start the CockroachDB [built-in SQL client]({% link {{ page.version.version }}/cockroach-sql.md %}): + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl exec -it cockroachdb-client-secure \ + -- ./cockroach sql \ + --certs-dir=/cockroach/cockroach-certs \ + --host=cockroachdb-public + ~~~ + + ~~~ + # Welcome to the CockroachDB SQL shell. + # All statements must be terminated by a semicolon. + # To exit, type: \q. + # + # Server version: CockroachDB CCL v21.1.0 (x86_64-unknown-linux-gnu, built 2021/04/23 13:54:57, go1.13.14) (same version as client) + # Cluster ID: a96791d9-998c-4683-a3d3-edbf425bbf11 + # + # Enter \? for a brief introduction. + # + root@cockroachdb-public:26257/defaultdb> + ~~~ + +{% include {{ page.version.version }}/orchestration/kubernetes-basic-sql.md %} + +
+ +
+ +{% include_cached copy-clipboard.html %} +~~~ shell +$ kubectl create \ +-f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/bring-your-own-certs/client.yaml +~~~ + +~~~ +pod/cockroachdb-client-secure created +~~~ + +1. Get a shell into the pod and start the CockroachDB [built-in SQL client]({% link {{ page.version.version }}/cockroach-sql.md %}): + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl exec -it cockroachdb-client-secure \ + -- ./cockroach sql \ + --certs-dir=/cockroach-certs \ + --host=cockroachdb-public + ~~~ + + ~~~ + # Welcome to the cockroach SQL interface. + # All statements must be terminated by a semicolon. + # To exit: CTRL + D. + # + # Client version: CockroachDB CCL v19.1.0 (x86_64-unknown-linux-gnu, built 2019/04/29 18:36:40, go1.11.6) + # Server version: CockroachDB CCL v19.1.0 (x86_64-unknown-linux-gnu, built 2019/04/29 18:36:40, go1.11.6) + + # Cluster ID: 256a8705-e348-4e3a-ab12-e1aba96857e4 + # + # Enter \? for a brief introduction. + # + root@cockroachdb-public:26257/defaultdb> + ~~~ + + {{site.data.alerts.callout_success}} + This pod will continue running indefinitely, so any time you need to reopen the built-in SQL client or run any other [`cockroach` client commands]({% link {{ page.version.version }}/cockroach-commands.md %}) (e.g., `cockroach node`), repeat step 2 using the appropriate `cockroach` command. + + If you'd prefer to delete the pod and recreate it when needed, run `kubectl delete pod cockroachdb-client-secure`. + {{site.data.alerts.end}} + +{% include {{ page.version.version }}/orchestration/kubernetes-basic-sql.md %} +
+ +
+From your local workstation, use our [`client-secure.yaml`](https://github.com/cockroachdb/helm-charts/blob/master/examples/client-secure.yaml) file to launch a pod and keep it running indefinitely. + +1. Download the file: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ curl -OOOOOOOOO \ + https://raw.githubusercontent.com/cockroachdb/helm-charts/master/examples/client-secure.yaml + ~~~ + +1. In the file, set the following values: + - `spec.serviceAccountName: my-release-cockroachdb` + - `spec.image: cockroachdb/cockroach: {your CockroachDB version}` + - `spec.volumes[0].project.sources[0].secret.name: my-release-cockroachdb-client-secret` + +1. Use the file to launch a pod and keep it running indefinitely: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl create -f client-secure.yaml + ~~~ + + ~~~ + pod "cockroachdb-client-secure" created + ~~~ + +1. Get a shell into the pod and start the CockroachDB [built-in SQL client]({% link {{ page.version.version }}/cockroach-sql.md %}): + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ kubectl exec -it cockroachdb-client-secure \ + -- ./cockroach sql \ + --certs-dir=./cockroach-certs \ + --host=my-release-cockroachdb-public + ~~~ + + ~~~ + # Welcome to the cockroach SQL interface. + # All statements must be terminated by a semicolon. + # To exit: CTRL + D. + # + # Client version: CockroachDB CCL v19.1.0 (x86_64-unknown-linux-gnu, built 2019/04/29 18:36:40, go1.11.6) + # Server version: CockroachDB CCL v19.1.0 (x86_64-unknown-linux-gnu, built 2019/04/29 18:36:40, go1.11.6) + + # Cluster ID: 256a8705-e348-4e3a-ab12-e1aba96857e4 + # + # Enter \? for a brief introduction. + # + root@my-release-cockroachdb-public:26257/defaultdb> + ~~~ + + {{site.data.alerts.callout_success}} + This pod will continue running indefinitely, so any time you need to reopen the built-in SQL client or run any other [`cockroach` client commands]({% link {{ page.version.version }}/cockroach-commands.md %}) (e.g., `cockroach node`), repeat step 2 using the appropriate `cockroach` command. + + If you'd prefer to delete the pod and recreate it when needed, run `kubectl delete pod cockroachdb-client-secure`. + {{site.data.alerts.end}} + +{% include {{ page.version.version }}/orchestration/kubernetes-basic-sql.md %} +
\ No newline at end of file diff --git a/src/current/_includes/v25.3/performance/alter-primary-key-hash-sharded.md b/src/current/_includes/v25.3/performance/alter-primary-key-hash-sharded.md new file mode 100644 index 00000000000..7aac175286e --- /dev/null +++ b/src/current/_includes/v25.3/performance/alter-primary-key-hash-sharded.md @@ -0,0 +1,66 @@ +Let's assume the `events` table already exists: + +{% include_cached copy-clipboard.html %} +~~~ sql +> CREATE TABLE events ( + product_id INT8, + owner UUID, + serial_number VARCHAR, + event_id UUID, + ts TIMESTAMP, + data JSONB, + PRIMARY KEY (product_id, owner, serial_number, ts, event_id), + INDEX (ts) USING HASH +); +~~~ + +You can change an existing primary key to use hash sharding by adding the `USING HASH` clause at the end of the key definition: + +{% include_cached copy-clipboard.html %} +~~~ sql +> ALTER TABLE events ALTER PRIMARY KEY USING COLUMNS (product_id, owner, serial_number, ts, event_id) USING HASH; +~~~ + +{% include_cached copy-clipboard.html %} +~~~ sql +> SHOW INDEX FROM events; +~~~ + +~~~ + table_name | index_name | non_unique | seq_in_index | column_name | direction | storing | implicit +-------------+---------------+------------+--------------+-------------------------------------------------------------------+-----------+---------+----------- + events | events_pkey | false | 1 | crdb_internal_event_id_owner_product_id_serial_number_ts_shard_16 | ASC | false | true + events | events_pkey | false | 2 | product_id | ASC | false | false + events | events_pkey | false | 3 | owner | ASC | false | false + events | events_pkey | false | 4 | serial_number | ASC | false | false + events | events_pkey | false | 5 | ts | ASC | false | false + events | events_pkey | false | 6 | event_id | ASC | false | false + events | events_pkey | false | 7 | data | N/A | true | false + events | events_ts_idx | true | 1 | crdb_internal_ts_shard_16 | ASC | false | true + events | events_ts_idx | true | 2 | ts | ASC | false | false + events | events_ts_idx | true | 3 | crdb_internal_event_id_owner_product_id_serial_number_ts_shard_16 | ASC | false | true + events | events_ts_idx | true | 4 | product_id | ASC | false | true + events | events_ts_idx | true | 5 | owner | ASC | false | true + events | events_ts_idx | true | 6 | serial_number | ASC | false | true + events | events_ts_idx | true | 7 | event_id | ASC | false | true +(14 rows) +~~~ + +{% include_cached copy-clipboard.html %} +~~~ sql +> SHOW COLUMNS FROM events; +~~~ + +~~~ + column_name | data_type | is_nullable | column_default | generation_expression | indices | is_hidden +--------------------------------------------------------------------+-----------+-------------+----------------+-----------------------------------------------------------------------------------------------+-----------------------------+------------ + product_id | INT8 | false | NULL | | {events_pkey,events_ts_idx} | false + owner | UUID | false | NULL | | {events_pkey,events_ts_idx} | false + serial_number | VARCHAR | false | NULL | | {events_pkey,events_ts_idx} | false + event_id | UUID | false | NULL | | {events_pkey,events_ts_idx} | false + ts | TIMESTAMP | false | NULL | | {events_pkey,events_ts_idx} | false + data | JSONB | true | NULL | | {events_pkey} | false + crdb_internal_ts_shard_16 | INT8 | false | NULL | mod(fnv32(crdb_internal.datums_to_bytes(ts)), 16) | {events_ts_idx} | true + crdb_internal_event_id_owner_product_id_serial_number_ts_shard_16 | INT8 | false | NULL | 
mod(fnv32(crdb_internal.datums_to_bytes(event_id, owner, product_id, serial_number, ts)), 16) | {events_pkey,events_ts_idx} | true +(8 rows) +~~~ diff --git a/src/current/_includes/v25.3/performance/check-rebalancing-after-partitioning.md b/src/current/_includes/v25.3/performance/check-rebalancing-after-partitioning.md new file mode 100644 index 00000000000..c7e19142bd4 --- /dev/null +++ b/src/current/_includes/v25.3/performance/check-rebalancing-after-partitioning.md @@ -0,0 +1,41 @@ +Over the next few minutes, CockroachDB will rebalance all partitions based on the constraints you defined. + +To check this at a high level, access the Web UI on any node at `{external address of any node}:8080` and look at the **Node List**. You'll see that the range count is still close to even across all nodes but much higher than before partitioning: + +Perf tuning rebalancing + +To check at a more granular level, SSH to one of the instances not running CockroachDB and run the `SHOW EXPERIMENTAL_RANGES` statement on the `vehicles` table: + +{% include_cached copy-clipboard.html %} +~~~ shell +$ cockroach sql \ +{{page.certs}} \ +--host={address of any node}
\ +--database=movr \ +--execute="SELECT * FROM \ +[SHOW EXPERIMENTAL_RANGES FROM TABLE vehicles] \ +WHERE \"start_key\" IS NOT NULL \ + AND \"start_key\" NOT LIKE '%Prefix%';" +~~~ + +~~~ + start_key | end_key | range_id | replicas | lease_holder ++------------------+----------------------------+----------+----------+--------------+ + /"boston" | /"boston"/PrefixEnd | 105 | {1,2,3} | 3 + /"los angeles" | /"los angeles"/PrefixEnd | 121 | {7,8,9} | 8 + /"new york" | /"new york"/PrefixEnd | 101 | {1,2,3} | 3 + /"san francisco" | /"san francisco"/PrefixEnd | 117 | {7,8,9} | 8 + /"seattle" | /"seattle"/PrefixEnd | 113 | {4,5,6} | 5 + /"washington dc" | /"washington dc"/PrefixEnd | 109 | {1,2,3} | 1 +(6 rows) +~~~ + +For reference, here's how the nodes map to zones: + +Node IDs | Zone +---------|----- +1-3 | `us-east1-b` (South Carolina) +4-6 | `us-west1-a` (Oregon) +7-9 | `us-west2-a` (Los Angeles) + +We can see that, after partitioning, the replicas for New York, Boston, and Washington DC are located on nodes 1-3 in `us-east1-b`, replicas for Seattle are located on nodes 4-6 in `us-west1-a`, and replicas for San Francisco and Los Angeles are located on nodes 7-9 in `us-west2-a`. diff --git a/src/current/_includes/v25.3/performance/check-rebalancing.md b/src/current/_includes/v25.3/performance/check-rebalancing.md new file mode 100644 index 00000000000..32e3d98f8f1 --- /dev/null +++ b/src/current/_includes/v25.3/performance/check-rebalancing.md @@ -0,0 +1,33 @@ +Since you started each node with the `--locality` flag set to its GCE zone, over the next few minutes, CockroachDB will rebalance data evenly across the zones. + +To check this, access the DB Console on any node at `{external address of any node}:8080` and look at the **Node List**. You'll see that the range count is more or less even across all nodes: + +Perf tuning rebalancing + +For reference, here's how the nodes map to zones: + +Node IDs | Zone +---------|----- +1-3 | `us-east1-b` (South Carolina) +4-6 | `us-west1-a` (Oregon) +7-9 | `us-west2-a` (Los Angeles) + +To verify even balancing at range level, SSH to one of the instances not running CockroachDB and run the `SHOW EXPERIMENTAL_RANGES` statement: + +{% include_cached copy-clipboard.html %} +~~~ shell +$ cockroach sql \ +{{page.certs}} \ +--host={address of any node}
\ +--database=movr \ +--execute="SHOW EXPERIMENTAL_RANGES FROM TABLE vehicles;" +~~~ + +~~~ + start_key | end_key | range_id | replicas | lease_holder ++-----------+---------+----------+----------+--------------+ + NULL | NULL | 33 | {3,4,7} | 7 +(1 row) +~~~ + +In this case, we can see that, for the single range containing `vehicles` data, one replica is in each zone, and the leaseholder is in the `us-west2-a` zone. diff --git a/src/current/_includes/v25.3/performance/configure-network.md b/src/current/_includes/v25.3/performance/configure-network.md new file mode 100644 index 00000000000..e9abeb94df3 --- /dev/null +++ b/src/current/_includes/v25.3/performance/configure-network.md @@ -0,0 +1,18 @@ +CockroachDB requires TCP communication on two ports: + +- **26257** (`tcp:26257`) for inter-node communication (i.e., working as a cluster) +- **8080** (`tcp:8080`) for accessing the DB Console + +Since GCE instances communicate on their internal IP addresses by default, you do not need to take any action to enable inter-node communication. However, to access the DB Console from your local network, you must [create a firewall rule for your project](https://cloud.google.com/vpc/docs/using-firewalls): + +Field | Recommended Value +------|------------------ +Name | **cockroachweb** +Source filter | IP ranges +Source IP ranges | Your local network's IP ranges +Allowed protocols | **tcp:8080** +Target tags | `cockroachdb` + +{{site.data.alerts.callout_info}} +The **tag** feature will let you easily apply the rule to your instances. +{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/performance/create-index-hash-sharded-secondary-index.md b/src/current/_includes/v25.3/performance/create-index-hash-sharded-secondary-index.md new file mode 100644 index 00000000000..05f66896541 --- /dev/null +++ b/src/current/_includes/v25.3/performance/create-index-hash-sharded-secondary-index.md @@ -0,0 +1,62 @@ +Let's assume the `events` table already exists: + +{% include_cached copy-clipboard.html %} +~~~ sql +> CREATE TABLE events ( + product_id INT8, + owner UUID, + serial_number VARCHAR, + event_id UUID, + ts TIMESTAMP, + data JSONB, + PRIMARY KEY (product_id, owner, serial_number, ts, event_id) +); +~~~ + +You can create a hash-sharded index on an existing table: + +{% include_cached copy-clipboard.html %} +~~~ sql +> CREATE INDEX ON events(ts) USING HASH; +~~~ + +{% include_cached copy-clipboard.html %} +~~~ sql +> SHOW INDEX FROM events; +~~~ + +~~~ + table_name | index_name | non_unique | seq_in_index | column_name | direction | storing | implicit +-------------+---------------+------------+--------------+---------------------------+-----------+---------+----------- + events | events_pkey | false | 1 | product_id | ASC | false | false + events | events_pkey | false | 2 | owner | ASC | false | false + events | events_pkey | false | 3 | serial_number | ASC | false | false + events | events_pkey | false | 4 | ts | ASC | false | false + events | events_pkey | false | 5 | event_id | ASC | false | false + events | events_pkey | false | 6 | data | N/A | true | false + events | events_ts_idx | true | 1 | crdb_internal_ts_shard_16 | ASC | false | true + events | events_ts_idx | true | 2 | ts | ASC | false | false + events | events_ts_idx | true | 3 | product_id | ASC | false | true + events | events_ts_idx | true | 4 | owner | ASC | false | true + events | events_ts_idx | true | 5 | serial_number | ASC | false | true + events | events_ts_idx | true | 6 | event_id | ASC | false | true +(12 rows) +~~~ + 
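The hidden `crdb_internal_ts_shard_16` column above is computed from the indexed column, as the `generation_expression` in the next output shows, so you can preview which shard a value lands in by evaluating the same expression directly. A minimal sketch, reusing the expression from this index (the timestamp literals are arbitrary examples, not rows from `events`):

{% include_cached copy-clipboard.html %}
~~~ sql
> SELECT ts,
         mod(fnv32(crdb_internal.datums_to_bytes(ts)), 16) AS shard
    FROM (VALUES ('2025-08-27 10:00:00'::TIMESTAMP),
                 ('2025-08-27 10:00:01'::TIMESTAMP)) AS v(ts);
~~~

Adjacent timestamps hash to unrelated shards, which is how a sequential write pattern gets spread across ranges.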
+{% include_cached copy-clipboard.html %} +~~~ sql +> SHOW COLUMNS FROM events; +~~~ + +~~~ + column_name | data_type | is_nullable | column_default | generation_expression | indices | is_hidden +----------------------------+-----------+-------------+----------------+---------------------------------------------------+-----------------------------+------------ + product_id | INT8 | false | NULL | | {events_pkey,events_ts_idx} | false + owner | UUID | false | NULL | | {events_pkey,events_ts_idx} | false + serial_number | VARCHAR | false | NULL | | {events_pkey,events_ts_idx} | false + event_id | UUID | false | NULL | | {events_pkey,events_ts_idx} | false + ts | TIMESTAMP | false | NULL | | {events_pkey,events_ts_idx} | false + data | JSONB | true | NULL | | {events_pkey} | false + crdb_internal_ts_shard_16 | INT8 | false | NULL | mod(fnv32(crdb_internal.datums_to_bytes(ts)), 16) | {events_ts_idx} | true +(7 rows) +~~~ diff --git a/src/current/_includes/v25.3/performance/create-table-hash-sharded-primary-index.md b/src/current/_includes/v25.3/performance/create-table-hash-sharded-primary-index.md new file mode 100644 index 00000000000..40ba79a096a --- /dev/null +++ b/src/current/_includes/v25.3/performance/create-table-hash-sharded-primary-index.md @@ -0,0 +1,37 @@ +Let's create the `products` table and add a hash-sharded primary key on the `ts` column: + +{% include_cached copy-clipboard.html %} +~~~ sql +> CREATE TABLE products ( + ts DECIMAL PRIMARY KEY USING HASH, + product_id INT8 + ); +~~~ + +{% include_cached copy-clipboard.html %} +~~~ sql +> SHOW INDEX FROM products; +~~~ + +~~~ + table_name | index_name | non_unique | seq_in_index | column_name | direction | storing | implicit +-------------+---------------+------------+--------------+---------------------------+-----------+---------+----------- + products | products_pkey | false | 1 | crdb_internal_ts_shard_16 | ASC | false | true + products | products_pkey | false | 2 | ts | ASC | false | false + products | products_pkey | false | 3 | product_id | N/A | true | false +(3 rows) +~~~ + +{% include_cached copy-clipboard.html %} +~~~ sql +> SHOW COLUMNS FROM products; +~~~ + +~~~ + column_name | data_type | is_nullable | column_default | generation_expression | indices | is_hidden +----------------------------+-----------+-------------+----------------+---------------------------------------------------+-----------------+------------ + crdb_internal_ts_shard_16 | INT8 | false | NULL | mod(fnv32(crdb_internal.datums_to_bytes(ts)), 16) | {products_pkey} | true + ts | DECIMAL | false | NULL | | {products_pkey} | false + product_id | INT8 | true | NULL | | {products_pkey} | false +(3 rows) +~~~ diff --git a/src/current/_includes/v25.3/performance/create-table-hash-sharded-secondary-index.md b/src/current/_includes/v25.3/performance/create-table-hash-sharded-secondary-index.md new file mode 100644 index 00000000000..dc0e164a0fb --- /dev/null +++ b/src/current/_includes/v25.3/performance/create-table-hash-sharded-secondary-index.md @@ -0,0 +1,56 @@ +Let's now create the `events` table and add a secondary index on the `ts` column in a single statement: + +{% include_cached copy-clipboard.html %} +~~~ sql +> CREATE TABLE events ( + product_id INT8, + owner UUID, + serial_number VARCHAR, + event_id UUID, + ts TIMESTAMP, + data JSONB, + PRIMARY KEY (product_id, owner, serial_number, ts, event_id), + INDEX (ts) USING HASH +); +~~~ + +{% include_cached copy-clipboard.html %} +~~~ sql +> SHOW INDEX FROM events; +~~~ + +~~~ + table_name | 
index_name | non_unique | seq_in_index | column_name | direction | storing | implicit +-------------+---------------+------------+--------------+---------------------------+-----------+---------+----------- + events | events_pkey | false | 1 | product_id | ASC | false | false + events | events_pkey | false | 2 | owner | ASC | false | false + events | events_pkey | false | 3 | serial_number | ASC | false | false + events | events_pkey | false | 4 | ts | ASC | false | false + events | events_pkey | false | 5 | event_id | ASC | false | false + events | events_pkey | false | 6 | data | N/A | true | false + events | events_ts_idx | true | 1 | crdb_internal_ts_shard_16 | ASC | false | true + events | events_ts_idx | true | 2 | ts | ASC | false | false + events | events_ts_idx | true | 3 | product_id | ASC | false | true + events | events_ts_idx | true | 4 | owner | ASC | false | true + events | events_ts_idx | true | 5 | serial_number | ASC | false | true + events | events_ts_idx | true | 6 | event_id | ASC | false | true +(12 rows) +~~~ + +{% include_cached copy-clipboard.html %} +~~~ sql +> SHOW COLUMNS FROM events; +~~~ + +~~~ + column_name | data_type | is_nullable | column_default | generation_expression | indices | is_hidden +----------------------------+-----------+-------------+----------------+---------------------------------------------------+-----------------------------+------------ + product_id | INT8 | false | NULL | | {events_pkey,events_ts_idx} | false + owner | UUID | false | NULL | | {events_pkey,events_ts_idx} | false + serial_number | VARCHAR | false | NULL | | {events_pkey,events_ts_idx} | false + event_id | UUID | false | NULL | | {events_pkey,events_ts_idx} | false + ts | TIMESTAMP | false | NULL | | {events_pkey,events_ts_idx} | false + data | JSONB | true | NULL | | {events_pkey} | false + crdb_internal_ts_shard_16 | INT8 | false | NULL | mod(fnv32(crdb_internal.datums_to_bytes(ts)), 16) | {events_ts_idx} | true +(7 rows) +~~~ diff --git a/src/current/_includes/v25.3/performance/increase-server-side-retries.md b/src/current/_includes/v25.3/performance/increase-server-side-retries.md new file mode 100644 index 00000000000..95f2c2a9647 --- /dev/null +++ b/src/current/_includes/v25.3/performance/increase-server-side-retries.md @@ -0,0 +1,5 @@ +- [Send statements in transactions as a single batch]({% link {{ page.version.version }}/transactions.md %}#batched-statements). Batching allows CockroachDB to [automatically retry]({% link {{ page.version.version }}/transactions.md %}#automatic-retries) a transaction when [previous reads are invalidated]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#read-refreshing) at a [pushed timestamp]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#timestamp-cache). When a multi-statement transaction is not batched, and takes more than a single round trip, CockroachDB cannot automatically retry the transaction. For an example showing how to break up large transactions in an application, see [Break up large transactions into smaller units of work](build-a-python-app-with-cockroachdb-sqlalchemy.html#break-up-large-transactions-into-smaller-units-of-work). 
+ + + +- Limit the size of the result sets of your transactions to under 16KB, so that CockroachDB is more likely to [automatically retry]({% link {{ page.version.version }}/transactions.md %}#automatic-retries) when [previous reads are invalidated]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#read-refreshing) at a [pushed timestamp]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#timestamp-cache). When a transaction returns a result set over 16KB, even if that transaction has been sent as a single batch, CockroachDB cannot automatically retry the transaction. You can change the results buffer size for all new sessions using the `sql.defaults.results_buffer.size` [cluster setting](cluster-settings.html), or for a specific session using the `results_buffer_size` [connection parameter]({% link {{page.version.version}}/connection-parameters.md %}#additional-connection-parameters). diff --git a/src/current/_includes/v25.3/performance/lease-preference-system-database.md b/src/current/_includes/v25.3/performance/lease-preference-system-database.md new file mode 100644 index 00000000000..9661aef0e2d --- /dev/null +++ b/src/current/_includes/v25.3/performance/lease-preference-system-database.md @@ -0,0 +1,10 @@ +To reduce latency while making {% if page.name == "online-schema-changes.md" %}online schema changes{% else %}[online schema changes]({% link {{ page.version.version }}/online-schema-changes.md %}){% endif %}, we recommend specifying a `lease_preference` [zone configuration]({% link {{ page.version.version }}/configure-replication-zones.md %}) on the `system` database to a single region and running all subsequent schema changes from a node within that region. For example, if the majority of online schema changes come from machines that are geographically close to `us-east1`, run the following: + +{% include_cached copy-clipboard.html %} +~~~ sql +ALTER DATABASE system CONFIGURE ZONE USING constraints = '{"+region=us-east1": 1}', lease_preferences = '[[+region=us-east1]]'; +~~~ + +Run all subsequent schema changes from a node in the specified region. + +If you do not intend to run more schema changes from that region, you can safely [remove the lease preference from the zone configuration]({% link {{ page.version.version }}/alter-database.md %}#remove-a-replication-zone) for the system database. diff --git a/src/current/_includes/v25.3/performance/partition-by-city.md b/src/current/_includes/v25.3/performance/partition-by-city.md new file mode 100644 index 00000000000..226c2d1d5f3 --- /dev/null +++ b/src/current/_includes/v25.3/performance/partition-by-city.md @@ -0,0 +1,419 @@ +For this service, the most effective technique for improving read and write latency is to [geo-partition]({% link {{ page.version.version }}/partitioning.md %}) the data by city. In essence, this means changing the way data is mapped to ranges. Instead of an entire table and its indexes mapping to a specific range or set of ranges, all rows in the table and its indexes with a given city will map to a range or set of ranges. Once ranges are defined in this way, we can then use the [replication zone]({% link {{ page.version.version }}/configure-replication-zones.md %}) feature to pin partitions to specific locations, ensuring that read and write requests from users in a specific city do not have to leave that region. + +1. 
Partitioning is an enterprise feature, so start off by [registering for a 30-day trial license](https://www.cockroachlabs.com/get-cockroachdb/enterprise/). + +1. Once you've received the trial license, SSH to any node in your cluster and [apply the license]({% link {{ page.version.version }}/licensing-faqs.md %}#set-a-license): + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql \ + {{page.certs}} \ + --host={address of any node}
\ + --execute="SET CLUSTER SETTING cluster.organization = '';" + ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql \ + {{page.certs}} \ + --host=
\ + --execute="SET CLUSTER SETTING enterprise.license = '';" + ~~~ + +1. Define partitions for all tables and their secondary indexes. + + Start with the `users` table: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql \ + {{page.certs}} \ + --database=movr \ + --host=
\ + --execute="ALTER TABLE users \ + PARTITION BY LIST (city) ( \ + PARTITION new_york VALUES IN ('new york'), \ + PARTITION boston VALUES IN ('boston'), \ + PARTITION washington_dc VALUES IN ('washington dc'), \ + PARTITION seattle VALUES IN ('seattle'), \ + PARTITION san_francisco VALUES IN ('san francisco'), \ + PARTITION los_angeles VALUES IN ('los angeles') \ + );" + ~~~ + + Now define partitions for the `vehicles` table and its secondary indexes: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql \ + {{page.certs}} \ + --database=movr \ + --host=
\ + --execute="ALTER TABLE vehicles \ + PARTITION BY LIST (city) ( \ + PARTITION new_york VALUES IN ('new york'), \ + PARTITION boston VALUES IN ('boston'), \ + PARTITION washington_dc VALUES IN ('washington dc'), \ + PARTITION seattle VALUES IN ('seattle'), \ + PARTITION san_francisco VALUES IN ('san francisco'), \ + PARTITION los_angeles VALUES IN ('los angeles') \ + );" + ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql \ + {{page.certs}} \ + --database=movr \ + --host=
\ + --execute="ALTER INDEX vehicles_auto_index_fk_city_ref_users \ + PARTITION BY LIST (city) ( \ + PARTITION new_york VALUES IN ('new york'), \ + PARTITION boston VALUES IN ('boston'), \ + PARTITION washington_dc VALUES IN ('washington dc'), \ + PARTITION seattle VALUES IN ('seattle'), \ + PARTITION san_francisco VALUES IN ('san francisco'), \ + PARTITION los_angeles VALUES IN ('los angeles') \ + );" + ~~~ + + Next, define partitions for the `rides` table and its secondary indexes: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql \ + {{page.certs}} \ + --database=movr \ + --host=
\ + --execute="ALTER TABLE rides \ + PARTITION BY LIST (city) ( \ + PARTITION new_york VALUES IN ('new york'), \ + PARTITION boston VALUES IN ('boston'), \ + PARTITION washington_dc VALUES IN ('washington dc'), \ + PARTITION seattle VALUES IN ('seattle'), \ + PARTITION san_francisco VALUES IN ('san francisco'), \ + PARTITION los_angeles VALUES IN ('los angeles') \ + );" + ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql \ + {{page.certs}} \ + --database=movr \ + --host=
\ + --execute="ALTER INDEX rides_auto_index_fk_city_ref_users \ + PARTITION BY LIST (city) ( \ + PARTITION new_york VALUES IN ('new york'), \ + PARTITION boston VALUES IN ('boston'), \ + PARTITION washington_dc VALUES IN ('washington dc'), \ + PARTITION seattle VALUES IN ('seattle'), \ + PARTITION san_francisco VALUES IN ('san francisco'), \ + PARTITION los_angeles VALUES IN ('los angeles') \ + );" + ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql \ + {{page.certs}} \ + --database=movr \ + --host=
\ + --execute="ALTER INDEX rides_auto_index_fk_vehicle_city_ref_vehicles \ + PARTITION BY LIST (vehicle_city) ( \ + PARTITION new_york VALUES IN ('new york'), \ + PARTITION boston VALUES IN ('boston'), \ + PARTITION washington_dc VALUES IN ('washington dc'), \ + PARTITION seattle VALUES IN ('seattle'), \ + PARTITION san_francisco VALUES IN ('san francisco'), \ + PARTITION los_angeles VALUES IN ('los angeles') \ + );" + ~~~ + + Finally, drop an unused index on `rides` rather than partition it: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql \ + {{page.certs}} \ + --database=movr \ + --host=
\ + --execute="DROP INDEX rides_start_time_idx;" + ~~~ + + {{site.data.alerts.callout_info}} + The `rides` table contains 1 million rows, so dropping this index will take a few minutes. + {{site.data.alerts.end}} + +1. Now [create replication zones]({% link {{ page.version.version }}/configure-replication-zones.md %}#create-a-replication-zone-for-a-partition) to require city data to be stored on specific nodes based on node locality. + + City | Locality + -----|--------- + New York | `zone=us-east1-b` + Boston | `zone=us-east1-b` + Washington DC | `zone=us-east1-b` + Seattle | `zone=us-west1-a` + San Francisco | `zone=us-west2-a` + Los Angeles | `zone=us-west2-a` + + {{site.data.alerts.callout_info}} + Since our nodes are located in 3 specific GCE zones, we're only going to use the `zone=` portion of node locality. If we were using multiple zones per regions, we would likely use the `region=` portion of the node locality instead. + {{site.data.alerts.end}} + + Start with the `users` table partitions: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --execute="ALTER PARTITION new_york OF TABLE movr.users CONFIGURE ZONE USING constraints='[+zone=us-east1-b]';" \ + {{page.certs}} \ + --host=
+ ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --execute="ALTER PARTITION boston OF TABLE movr.users CONFIGURE ZONE USING constraints='[+zone=us-east1-b]';" \ + {{page.certs}} \ + --host={address of any node}
+ ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --execute="ALTER PARTITION washington_dc OF TABLE movr.users CONFIGURE ZONE USING constraints='[+zone=us-east1-b]';" \ + {{page.certs}} \ + --host={address of any node}
+ ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --execute="ALTER PARTITION seattle OF TABLE movr.users CONFIGURE ZONE USING constraints='[+zone=us-west1-a]';" \ + {{page.certs}} \ + --host={address of any node}
+ ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --execute="ALTER PARTITION san_francisco OF TABLE movr.users CONFIGURE ZONE USING constraints='[+zone=us-west2-a]';" \ + {{page.certs}} \ + --host={address of any node}
+ ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --execute="ALTER PARTITION los_angeles OF TABLE movr.users CONFIGURE ZONE USING constraints='[+zone=us-west2-a]';" \ + {{page.certs}} \ + --host={address of any node}
+ ~~~ + + Move on to the `vehicles` table and secondary index partitions: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --execute="ALTER PARTITION new_york OF TABLE movr.vehicles CONFIGURE ZONE USING constraints='[+zone=us-east1-b]';" \ + {{page.certs}} \ + --host={address of any node}
+ ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --execute="ALTER PARTITION new_york OF INDEX vehicles_auto_index_fk_city_ref_users CONFIGURE ZONE USING constraints='[+zone=us-east1-b]';" \ + {{page.certs}} \ + --host={address of any node}
+ ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --execute="ALTER PARTITION boston OF TABLE movr.vehicles CONFIGURE ZONE USING constraints='[+zone=us-east1-b]';" \ + {{page.certs}} \ + --host={address of any node}
+ ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --execute="ALTER PARTITION boston OF INDEX vehicles_auto_index_fk_city_ref_users CONFIGURE ZONE USING constraints='[+zone=us-east1-b]';" \ + {{page.certs}} \ + --host={address of any node}
+ ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --execute="ALTER PARTITION washington_dc OF TABLE movr.vehicles CONFIGURE ZONE USING constraints='[+zone=us-east1-b]';" \ + {{page.certs}} \ + --host={address of any node}
+ ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --execute="ALTER PARTITION washington_dc OF INDEX vehicles_auto_index_fk_city_ref_users CONFIGURE ZONE USING constraints='[+zone=us-east1-b]';" \ + {{page.certs}} \ + --host={address of any node}
+ ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --execute="ALTER PARTITION seattle OF TABLE movr.vehicles CONFIGURE ZONE USING constraints='[+zone=us-west1-a]';" \ + {{page.certs}} \ + --host={address of any node}
+ ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --execute="ALTER PARTITION seattle OF INDEX vehicles_auto_index_fk_city_ref_users CONFIGURE ZONE USING constraints='[+zone=us-west1-a]';" \ + {{page.certs}} \ + --host={address of any node}
+ ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --execute="ALTER PARTITION san_francisco OF TABLE movr.vehicles CONFIGURE ZONE USING constraints='[+zone=us-west2-a]';" \ + {{page.certs}} \ + --host={address of any node}
+ ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --execute="ALTER PARTITION san_francisco OF INDEX vehicles_auto_index_fk_city_ref_users CONFIGURE ZONE USING constraints='[+zone=us-west2-a]';" \ + {{page.certs}} \ + --host={address of any node}
+ ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --execute="ALTER PARTITION los_angeles OF TABLE movr.vehicles CONFIGURE ZONE USING constraints='[+zone=us-west2-a]';" \ + {{page.certs}} \ + --host={address of any node}
+ ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --execute="ALTER PARTITION los_angeles OF INDEX vehicles_auto_index_fk_city_ref_users CONFIGURE ZONE USING constraints='[+zone=us-west2-a]';" \ + {{page.certs}} \ + --host={address of any node}
+ ~~~ + + Finish with the `rides` table and secondary index partitions: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --execute="ALTER PARTITION new_york OF TABLE movr.rides CONFIGURE ZONE USING constraints='[+zone=us-east1-b]';" \ + {{page.certs}} \ + --host={address of any node}
+ ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --execute="ALTER PARTITION new_york OF INDEX rides_auto_index_fk_city_ref_users CONFIGURE ZONE USING constraints='[+zone=us-east1-b]';" \ + {{page.certs}} \ + --host={address of any node}
+ ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --execute="ALTER PARTITION new_york OF INDEX rides_auto_index_fk_vehicle_city_ref_vehicles CONFIGURE ZONE USING constraints='[+zone=us-east1-b]';" \ + {{page.certs}} \ + --host={address of any node}
+ ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --execute="ALTER PARTITION boston OF TABLE movr.rides CONFIGURE ZONE USING constraints='[+zone=us-east1-b]';" \ + {{page.certs}} \ + --host={address of any node}
+ ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --execute="ALTER PARTITION boston OF INDEX rides_auto_index_fk_city_ref_users CONFIGURE ZONE USING constraints='[+zone=us-east1-b]';" \ + {{page.certs}} \ + --host={address of any node}
+ ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --execute="ALTER PARTITION boston OF INDEX rides_auto_index_fk_vehicle_city_ref_vehicles CONFIGURE ZONE USING constraints='[+zone=us-east1-b]';" \ + {{page.certs}} \ + --host={address of any node}
+ ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --execute="ALTER PARTITION washington_dc OF TABLE movr.rides CONFIGURE ZONE USING constraints='[+zone=us-east1-b]';" \ + {{page.certs}} \ + --host={address of any node}
+ ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --execute="ALTER PARTITION washington_dc OF INDEX rides_auto_index_fk_city_ref_users CONFIGURE ZONE USING constraints='[+zone=us-east1-b]';" \ + {{page.certs}} \ + --host={address of any node}
+ ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --execute="ALTER PARTITION washington_dc OF INDEX rides_auto_index_fk_vehicle_city_ref_vehicles CONFIGURE ZONE USING constraints='[+zone=us-east1-b]';" \ + {{page.certs}} \ + --host={address of any node}
+ ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --execute="ALTER PARTITION seattle OF TABLE movr.rides CONFIGURE ZONE USING constraints='[+zone=us-west1-a]';" \ + {{page.certs}} \ + --host={address of any node}
+ ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --execute="ALTER PARTITION seattle OF INDEX rides_auto_index_fk_city_ref_users CONFIGURE ZONE USING constraints='[+zone=us-west1-a]';" \ + {{page.certs}} \ + --host={address of any node}
+ ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --execute="ALTER PARTITION seattle OF INDEX rides_auto_index_fk_vehicle_city_ref_vehicles CONFIGURE ZONE USING constraints='[+zone=us-west1-a]';" \ + {{page.certs}} \ + --host={address of any node}
+ ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --execute="ALTER PARTITION san_francisco OF TABLE movr.rides CONFIGURE ZONE USING constraints='[+zone=us-west2-a]';" \ + {{page.certs}} \ + --host={address of any node}
+ ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --execute="ALTER PARTITION san_francisco OF INDEX rides_auto_index_fk_city_ref_users CONFIGURE ZONE USING constraints='[+zone=us-west2-a]';" \ + {{page.certs}} \ + --host={address of any node}
+ ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --execute="ALTER PARTITION san_francisco OF INDEX rides_auto_index_fk_vehicle_city_ref_vehicles CONFIGURE ZONE USING constraints='[+zone=us-west2-a]';" \ + {{page.certs}} \ + --host={address of any node}
+ ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --execute="ALTER PARTITION los_angeles OF TABLE movr.rides CONFIGURE ZONE USING constraints='[+zone=us-west2-a]';" \ + {{page.certs}} \ + --host={address of any node}
+ ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --execute="ALTER PARTITION los_angeles OF INDEX rides_auto_index_fk_city_ref_users CONFIGURE ZONE USING constraints='[+zone=us-west2-a]';" \ + {{page.certs}} \ + --host={address of any node}
+ ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --execute="ALTER PARTITION los_angeles OF INDEX rides_auto_index_fk_vehicle_city_ref_vehicles CONFIGURE ZONE USING constraints='[+zone=us-west2-a]';" \ + {{page.certs}} \ + --host={address of any node}
+ ~~~ diff --git a/src/current/_includes/v25.3/performance/reduce-contention.md b/src/current/_includes/v25.3/performance/reduce-contention.md new file mode 100644 index 00000000000..0f52e1f212a --- /dev/null +++ b/src/current/_includes/v25.3/performance/reduce-contention.md @@ -0,0 +1,17 @@ +- Limit the number of affected rows by following [optimizing queries]({% link {{ page.version.version }}/apply-statement-performance-rules.md %}) (e.g., avoiding full scans, creating secondary indexes, etc.). Not only will transactions run faster, lock fewer rows, and hold locks for a shorter duration, but the chances of [read invalidation]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#read-refreshing) when the transaction's [timestamp is pushed]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#timestamp-cache), due to a conflicting write, are decreased because of a smaller read set (i.e., a smaller number of rows read). + +- Break down larger transactions (e.g., [bulk deletes]({% link {{ page.version.version }}/bulk-delete-data.md %})) into smaller ones to have transactions hold locks for a shorter duration. For example, use [common table expressions]({% link {{ page.version.version }}/common-table-expressions.md %}) to group multiple clauses together in a single SQL statement. This will also decrease the likelihood of [pushed timestamps]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#timestamp-cache). For instance, as the size of writes (number of rows written) decreases, the chances of the transaction's timestamp getting bumped by concurrent reads decreases. + +- Use [`SELECT FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) to aggressively lock rows that will later be updated in the transaction. Updates must operate on the most recent version of a row, so a concurrent write to the row will cause a retry error ([`RETRY_WRITE_TOO_OLD`]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}#retry_write_too_old)). Locking early in the transaction forces concurrent writers to block until the transaction is finished, which prevents the retry error. Note that this locks the rows for the duration of the transaction; whether this is tenable will depend on your workload. For more information, see [When and why to use `SELECT FOR UPDATE` in CockroachDB](https://www.cockroachlabs.com/blog/when-and-why-to-use-select-for-update-in-cockroachdb/). + +- Use historical reads ([`SELECT ... AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %})), preferably [bounded staleness reads]({% link {{ page.version.version }}/follower-reads.md %}#when-to-use-bounded-staleness-reads) or [exact staleness with follower reads]({% link {{ page.version.version }}/follower-reads.md %}#run-queries-that-use-exact-staleness-follower-reads) when possible to reduce conflicts with other writes. This reduces the likelihood of [`RETRY_SERIALIZABLE`](transaction-retry-error-reference.html#retry_serializable) errors as fewer writes will happen at the historical timestamp. More specifically, writes' timestamps are less likely to be pushed by historical reads as they would [when the read has a higher priority level](architecture/transaction-layer.html#transaction-conflicts). Note that if the `AS OF SYSTEM TIME` value is below the closed timestamp, the read cannot be invalidated. 
+ +- When replacing values in a row, use [`UPSERT`]({% link {{ page.version.version }}/upsert.md %}) and specify values for all columns in the inserted rows. This will usually have the best performance under contention, compared to combinations of [`SELECT`]({% link {{ page.version.version }}/select-clause.md %}), [`INSERT`]({% link {{ page.version.version }}/insert.md %}), and [`UPDATE`](update.html). + +- If applicable to your workload, assign [column families]({% link {{ page.version.version }}/column-families.md %}#default-behavior) and separate columns that are frequently read and written into separate columns. Transactions will operate on disjoint column families and reduce the likelihood of conflicts. + +- As a last resort, consider adjusting the [closed timestamp interval]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#closed-timestamps) using the `kv.closed_timestamp.target_duration` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) to reduce the likelihood of long-running write transactions having their [timestamps pushed]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#timestamp-cache). This setting should be carefully adjusted if **no other mitigations are available** because there can be downstream implications (e.g., historical reads, change data capture feeds, statistics collection, handling zone configurations, etc.). For example, a transaction _A_ is forced to refresh (i.e., change its timestamp) due to hitting the maximum [_closed timestamp_]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#closed-timestamps) interval (closed timestamps enable [Follower Reads](follower-reads.html#how-stale-follower-reads-work) and [Change Data Capture (CDC)](change-data-capture-overview.html)). This can happen when transaction _A_ is a long-running transaction, and there is a write by another transaction to data that _A_ has already read. + +{{site.data.alerts.callout_info}} +If you increase the `kv.closed_timestamp.target_duration` setting, it means that you are increasing the amount of time by which the data available in [Follower Reads]({% link {{ page.version.version }}/follower-reads.md %}) and [CDC changefeeds]({% link {{ page.version.version }}/change-data-capture-overview.md %}) lags behind the current state of the cluster. In other words, there is a trade-off here: if you absolutely must execute long-running transactions that execute concurrently with other transactions that are writing to the same data, you may have to settle for longer delays on Follower Reads and/or CDC to avoid frequent serialization errors. The anomaly that would be exhibited if these transactions were not retried is called [write skew](https://www.cockroachlabs.com/blog/what-write-skew-looks-like/). +{{site.data.alerts.end}} \ No newline at end of file diff --git a/src/current/_includes/v25.3/performance/reduce-hotspots.md b/src/current/_includes/v25.3/performance/reduce-hotspots.md new file mode 100644 index 00000000000..799fed761b8 --- /dev/null +++ b/src/current/_includes/v25.3/performance/reduce-hotspots.md @@ -0,0 +1,37 @@ +- Use index keys with a random distribution of values, so that transactions over different rows are more likely to operate on separate data ranges. See the [SQL FAQs]({% link {{ page.version.version }}/sql-faqs.md %}#how-do-i-auto-generate-unique-row-ids-in-cockroachdb) on row IDs for suggestions. 
+ +- Place parts of the records that are modified by different transactions in different tables. That is, increase [normalization](https://wikipedia.org/wiki/Database_normalization). However, there are benefits and drawbacks to increasing normalization. + + - Benefits of increasing normalization: + + - Can improve performance for write-heavy workloads. This is because, with increased normalization, a given business fact must be written to one place rather than to multiple places. + - Allows separate transactions to modify related underlying data without causing [contention]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#transaction-contention). + - Reduces the chance of data inconsistency, since a given business fact must be written only to one place. + - Reduces or eliminates data redundancy. + - Uses less disk space. + + - Drawbacks of increasing normalization: + + - Can reduce performance for read-heavy workloads. This is because increasing normalization results in more joins, and can make the SQL more complicated in other ways. + - More complex data model. + + - In general: + + - Increase normalization for write-intensive and read/write-intensive transactional workloads. + - Do not increase normalization for read-intensive reporting workloads. + +- If the application strictly requires operating on very few different index keys, consider using [`ALTER ... SPLIT AT`]({% link {{ page.version.version }}/alter-table.md %}#split-at) so that each index key can be served by a separate group of nodes in the cluster. + +- If you are working with a table that **must** be indexed on sequential keys, consider using [hash-sharded indexes]({% link {{ page.version.version }}/hash-sharded-indexes.md %}). For details about the mechanics and performance improvements of hash-sharded indexes in CockroachDB, see the blog post [Hash Sharded Indexes Unlock Linear Scaling for Sequential Workloads](https://www.cockroachlabs.com/blog/hash-sharded-indexes-unlock-linear-scaling-for-sequential-workloads/). As part of this, we recommend doing thorough performance testing with and without hash-sharded indexes to see which works best for your application. + +- To avoid read hotspots: + + - Increase data distribution, which will allow for more ranges. The hotspot exists because the data being accessed is all co-located in one range. + - Increase [load balancing]({% link {{ page.version.version }}/recommended-production-settings.md %}#load-balancing) across more nodes in the same range. Most transactional reads must go to the leaseholder in CockroachDB, which means that opportunities for load balancing over replicas are minimal. + + However, the following features do permit load balancing over replicas: + + - [Global tables]({% link {{ page.version.version }}/global-tables.md %}). + - [Follower reads]({% link {{ page.version.version }}/follower-reads.md %}) (both the bounded staleness and the exact staleness kinds). + + In these cases, more replicas will help, up to the number of nodes in the cluster. \ No newline at end of file diff --git a/src/current/_includes/v25.3/performance/scale-cluster.md b/src/current/_includes/v25.3/performance/scale-cluster.md new file mode 100644 index 00000000000..3575d31e374 --- /dev/null +++ b/src/current/_includes/v25.3/performance/scale-cluster.md @@ -0,0 +1,61 @@ +1. SSH to one of the `n2-standard-4` instances in the `us-west1-a` zone. + +1. 
Download the [CockroachDB archive](https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz) for Linux, extract the binary, and copy it into the `PATH`: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz \ + | tar -xz + ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ sudo cp -i cockroach-{{ page.release_info.version }}.linux-amd64/cockroach /usr/local/bin/ + ~~~ + +1. Run the [`cockroach start`]({% link {{ page.version.version }}/cockroach-start.md %}) command: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach start \ + {{page.certs}} \ + --advertise-addr=<node internal address> \ + --join=<node1 address>,<node2 address>,<node3 address> \ + --locality=cloud=gce,region=us-west1,zone=us-west1-a \ + --cache=.25 \ + --max-sql-memory=.25 \ + --background + ~~~ + +1. Repeat steps 1-3 for the other two `n2-standard-4` instances in the `us-west1-a` zone. + +1. SSH to one of the `n2-standard-4` instances in the `us-west2-a` zone. + +1. Download the [CockroachDB archive](https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz) for Linux, extract the binary, and copy it into the `PATH`: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz \ + | tar -xz + ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ sudo cp -i cockroach-{{ page.release_info.version }}.linux-amd64/cockroach /usr/local/bin/ + ~~~ + +1. Run the [`cockroach start`]({% link {{ page.version.version }}/cockroach-start.md %}) command: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach start \ + {{page.certs}} \ + --advertise-addr=<node internal address> \ + --join=<node1 address>,<node2 address>,<node3 address> \ + --locality=cloud=gce,region=us-west2,zone=us-west2-a \ + --cache=.25 \ + --max-sql-memory=.25 \ + --background + ~~~ + +1. Repeat steps 5-7 for the other two `n2-standard-4` instances in the `us-west2-a` zone. diff --git a/src/current/_includes/v25.3/performance/sql-trace-txn-enable-threshold.md b/src/current/_includes/v25.3/performance/sql-trace-txn-enable-threshold.md new file mode 100644 index 00000000000..9ec50e30bdb --- /dev/null +++ b/src/current/_includes/v25.3/performance/sql-trace-txn-enable-threshold.md @@ -0,0 +1,6 @@ +{%- if include.version_prefix != nil -%} + {%- assign url = include.version_prefix | append: "cluster-settings.html#setting-sql-trace-txn-enable-threshold" | absolute_url -%} +{%- else -%} + {%- assign url = "cluster-settings.html#setting-sql-trace-txn-enable-threshold" -%} +{%- endif -%} +The default tracing behavior captures a small percentage of transactions, so not all contention events will be recorded. When investigating transaction contention, you can set the [`sql.trace.txn.enable_threshold` cluster setting]({{ url }}) to always capture contention events. diff --git a/src/current/_includes/v25.3/performance/start-cluster.md b/src/current/_includes/v25.3/performance/start-cluster.md new file mode 100644 index 00000000000..516aca418f1 --- /dev/null +++ b/src/current/_includes/v25.3/performance/start-cluster.md @@ -0,0 +1,60 @@ +#### Start the nodes + +1. SSH to the first `n2-standard-4` instance. + +1.
Download the [CockroachDB archive](https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz) for Linux, extract the binary, and copy it into the `PATH`: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz \ + | tar -xz + ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ sudo cp -i cockroach-{{ page.release_info.version }}.linux-amd64/cockroach /usr/local/bin/ + ~~~ + +1. Run the [`cockroach start`]({% link {{ page.version.version }}/cockroach-start.md %}) command: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach start \ + {{page.certs}} \ + --advertise-addr=<node1 internal address> \ + --join=<node1 address>:26257,<node2 address>:26257,<node3 address>:26257 \ + --locality=cloud=gce,region=us-east1,zone=us-east1-b \ + --cache=.25 \ + --max-sql-memory=.25 \ + --background + ~~~ + +1. Repeat steps 1-3 for the other two `n2-standard-4` instances. Be sure to adjust the `--advertise-addr` flag each time. + +#### Initialize the cluster + +1. SSH to the fourth instance, the one not running a CockroachDB node. + +1. Download the [CockroachDB archive](https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz) for Linux, and extract the binary: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz \ + | tar -xz + ~~~ + +1. Copy the binary into the `PATH`: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ sudo cp -i cockroach-{{ page.release_info.version }}.linux-amd64/cockroach /usr/local/bin/ + ~~~ + +1. Run the [`cockroach init`]({% link {{ page.version.version }}/cockroach-init.md %}) command: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach init {{page.certs}} --host=<address of any node>
+ ~~~ + + Each node then prints helpful details to the [standard output]({% link {{ page.version.version }}/cockroach-start.md %}#standard-output), such as the CockroachDB version, the URL for the DB Console, and the SQL URL for clients. diff --git a/src/current/_includes/v25.3/performance/test-performance-after-partitioning.md b/src/current/_includes/v25.3/performance/test-performance-after-partitioning.md new file mode 100644 index 00000000000..dcd388bfb43 --- /dev/null +++ b/src/current/_includes/v25.3/performance/test-performance-after-partitioning.md @@ -0,0 +1,93 @@ +After partitioning, reads and writes for a specific city will be much faster because all replicas for that city are now located on the nodes closest to the city. + +To check this, let's repeat a few of the read and write queries that we executed before partitioning in [step 12](#step-12-test-performance). + +#### Reads + +Again imagine we are a Movr administrator in New York, and we want to get the IDs and descriptions of all New York-based bikes that are currently in use: + +1. SSH to the instance in `us-east1-b` with the Python client. + +1. Query for the data: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ {{page.app}} \ + --host=<address of any node>
\ + --statement="SELECT id, ext FROM vehicles \ + WHERE city = 'new york' \ + AND type = 'bike' \ + AND status = 'in_use'" \ + --repeat=50 \ + --times + ~~~ + + ~~~ + Result: + ['id', 'ext'] + ['0068ee24-2dfb-437d-9a5d-22bb742d519e', "{u'color': u'green', u'brand': u'Kona'}"] + ['01b80764-283b-4232-8961-a8d6a4121a08', "{u'color': u'green', u'brand': u'Pinarello'}"] + ['02a39628-a911-4450-b8c0-237865546f7f', "{u'color': u'black', u'brand': u'Schwinn'}"] + ['02eb2a12-f465-4575-85f8-a4b77be14c54', "{u'color': u'black', u'brand': u'Pinarello'}"] + ['02f2fcc3-fea6-4849-a3a0-dc60480fa6c2', "{u'color': u'red', u'brand': u'FujiCervelo'}"] + ['034d42cf-741f-428c-bbbb-e31820c68588', "{u'color': u'yellow', u'brand': u'Santa Cruz'}"] + ... + + Times (milliseconds): + [20.065784454345703, 7.866144180297852, 8.362054824829102, 9.08803939819336, 7.925987243652344, 7.543087005615234, 7.786035537719727, 8.227825164794922, 7.907867431640625, 7.654905319213867, 7.793903350830078, 7.627964019775391, 7.833957672119141, 7.858037948608398, 7.474184036254883, 9.459972381591797, 7.726192474365234, 7.194995880126953, 7.364034652709961, 7.25102424621582, 7.650852203369141, 7.663965225219727, 9.334087371826172, 7.810115814208984, 7.543087005615234, 7.134914398193359, 7.922887802124023, 7.220029830932617, 7.606029510498047, 7.208108901977539, 7.333993911743164, 7.464170455932617, 7.679939270019531, 7.436990737915039, 7.62486457824707, 7.235050201416016, 7.420063018798828, 7.795095443725586, 7.39598274230957, 7.546901702880859, 7.582187652587891, 7.9669952392578125, 7.418155670166016, 7.539033889770508, 7.805109024047852, 7.086992263793945, 7.069826126098633, 7.833957672119141, 7.43412971496582, 7.035017013549805] + + Median time (milliseconds): + 7.62641429901 + ~~~ + +Before partitioning, this query took a median time of 72.02ms. After partitioning, the query took a median time of only 7.62ms. + +#### Writes + +Now let's again imagine 100 people in Seattle and 100 people in New York want to create new Movr accounts: + +1. SSH to the instance in `us-west1-a` with the Python client. + +1. Create 100 Seattle-based users: + + {% include_cached copy-clipboard.html %} + ~~~ shell + {{page.app}} \ + --host=<address of any node>
\ + --statement="INSERT INTO users VALUES (gen_random_uuid(), 'seattle', 'Seatller', '111 East Street', '1736352379937347')" \ + --repeat=100 \ + --times + ~~~ + + ~~~ + Times (milliseconds): + [41.8248176574707, 9.701967239379883, 8.725166320800781, 9.058952331542969, 7.819175720214844, 6.247997283935547, 10.265827178955078, 7.627964019775391, 9.120941162109375, 7.977008819580078, 9.247064590454102, 8.929967880249023, 9.610176086425781, 14.40286636352539, 8.588075637817383, 8.67319107055664, 9.417057037353516, 7.652044296264648, 8.917093276977539, 9.135961532592773, 8.604049682617188, 9.220123291015625, 7.578134536743164, 9.096860885620117, 8.942842483520508, 8.63790512084961, 7.722139358520508, 13.59701156616211, 9.176015853881836, 11.484146118164062, 9.212017059326172, 7.563114166259766, 8.793115615844727, 8.80289077758789, 7.827043533325195, 7.6389312744140625, 17.47584342956543, 9.436845779418945, 7.63392448425293, 8.594989776611328, 9.002208709716797, 8.93402099609375, 8.71896743774414, 8.76307487487793, 8.156061172485352, 8.729934692382812, 8.738040924072266, 8.25190544128418, 8.971929550170898, 7.460832595825195, 8.889198303222656, 8.45789909362793, 8.761167526245117, 10.223865509033203, 8.892059326171875, 8.961915969848633, 8.968114852905273, 7.750988006591797, 7.761955261230469, 9.199142456054688, 9.02700424194336, 9.509086608886719, 9.428977966308594, 7.902860641479492, 8.940935134887695, 8.615970611572266, 8.75401496887207, 7.906913757324219, 8.179187774658203, 11.447906494140625, 8.71419906616211, 9.202003479003906, 9.263038635253906, 9.089946746826172, 8.92496109008789, 10.32114028930664, 7.913827896118164, 9.464025497436523, 10.612010955810547, 8.78596305847168, 8.878946304321289, 7.575035095214844, 10.657072067260742, 8.777856826782227, 8.649110794067383, 9.012937545776367, 8.931875228881836, 9.31406021118164, 9.396076202392578, 8.908987045288086, 8.002996444702148, 9.089946746826172, 7.5588226318359375, 8.918046951293945, 12.117862701416016, 7.266998291015625, 8.074045181274414, 8.955001831054688, 8.868932723999023, 8.755922317504883] + + Median time (milliseconds): + 8.90052318573 + ~~~ + + Before partitioning, this query took a median time of 48.40ms. After partitioning, the query took a median time of only 8.90ms. + +1. SSH to the instance in `us-east1-b` with the Python client. + +1. Create 100 new NY-based users: + + {% include_cached copy-clipboard.html %} + ~~~ shell + {{page.app}} \ + --host=
\ + --statement="INSERT INTO users VALUES (gen_random_uuid(), 'new york', 'New Yorker', '111 West Street', '9822222379937347')" \ + --repeat=100 \ + --times + ~~~ + + ~~~ + Times (milliseconds): + [276.3068675994873, 9.830951690673828, 8.772134780883789, 9.304046630859375, 8.24880599975586, 7.959842681884766, 7.848978042602539, 7.879018783569336, 7.754087448120117, 10.724067687988281, 13.960123062133789, 9.825944900512695, 9.60993766784668, 9.273052215576172, 9.41920280456543, 8.040904998779297, 16.484975814819336, 10.178089141845703, 8.322000503540039, 9.468793869018555, 8.002042770385742, 9.185075759887695, 9.54294204711914, 9.387016296386719, 9.676933288574219, 13.051986694335938, 9.506940841674805, 12.327909469604492, 10.377168655395508, 15.023946762084961, 9.985923767089844, 7.853031158447266, 9.43303108215332, 9.164094924926758, 10.941028594970703, 9.37199592590332, 12.359857559204102, 8.975028991699219, 7.728099822998047, 8.310079574584961, 9.792089462280273, 9.448051452636719, 8.057117462158203, 9.37795639038086, 9.753942489624023, 9.576082229614258, 8.192062377929688, 9.392023086547852, 7.97581672668457, 8.165121078491211, 9.660959243774414, 8.270978927612305, 9.901046752929688, 8.085966110229492, 10.581016540527344, 9.831905364990234, 7.883787155151367, 8.077859878540039, 8.161067962646484, 10.02812385559082, 7.9898834228515625, 9.840965270996094, 9.452104568481445, 9.747028350830078, 9.003162384033203, 9.206056594848633, 9.274005889892578, 7.8449249267578125, 8.827924728393555, 9.322881698608398, 12.08186149597168, 8.76307487487793, 8.353948593139648, 8.182048797607422, 7.736921310424805, 9.31406021118164, 9.263992309570312, 9.282112121582031, 7.823944091796875, 9.11712646484375, 8.099079132080078, 9.156942367553711, 8.363962173461914, 10.974884033203125, 8.729934692382812, 9.2620849609375, 9.27591323852539, 8.272886276245117, 8.25190544128418, 8.093118667602539, 9.259939193725586, 8.413076400756836, 8.198976516723633, 9.95182991027832, 8.024930953979492, 8.895158767700195, 8.243083953857422, 9.076833724975586, 9.994029998779297, 10.149955749511719] + + Median time (milliseconds): + 9.26303863525 + ~~~ + + Before partitioning, this query took a median time of 116.86ms. After partitioning, the query took a median time of only 9.26ms. diff --git a/src/current/_includes/v25.3/performance/test-performance.md b/src/current/_includes/v25.3/performance/test-performance.md new file mode 100644 index 00000000000..b9a8ca0e6c8 --- /dev/null +++ b/src/current/_includes/v25.3/performance/test-performance.md @@ -0,0 +1,146 @@ +In general, all of the tuning techniques featured in the single-region scenario above still apply in a multi-region deployment. However, the fact that data and leaseholders are spread across the US means greater latencies in many cases. + +#### Reads + +For example, imagine we are a Movr administrator in New York, and we want to get the IDs and descriptions of all New York-based bikes that are currently in use: + +1. SSH to the instance in `us-east1-b` with the Python client. + +1. Query for the data: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ {{page.app}} \ + --host=
\ + --statement="SELECT id, ext FROM vehicles \ + WHERE city = 'new york' \ + AND type = 'bike' \ + AND status = 'in_use'" \ + --repeat=50 \ + --times + ~~~ + + ~~~ + Result: + ['id', 'ext'] + ['0068ee24-2dfb-437d-9a5d-22bb742d519e', "{u'color': u'green', u'brand': u'Kona'}"] + ['01b80764-283b-4232-8961-a8d6a4121a08', "{u'color': u'green', u'brand': u'Pinarello'}"] + ['02a39628-a911-4450-b8c0-237865546f7f', "{u'color': u'black', u'brand': u'Schwinn'}"] + ['02eb2a12-f465-4575-85f8-a4b77be14c54', "{u'color': u'black', u'brand': u'Pinarello'}"] + ['02f2fcc3-fea6-4849-a3a0-dc60480fa6c2', "{u'color': u'red', u'brand': u'FujiCervelo'}"] + ['034d42cf-741f-428c-bbbb-e31820c68588', "{u'color': u'yellow', u'brand': u'Santa Cruz'}"] + ... + + Times (milliseconds): + [933.8209629058838, 72.02410697937012, 72.45206832885742, 72.39294052124023, 72.8158950805664, 72.07584381103516, 72.21412658691406, 71.96712493896484, 71.75517082214355, 72.16811180114746, 71.78592681884766, 72.91603088378906, 71.91109657287598, 71.4719295501709, 72.40676879882812, 71.8080997467041, 71.84004783630371, 71.98500633239746, 72.40891456604004, 73.75001907348633, 71.45905494689941, 71.53081893920898, 71.46596908569336, 72.07608222961426, 71.94995880126953, 71.41804695129395, 71.29096984863281, 72.11899757385254, 71.63381576538086, 71.3050365447998, 71.83194160461426, 71.20394706726074, 70.9981918334961, 72.79205322265625, 72.63493537902832, 72.15285301208496, 71.8698501586914, 72.30591773986816, 71.53582572937012, 72.69001007080078, 72.03006744384766, 72.56317138671875, 71.61688804626465, 72.17121124267578, 70.20092010498047, 72.12018966674805, 73.34589958190918, 73.01592826843262, 71.49410247802734, 72.19099998474121] + + Median time (milliseconds): + 72.0270872116 + ~~~ + +As we saw earlier, the leaseholder for the `vehicles` table is in `us-west2-a` (Los Angeles), so our query had to go from the gateway node in `us-east1-b` all the way to the west coast and then back again before returning data to the client. + +For contrast, imagine we are now a Movr administrator in Los Angeles, and we want to get the IDs and descriptions of all Los Angeles-based bikes that are currently in use: + +1. SSH to the instance in `us-west2-a` with the Python client. + +1. Query for the data: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ {{page.app}} \ + --host=
\ + --statement="SELECT id, ext FROM vehicles \ + WHERE city = 'los angeles' \ + AND type = 'bike' \ + AND status = 'in_use'" \ + --repeat=50 \ + --times + ~~~ + + ~~~ + Result: + ['id', 'ext'] + ['00078349-94d4-43e6-92be-8b0d1ac7ee9f', "{u'color': u'blue', u'brand': u'Merida'}"] + ['003f84c4-fa14-47b2-92d4-35a3dddd2d75', "{u'color': u'red', u'brand': u'Kona'}"] + ['0107a133-7762-4392-b1d9-496eb30ee5f9', "{u'color': u'yellow', u'brand': u'Kona'}"] + ['0144498b-4c4f-4036-8465-93a6bea502a3', "{u'color': u'blue', u'brand': u'Pinarello'}"] + ['01476004-fb10-4201-9e56-aadeb427f98a', "{u'color': u'black', u'brand': u'Merida'}"] + + Times (milliseconds): + [782.6759815216064, 8.564949035644531, 8.226156234741211, 7.949113845825195, 7.86590576171875, 7.842063903808594, 7.674932479858398, 7.555961608886719, 7.642984390258789, 8.024930953979492, 7.717132568359375, 8.46409797668457, 7.520914077758789, 7.6541900634765625, 7.458925247192383, 7.671833038330078, 7.740020751953125, 7.771015167236328, 7.598161697387695, 8.411169052124023, 7.408857345581055, 7.469892501831055, 7.524967193603516, 7.764101028442383, 7.750988006591797, 7.2460174560546875, 6.927967071533203, 7.822990417480469, 7.27391242980957, 7.730960845947266, 7.4710845947265625, 7.4310302734375, 7.33494758605957, 7.455110549926758, 7.021188735961914, 7.083892822265625, 7.812976837158203, 7.625102996826172, 7.447957992553711, 7.179021835327148, 7.504940032958984, 7.224082946777344, 7.257938385009766, 7.714986801147461, 7.4939727783203125, 7.6160430908203125, 7.578849792480469, 7.890939712524414, 7.546901702880859, 7.411956787109375] + + Median time (milliseconds): + 7.6071023941 + ~~~ + +Because the leaseholder for `vehicles` is in the same zone as the client request, this query took just 7.60ms compared to the similar query in New York that took 72.02ms. + +#### Writes + +The geographic distribution of data impacts write performance as well. For example, imagine 100 people in Seattle and 100 people in New York want to create new Movr accounts: + +1. SSH to the instance in `us-west1-a` with the Python client. + +1. Create 100 Seattle-based users: + + {% include_cached copy-clipboard.html %} + ~~~ shell + {{page.app}} \ + --host=
\ + --statement="INSERT INTO users VALUES (gen_random_uuid(), 'seattle', 'Seatller', '111 East Street', '1736352379937347')" \ + --repeat=100 \ + --times + ~~~ + + ~~~ + Times (milliseconds): + [277.4538993835449, 50.12702941894531, 47.75214195251465, 48.13408851623535, 47.872066497802734, 48.65407943725586, 47.78695106506348, 49.14689064025879, 52.770137786865234, 49.00097846984863, 48.68602752685547, 47.387123107910156, 47.36208915710449, 47.6841926574707, 46.49209976196289, 47.06096649169922, 46.753883361816406, 46.304941177368164, 48.90894889831543, 48.63715171813965, 48.37393760681152, 49.23295974731445, 50.13418197631836, 48.310041427612305, 48.57516288757324, 47.62911796569824, 47.77693748474121, 47.505855560302734, 47.89996147155762, 49.79205131530762, 50.76479911804199, 50.21500587463379, 48.73299598693848, 47.55592346191406, 47.35088348388672, 46.7071533203125, 43.00808906555176, 43.1060791015625, 46.02813720703125, 47.91092872619629, 68.71294975280762, 49.241065979003906, 48.9039421081543, 47.82295227050781, 48.26998710632324, 47.631025314331055, 64.51892852783203, 48.12812805175781, 67.33417510986328, 48.603057861328125, 50.31013488769531, 51.02396011352539, 51.45716667175293, 50.85396766662598, 49.07512664794922, 47.49894142150879, 44.67201232910156, 43.827056884765625, 44.412851333618164, 46.69189453125, 49.55601692199707, 49.16882514953613, 49.88598823547363, 49.31306838989258, 46.875, 46.69594764709473, 48.31886291503906, 48.378944396972656, 49.0570068359375, 49.417972564697266, 48.22111129760742, 50.662994384765625, 50.58097839355469, 75.44088363647461, 51.05400085449219, 50.85110664367676, 48.187971115112305, 56.7781925201416, 42.47403144836426, 46.2191104888916, 53.96890640258789, 46.697139739990234, 48.99096488952637, 49.1330623626709, 46.34690284729004, 47.09315299987793, 46.39410972595215, 46.51689529418945, 47.58000373840332, 47.924041748046875, 48.426151275634766, 50.22597312927246, 50.1859188079834, 50.37498474121094, 49.861907958984375, 51.477909088134766, 73.09293746948242, 48.779964447021484, 45.13692855834961, 42.2968864440918] + + Median time (milliseconds): + 48.4025478363 + ~~~ + +1. SSH to the instance in `us-east1-b` with the Python client. + +1. Create 100 new NY-based users: + + {% include_cached copy-clipboard.html %} + ~~~ shell + {{page.app}} \ + --host=
\ + --statement="INSERT INTO users VALUES (gen_random_uuid(), 'new york', 'New Yorker', '111 West Street', '9822222379937347')" \ + --repeat=100 \ + --times + ~~~ + + ~~~ + Times (milliseconds): + [131.05082511901855, 116.88899993896484, 115.15498161315918, 117.095947265625, 121.04082107543945, 115.8750057220459, 113.80696296691895, 113.05880546569824, 118.41201782226562, 125.30899047851562, 117.5389289855957, 115.23890495300293, 116.84799194335938, 120.0411319732666, 115.62800407409668, 115.08989334106445, 113.37089538574219, 115.15498161315918, 115.96989631652832, 133.1961154937744, 114.25995826721191, 118.09396743774414, 122.24102020263672, 116.14608764648438, 114.80998992919922, 131.9139003753662, 114.54391479492188, 115.15307426452637, 116.7759895324707, 135.10799407958984, 117.18511581420898, 120.15485763549805, 118.0570125579834, 114.52388763427734, 115.28396606445312, 130.00011444091797, 126.45292282104492, 142.69423484802246, 117.60401725769043, 134.08493995666504, 117.47002601623535, 115.75007438659668, 117.98381805419922, 115.83089828491211, 114.88890647888184, 113.23404312133789, 121.1700439453125, 117.84791946411133, 115.35286903381348, 115.0820255279541, 116.99700355529785, 116.67394638061523, 116.1041259765625, 114.67289924621582, 112.98894882202148, 117.1119213104248, 119.78602409362793, 114.57300186157227, 129.58717346191406, 118.37983131408691, 126.68204307556152, 118.30306053161621, 113.27195167541504, 114.22920227050781, 115.80777168273926, 116.81294441223145, 114.76683616638184, 115.1430606842041, 117.29192733764648, 118.24417114257812, 116.56999588012695, 113.8620376586914, 114.88819122314453, 120.80597877502441, 132.39002227783203, 131.00910186767578, 114.56179618835449, 117.03896522521973, 117.72680282592773, 115.6010627746582, 115.27681350708008, 114.52317237854004, 114.87483978271484, 117.78903007507324, 116.65701866149902, 122.6949691772461, 117.65193939208984, 120.5449104309082, 115.61179161071777, 117.54202842712402, 114.70890045166016, 113.58809471130371, 129.7171115875244, 117.57993698120117, 117.1119213104248, 117.64001846313477, 140.66505432128906, 136.41691207885742, 116.24789237976074, 115.19908905029297] + + Median time (milliseconds): + 116.868495941 + ~~~ + +It took 48.40ms to create a user in Seattle and 116.86ms to create a user in New York. To better understand this discrepancy, let's look at the distribution of data for the `users` table: + +{% include_cached copy-clipboard.html %} +~~~ shell +$ cockroach sql \ +{{page.certs}} \ +--host=
\ +--database=movr \ +--execute="SHOW EXPERIMENTAL_RANGES FROM TABLE users;" +~~~ + +~~~ + start_key | end_key | range_id | replicas | lease_holder ++-----------+---------+----------+----------+--------------+ + NULL | NULL | 49 | {2,6,8} | 6 +(1 row) +~~~ + +For the single range containing `users` data, one replica is in each zone, with the leaseholder in the `us-west1-a` zone. This means that: + +- When creating a user in Seattle, the request doesn't have to leave the zone to reach the leaseholder. However, since a write requires consensus from its replica group, the write has to wait for confirmation from either the replica in `us-west2-a` (Los Angeles) or `us-east1-b` (New York) before committing and then returning confirmation to the client. +- When creating a user in New York, there are more network hops and, thus, increased latency. The request first needs to travel across the continent to the leaseholder in `us-west1-a`. It then has to wait for confirmation from either the replica in `us-west2-a` (Los Angeles) or `us-east1-b` (New York) before committing and then returning confirmation to the client back in the east. diff --git a/src/current/_includes/v25.3/performance/transaction-retry-error-actions.md b/src/current/_includes/v25.3/performance/transaction-retry-error-actions.md new file mode 100644 index 00000000000..b528f7b4f84 --- /dev/null +++ b/src/current/_includes/v25.3/performance/transaction-retry-error-actions.md @@ -0,0 +1,5 @@ +In most cases, the correct actions to take when encountering transaction retry errors are: + +1. Under `SERIALIZABLE` isolation, update your application to support [client-side retry handling]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}#client-side-retry-handling) when transaction retry errors are encountered. Follow the guidance for the [specific error type]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}#transaction-retry-error-reference), as shown in the sketch after this list. + +1. Take steps to [minimize transaction retry errors]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}#minimize-transaction-retry-errors) in the first place. This means reducing transaction contention overall, and increasing the likelihood that CockroachDB can [automatically retry]({% link {{ page.version.version }}/transactions.md %}#automatic-retries) a failed transaction.
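+
+A minimal sketch of the client-side retry protocol in plain SQL (the `accounts` table is hypothetical; real applications typically wrap this in a retry loop):
+
+~~~ sql
+BEGIN;
+SAVEPOINT cockroach_restart;
+UPDATE accounts SET balance = balance - 10 WHERE id = 1;
+-- If any statement returns a 40001 retry error, issue
+-- ROLLBACK TO SAVEPOINT cockroach_restart and retry the statements.
+RELEASE SAVEPOINT cockroach_restart;
+COMMIT;
+~~~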
\ No newline at end of file diff --git a/src/current/_includes/v25.3/performance/tuning-secure.py b/src/current/_includes/v25.3/performance/tuning-secure.py new file mode 100644 index 00000000000..a644dbb1c87 --- /dev/null +++ b/src/current/_includes/v25.3/performance/tuning-secure.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python + +import argparse +import psycopg2 +import time + +parser = argparse.ArgumentParser( + description="test performance of statements against movr database") +parser.add_argument("--host", required=True, + help="ip address of one of the CockroachDB nodes") +parser.add_argument("--statement", required=True, + help="statement to execute") +parser.add_argument("--repeat", type=int, + help="number of times to repeat the statement", default = 20) +parser.add_argument("--times", + help="print time for each repetition of the statement", action="store_true") +parser.add_argument("--cumulative", + help="print cumulative time for all repetitions of the statement", action="store_true") +args = parser.parse_args() + +conn = psycopg2.connect( + database='movr', + user='root', + host=args.host, + port=26257, + sslmode='require', + sslrootcert='certs/ca.crt', + sslkey='certs/client.root.key', + sslcert='certs/client.root.crt' +) +conn.set_session(autocommit=True) +cur = conn.cursor() + +def median(lst): + n = len(lst) + if n < 1: + return None + if n % 2 == 1: + return sorted(lst)[n//2] + else: + return sum(sorted(lst)[n//2-1:n//2+1])/2.0 + +times = list() +for n in range(args.repeat): + start = time.time() + statement = args.statement + cur.execute(statement) + if n < 1: + if cur.description is not None: + colnames = [desc[0] for desc in cur.description] + print("") + print("Result:") + print(colnames) + rows = cur.fetchall() + for row in rows: + print([str(cell) for cell in row]) + end = time.time() + times.append((end - start)* 1000) + +cur.close() +conn.close() + +print("") +if args.times: + print("Times (milliseconds):") + print(times) + print("") +# print("Average time (milliseconds):") +# print(float(sum(times))/len(times)) +# print("") +print("Median time (milliseconds):") +print(median(times)) +print("") +if args.cumulative: + print("Cumulative time (milliseconds):") + print(sum(times)) + print("") diff --git a/src/current/_includes/v25.3/performance/tuning.py b/src/current/_includes/v25.3/performance/tuning.py new file mode 100644 index 00000000000..dcb567dad91 --- /dev/null +++ b/src/current/_includes/v25.3/performance/tuning.py @@ -0,0 +1,73 @@ +#!/usr/bin/env python + +import argparse +import psycopg2 +import time + +parser = argparse.ArgumentParser( + description="test performance of statements against movr database") +parser.add_argument("--host", required=True, + help="ip address of one of the CockroachDB nodes") +parser.add_argument("--statement", required=True, + help="statement to execute") +parser.add_argument("--repeat", type=int, + help="number of times to repeat the statement", default = 20) +parser.add_argument("--times", + help="print time for each repetition of the statement", action="store_true") +parser.add_argument("--cumulative", + help="print cumulative time for all repetitions of the statement", action="store_true") +args = parser.parse_args() + +conn = psycopg2.connect( + database='movr', + user='root', + host=args.host, + port=26257 +) +conn.set_session(autocommit=True) +cur = conn.cursor() + +def median(lst): + n = len(lst) + if n < 1: + return None + if n % 2 == 1: + return sorted(lst)[n//2] + else: + return sum(sorted(lst)[n//2-1:n//2+1])/2.0 + 
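+# Time each repetition of the statement below. On the first iteration
+# only (n < 1), the result set is fetched and printed inside the timed
+# region, which inflates that first timing sample.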
+times = list() +for n in range(args.repeat): + start = time.time() + statement = args.statement + cur.execute(statement) + if n < 1: + if cur.description is not None: + colnames = [desc[0] for desc in cur.description] + print("") + print("Result:") + print(colnames) + rows = cur.fetchall() + for row in rows: + print([str(cell) for cell in row]) + end = time.time() + times.append((end - start)* 1000) + +cur.close() +conn.close() + +print("") +if args.times: + print("Times (milliseconds):") + print(times) + print("") +# print("Average time (milliseconds):") +# print(float(sum(times))/len(times)) +# print("") +print("Median time (milliseconds):") +print(median(times)) +print("") +if args.cumulative: + print("Cumulative time (milliseconds):") + print(sum(times)) + print("") diff --git a/src/current/_includes/v25.3/performance/use-hash-sharded-indexes.md b/src/current/_includes/v25.3/performance/use-hash-sharded-indexes.md new file mode 100644 index 00000000000..314b0c24f5f --- /dev/null +++ b/src/current/_includes/v25.3/performance/use-hash-sharded-indexes.md @@ -0,0 +1 @@ +We [discourage indexing on sequential keys]({% link {{ page.version.version }}/schema-design-indexes.md %}#best-practices). If a table **must** be indexed on sequential keys, use [hash-sharded indexes]({% link {{ page.version.version }}/hash-sharded-indexes.md %}). Hash-sharded indexes distribute sequential traffic uniformly across ranges, eliminating single-range [hotspots]({% link {{ page.version.version }}/understand-hotspots.md %}) and improving write performance on sequentially-keyed indexes at a small cost to read performance. \ No newline at end of file diff --git a/src/current/_includes/v25.3/physical-replication/alter-virtual-cluster-diagram.html b/src/current/_includes/v25.3/physical-replication/alter-virtual-cluster-diagram.html new file mode 100644 index 00000000000..c5400f6e9ed --- /dev/null +++ b/src/current/_includes/v25.3/physical-replication/alter-virtual-cluster-diagram.html @@ -0,0 +1,431 @@ + +
+ + + + + + ALTER + + + VIRTUAL + + + CLUSTER + + + virtual_cluster_spec + + + + PAUSE + + + RESUME + + + REPLICATION + + + COMPLETE + + + REPLICATION + + + TO + + + SYSTEM + + + TIME + + + timestamp + + + + LATEST + + + SET + + + REPLICATION + + + RETENTION + + + EXPIRATION + + + WINDOW + + + = + + + duration + + + + , + + + START + + + REPLICATION + + + OF + + + virtual_cluster_spec + + + + ON + + + physical_cluster + + + + WITH + + + RETENTION + + + EXPIRATION + + + WINDOW + + + = + + + duration + + + + , + + + OPTIONS + + + ( + + + RETENTION + + + EXPIRATION + + + WINDOW + + + = + + + duration + + + + , + + + ) + + + SERVICE + + + SHARED + + + RENAME + + + TO + + + virtual_cluster_spec + + + + STOP + + + SERVICE + + + GRANT + + + REVOKE + + + ALL + + + CAPABILITIES + + + CAPABILITY + + + virtual_cluster_capability_list + + + + + +
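+
+The diagram above corresponds to statements such as the following sketch (`main` is a hypothetical virtual cluster name):
+
+~~~ sql
+-- Initiate failover of the virtual cluster "main" as of the most
+-- recent replicated timestamp.
+ALTER VIRTUAL CLUSTER main COMPLETE REPLICATION TO LATEST;
+~~~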
\ No newline at end of file diff --git a/src/current/_includes/v25.3/physical-replication/create-virtual-cluster-diagram.html b/src/current/_includes/v25.3/physical-replication/create-virtual-cluster-diagram.html new file mode 100644 index 00000000000..2f0eff15eb9 --- /dev/null +++ b/src/current/_includes/v25.3/physical-replication/create-virtual-cluster-diagram.html @@ -0,0 +1,171 @@ + +
+ + + + + + CREATE + + + VIRTUAL + + + CLUSTER + + + IF + + + NOT + + + EXISTS + + + + virtual_cluster_name + + + + LIKE + + + + virtual_cluster_spec + + + + FROM + + + REPLICATION + + + OF + + + + primary_virtual_cluster + + + + ON + + + + primary_connection_string + + + + WITH + + + + replication_options_list + + + + OPTIONS + + + ( + + + + replication_options_list + + + + ) + + + + +
\ No newline at end of file diff --git a/src/current/_includes/v25.3/physical-replication/drop-virtual-cluster-diagram.html b/src/current/_includes/v25.3/physical-replication/drop-virtual-cluster-diagram.html new file mode 100644 index 00000000000..df3fc0eb983 --- /dev/null +++ b/src/current/_includes/v25.3/physical-replication/drop-virtual-cluster-diagram.html @@ -0,0 +1,65 @@ + +
+ + + + + + DROP + + + VIRTUAL + + + CLUSTER + + + IF + + + EXISTS + + + + virtual_cluster_spec + + + + IMMEDIATE + + + + +
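+
+For example, a sketch using a hypothetical virtual cluster named "main":
+
+~~~ sql
+-- Drop the virtual cluster immediately rather than waiting for its
+-- data to be garbage-collected.
+DROP VIRTUAL CLUSTER IF EXISTS main IMMEDIATE;
+~~~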
diff --git a/src/current/_includes/v25.3/physical-replication/failover-read-virtual-cluster.md b/src/current/_includes/v25.3/physical-replication/failover-read-virtual-cluster.md new file mode 100644 index 00000000000..7430d38f9a9 --- /dev/null +++ b/src/current/_includes/v25.3/physical-replication/failover-read-virtual-cluster.md @@ -0,0 +1 @@ +If you started the PCR stream with the `READ VIRTUAL CLUSTER` option, failing over with `SYSTEM TIME` will destroy the `readonly` virtual cluster. If you fail over with `LATEST`, the `readonly` virtual cluster will remain on the original standby cluster, but will **not** update with new writes. \ No newline at end of file diff --git a/src/current/_includes/v25.3/physical-replication/fast-failback-latest-timestamp.md b/src/current/_includes/v25.3/physical-replication/fast-failback-latest-timestamp.md new file mode 100644 index 00000000000..a950221bb9b --- /dev/null +++ b/src/current/_includes/v25.3/physical-replication/fast-failback-latest-timestamp.md @@ -0,0 +1 @@ +When you [fail back]({% link {{ page.version.version }}/failover-replication.md %}#failback) to a cluster that was previously the primary cluster, you should fail over to the `LATEST` timestamp. Using a [historical timestamp]({% link {{ page.version.version }}/as-of-system-time.md %}) may lead to the failback failing. {% if page.name == "failover-replication.md" %} Refer to the [PCR known limitations]({% link {{ page.version.version }}/physical-cluster-replication-overview.md %}#known-limitations).{% endif %} diff --git a/src/current/_includes/v25.3/physical-replication/interface-virtual-cluster.md b/src/current/_includes/v25.3/physical-replication/interface-virtual-cluster.md new file mode 100644 index 00000000000..02890c3fc83 --- /dev/null +++ b/src/current/_includes/v25.3/physical-replication/interface-virtual-cluster.md @@ -0,0 +1,2 @@ +- The system virtual cluster manages the cluster's control plane and the replication of the cluster's data. Admins connect to the system virtual cluster to configure and manage the underlying CockroachDB cluster, set up PCR, create and manage a virtual cluster, and observe metrics and logs for the CockroachDB cluster and each virtual cluster. +- Each other virtual cluster manages its own data plane. Users connect to a virtual cluster by default, rather than the system virtual cluster. To connect to the system virtual cluster, the connection string must be modified. Virtual clusters contain user data and run application workloads. When PCR is enabled, the non-system virtual cluster on both primary and secondary clusters is named `main`. diff --git a/src/current/_includes/v25.3/physical-replication/like-description.md b/src/current/_includes/v25.3/physical-replication/like-description.md new file mode 100644 index 00000000000..5922c4a6463 --- /dev/null +++ b/src/current/_includes/v25.3/physical-replication/like-description.md @@ -0,0 +1 @@ +Including the `LIKE template` parameter ensures that the virtual cluster on the standby is created with the correct capabilities, which manage what the virtual cluster can do. `LIKE` will refer to a virtual cluster on the CockroachDB cluster you're running the statement from. 
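+
+For example, a sketch of starting a replication stream on the standby with `LIKE` (the names and connection string are hypothetical):
+
+~~~ sql
+CREATE VIRTUAL CLUSTER main
+  LIKE template
+  FROM REPLICATION OF main
+  ON 'postgresql://{connection string}';
+~~~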
\ No newline at end of file diff --git a/src/current/_includes/v25.3/physical-replication/phys-rep-sql-pages.md b/src/current/_includes/v25.3/physical-replication/phys-rep-sql-pages.md new file mode 100644 index 00000000000..562905ff97e --- /dev/null +++ b/src/current/_includes/v25.3/physical-replication/phys-rep-sql-pages.md @@ -0,0 +1,5 @@ +PCR happens between an _active_ primary cluster and a _passive_ standby cluster that accepts updates from the primary cluster. The unit of replication is a _virtual cluster_, which is part of the underlying infrastructure in the primary and standby clusters. The CockroachDB cluster has: + +{% include {{ page.version.version }}/physical-replication/interface-virtual-cluster.md %} + +For more detail, refer to the [Physical Cluster Replication Overview]({% link {{ page.version.version }}/physical-cluster-replication-overview.md %}). diff --git a/src/current/_includes/v25.3/physical-replication/reference-links-replication.md b/src/current/_includes/v25.3/physical-replication/reference-links-replication.md new file mode 100644 index 00000000000..5d7b017b9fb --- /dev/null +++ b/src/current/_includes/v25.3/physical-replication/reference-links-replication.md @@ -0,0 +1,4 @@ +{% comment %} +- Cluster virtualization: The primary and standby clusters are started as virtualized clusters. +- [Physical Cluster Replication Technical Overview]({% link {{ page.version.version }}/physical-cluster-replication-overview.md %}) page. +{% endcomment %} \ No newline at end of file diff --git a/src/current/_includes/v25.3/physical-replication/retention.md b/src/current/_includes/v25.3/physical-replication/retention.md new file mode 100644 index 00000000000..303fe6ebc79 --- /dev/null +++ b/src/current/_includes/v25.3/physical-replication/retention.md @@ -0,0 +1 @@ +We do not recommend setting `RETENTION` much higher than the 24-hour default on the standby cluster. Accumulated data from an excessive [retention (failover) window]({% link {{ page.version.version }}/physical-cluster-replication-technical-overview.md %}#failover-and-promotion-process) could affect queries running on the standby cluster that is active following a [failover]({% link {{ page.version.version }}/failover-replication.md %}). \ No newline at end of file diff --git a/src/current/_includes/v25.3/physical-replication/show-virtual-cluster-data-state.md b/src/current/_includes/v25.3/physical-replication/show-virtual-cluster-data-state.md new file mode 100644 index 00000000000..16f858ef621 --- /dev/null +++ b/src/current/_includes/v25.3/physical-replication/show-virtual-cluster-data-state.md @@ -0,0 +1,10 @@ +State | Description +-----------+---------------- +`add` | ([**Preview**]({% link {{ page.version.version }}/cockroachdb-feature-availability.md %}#features-in-preview)) The [`readonly` virtual cluster]({% link {{ page.version.version }}/create-virtual-cluster.md %}#start-a-pcr-stream-with-read-from-standby) is waiting for the PCR job's initial scan to complete, then `readonly` will be available for read queries. +`initializing replication` | The replication job is completing the initial scan of data from the primary cluster before it starts replicating data in real time. +`ready` | A virtual cluster's data is ready for use. The `readonly` virtual cluster is ready to serve read queries. +`replicating` | The replication job has started and is replicating data. +`replication paused` | The replication job is paused due to an error or a manual request with [`ALTER VIRTUAL CLUSTER ... 
PAUSE REPLICATION`]({% link {{ page.version.version }}/alter-virtual-cluster.md %}). +`replication pending failover` | The replication job is running and the failover time has been set. Once the replication reaches the failover time, the failover will begin automatically. +`replication failing over` | The job has started failing over. The failover time can no longer be changed. Once failover is complete, a virtual cluster will be available for use with [`ALTER VIRTUAL CLUSTER ... START SERVICE SHARED`]({% link {{ page.version.version }}/alter-virtual-cluster.md %}). +`replication error` | An error has occurred. You can find more detail in the error message and the [logs]({% link {{ page.version.version }}/configure-logs.md %}). **Note:** A PCR job will retry for 3 minutes before failing. diff --git a/src/current/_includes/v25.3/physical-replication/show-virtual-cluster-diagram.html b/src/current/_includes/v25.3/physical-replication/show-virtual-cluster-diagram.html new file mode 100644 index 00000000000..f9d31ede888 --- /dev/null +++ b/src/current/_includes/v25.3/physical-replication/show-virtual-cluster-diagram.html @@ -0,0 +1,89 @@ + +
+ + + + + + SHOW + + + VIRTUAL + + + CLUSTERS + + + CLUSTER + + + + virtual_cluster_spec + + + + WITH + + + REPLICATION + + + STATUS + + + CAPABILITIES + + + , + + + + +
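+
+For example, a sketch of checking replication status for a hypothetical virtual cluster named "main":
+
+~~~ sql
+SHOW VIRTUAL CLUSTER main WITH REPLICATION STATUS;
+~~~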
\ No newline at end of file diff --git a/src/current/_includes/v25.3/physical-replication/show-virtual-cluster-responses.md b/src/current/_includes/v25.3/physical-replication/show-virtual-cluster-responses.md new file mode 100644 index 00000000000..97c962a2547 --- /dev/null +++ b/src/current/_includes/v25.3/physical-replication/show-virtual-cluster-responses.md @@ -0,0 +1,15 @@ +Field | Response +---------+---------- +`id` | The ID of a virtual cluster. +`name` | The name of the standby (destination) virtual cluster. +`data_state` | The state of the data on a virtual cluster. This can show one of the following: `initializing replication`, `ready`, `replicating`, `replication paused`, `replication pending failover`, `replication failing over`, `replication error`. Refer to [Data state](#data-state) for more detail on each response. +`service_mode` | The service mode shows whether a virtual cluster is ready to accept SQL requests. This can show `none` or `shared`. When `shared`, a virtual cluster's SQL connections will be served by the same nodes that are serving the system virtual cluster. +`source_tenant_name` | The name of the primary (source) virtual cluster. +`source_cluster_uri` | The URI of the primary (source) cluster. The standby cluster connects to the primary cluster using this URI when [starting a replication stream]({% link {{ page.version.version }}/set-up-physical-cluster-replication.md %}#step-4-start-replication). +`replicated_time` | The latest timestamp at which the standby cluster has consistent data — that is, the latest time you can fail over to. This time advances automatically as long as the replication proceeds without error. `replicated_time` is updated periodically (every `30s`). +`retained_time` | The earliest timestamp at which the standby cluster has consistent data — that is, the earliest time you can fail over to. +`replication_lag` | The time between the most up-to-date replicated time and the actual time. Refer to the [Technical Overview]({% link {{ page.version.version }}/physical-cluster-replication-technical-overview.md %}) for more detail. +`failover_time` | The time at which the failover will begin. This can be in the past or the future. Refer to [Fail over to a point in time]({% link {{ page.version.version }}/failover-replication.md %}#fail-over-to-a-point-in-time). +`status` | The status of the replication stream. This can show one of the following: `initializing replication`, `ready`, `replicating`, `replication paused`, `replication pending failover`, `replication failing over`, `replication error`. Refer to [Data state](#data-state) for more detail on each response. +`capability_name` | The [capability]({% link {{ page.version.version }}/create-virtual-cluster.md %}#capabilities) name. +`capability_value` | Whether the [capability]({% link {{ page.version.version }}/create-virtual-cluster.md %}#capabilities) is enabled for a virtual cluster. diff --git a/src/current/_includes/v25.3/physical-replication/template-description.md b/src/current/_includes/v25.3/physical-replication/template-description.md new file mode 100644 index 00000000000..233b31f99b1 --- /dev/null +++ b/src/current/_includes/v25.3/physical-replication/template-description.md @@ -0,0 +1 @@ +The [configuration profile](#start-the-standby-cluster) included at startup creates the `template` virtual cluster with the same set of _capabilities_ per CockroachDB version. 
When you start a replication stream, you can specify the `template` virtual cluster with `LIKE` to ensure other virtual clusters on the standby cluster will work in the same way. Refer to [Step 4: Start replication](#step-4-start-replication) for syntax details. \ No newline at end of file diff --git a/src/current/_includes/v25.3/prod-deployment/advertise-addr-join.md b/src/current/_includes/v25.3/prod-deployment/advertise-addr-join.md new file mode 100644 index 00000000000..2c8d39660fb --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/advertise-addr-join.md @@ -0,0 +1,4 @@ +Flag | Description +-----|------------ +`--advertise-addr` | Specifies the IP address/hostname and port to tell other nodes to use. The port number can be omitted, in which case it defaults to `26257`.<br><br>This value must route to an IP address the node is listening on (with `--listen-addr` unspecified, the node listens on all IP addresses).<br><br>
In some networking scenarios, you may need to use `--advertise-addr` and/or `--listen-addr` differently. For more details, see [Networking]({% link {{ page.version.version }}/recommended-production-settings.md %}#networking). +`--join` | Identifies the address of 3-5 of the initial nodes of the cluster. These addresses should match the addresses that the target nodes are advertising. diff --git a/src/current/_includes/v25.3/prod-deployment/aws-inbound-rules.md b/src/current/_includes/v25.3/prod-deployment/aws-inbound-rules.md new file mode 100644 index 00000000000..8be748205a6 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/aws-inbound-rules.md @@ -0,0 +1,31 @@ +#### Inter-node and load balancer-node communication + + Field | Value +-------|------------------- + Port Range | **26257** + Source | The ID of your security group (e.g., *sg-07ab277a*) + +#### Application data + + Field | Value +-------|------------------- + Port Range | **26257** + Source | Your application's IP ranges + +#### DB Console + + Field | Value +-------|------------------- + Port Range | **8080** + Source | Your network's IP ranges + +You can set your network IP by selecting "My IP" in the Source field. + +#### Load balancer-health check communication + + Field | Value +-------|------------------- + Port Range | **8080** + Source | The IP range of your VPC in CIDR notation (e.g., 10.12.0.0/16) + + To get the IP range of a VPC, open the [Amazon VPC console](https://console.aws.amazon.com/vpc/) and find the VPC listed in the section called Your VPCs. \ No newline at end of file diff --git a/src/current/_includes/v25.3/prod-deployment/backup.sh b/src/current/_includes/v25.3/prod-deployment/backup.sh new file mode 100644 index 00000000000..efcbd4c7041 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/backup.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +set -euo pipefail + +# This script creates full backups when run on the configured +# day of the week and incremental backups when run on other days, and tracks +# recently created backups in a file to pass as the base for incremental backups. + +what="" # Leave empty for cluster backup, or add "DATABASE database_name" to backup a database. +base="/backups" # The URL where you want to store the backup. +extra="" # Any additional parameters that need to be appended to the BACKUP URI e.g., AWS key params. +recent=recent_backups.txt # File in which recent backups are tracked. +backup_parameters= # e.g., "WITH revision_history" + +# Customize the `cockroach sql` command with `--host`, `--certs-dir` or `--insecure`, `--port`, and additional flags as needed to connect to the SQL client. +runsql() { cockroach sql --insecure -e "$1"; } + +destination="${base}/$(date +"%Y-%V")${extra}" # %V is the week number of the year, with Monday as the first day of the week. + +runsql "BACKUP $what TO '$destination' AS OF SYSTEM TIME '-1m' $backup_parameters" +echo "backed up to ${destination}" diff --git a/src/current/_includes/v25.3/prod-deployment/check-sql-query-performance.md b/src/current/_includes/v25.3/prod-deployment/check-sql-query-performance.md new file mode 100644 index 00000000000..5fffceed436 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/check-sql-query-performance.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_info}} +If you aren't sure whether SQL query performance needs to be improved on your cluster, see [Identify slow statements]({% link {{ page.version.version }}/query-behavior-troubleshooting.md %}#identify-slow-queries). 
+{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/prod-deployment/cluster-unavailable-monitoring.md b/src/current/_includes/v25.3/prod-deployment/cluster-unavailable-monitoring.md new file mode 100644 index 00000000000..70f7e08e47f --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/cluster-unavailable-monitoring.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_info}} +If the cluster becomes unavailable, the DB Console and Cluster API will also become unavailable. You can continue to monitor the cluster via the [Prometheus endpoint]({% link {{ page.version.version }}/monitoring-and-alerting.md %}#prometheus-endpoint) and [logs]({% link {{ page.version.version }}/logging-overview.md %}). +{{site.data.alerts.end}} \ No newline at end of file diff --git a/src/current/_includes/v25.3/prod-deployment/decommission-pre-flight-checks.md b/src/current/_includes/v25.3/prod-deployment/decommission-pre-flight-checks.md new file mode 100644 index 00000000000..b267379384b --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/decommission-pre-flight-checks.md @@ -0,0 +1,18 @@ +By default, CockroachDB will perform a set of "decommissioning pre-flight checks". That is, decommission pre-checks look over the ranges with replicas on the to-be-decommissioned node, and check that each replica can be moved to some other node in the cluster. If errors are detected that would result in the inability to complete node decommissioning, they will be printed to `STDERR` and the command will exit *without attempting to perform node decommissioning*. For example, ranges that require a certain number of voting replicas in a region but do not have any available nodes in the region not already containing a replica will block the decommissioning process. + +The error format is shown below: + +~~~ +ranges blocking decommission detected +n1 has 44 replicas blocked with error: "0 of 1 live stores are able to take a new replica for the range (2 already have a voter, 0 already have a non-voter); likely not enough nodes in cluster" +n2 has 27 replicas blocked with error: "0 of 1 live stores are able to take a new replica for the range (2 already have a voter, 0 already have a non-voter); likely not enough nodes in cluster" + +ERROR: Cannot decommission nodes. +Failed running "node decommission" +~~~ + +These checks can be skipped by [passing the flag `--checks=skip` to `cockroach node decommission`]({% link {{ page.version.version }}/cockroach-node.md %}#decommission-checks). + +{{site.data.alerts.callout_info}} +The amount of remaining disk space on other nodes in the cluster is not yet considered as part of the decommissioning pre-flight checks. For more information, see [cockroachdb/cockroach#71757](https://github.com/cockroachdb/cockroach/issues/71757) +{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/prod-deployment/healthy-command-commit-latency.md b/src/current/_includes/v25.3/prod-deployment/healthy-command-commit-latency.md new file mode 100644 index 00000000000..63fd751610c --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/healthy-command-commit-latency.md @@ -0,0 +1 @@ +**Expected values for a healthy cluster**: On SSDs ([strongly recommended]({% link {{ page.version.version }}/recommended-production-settings.md %}#storage)), this should be between 1 and 100 milliseconds. On HDDs, this should be no more than 1 second. 
\ No newline at end of file diff --git a/src/current/_includes/v25.3/prod-deployment/healthy-cpu-percent.md b/src/current/_includes/v25.3/prod-deployment/healthy-cpu-percent.md new file mode 100644 index 00000000000..a58b0b87973 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/healthy-cpu-percent.md @@ -0,0 +1 @@ +**Expected values for a healthy cluster**: CPU utilized by CockroachDB should not persistently exceed 80%. Because this metric does not reflect system CPU usage, values above 80% suggest that actual CPU utilization is nearing 100%. \ No newline at end of file diff --git a/src/current/_includes/v25.3/prod-deployment/healthy-crdb-memory.md b/src/current/_includes/v25.3/prod-deployment/healthy-crdb-memory.md new file mode 100644 index 00000000000..9d682d3cfb0 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/healthy-crdb-memory.md @@ -0,0 +1 @@ +**Expected values for a healthy cluster**: Go Allocated will depend on workload but should not exceed [`--max-sql-memory`]({% link {{ page.version.version }}/cockroach-start.md %}#flags) by more than 100%. CGo Allocated should not exceed the [`--cache`]({% link {{ page.version.version }}/cockroach-start.md %}#flags) size and CGo Total should not exceed the [`--cache`]({% link {{ page.version.version }}/cockroach-start.md %}#flags) size by more than 15%. \ No newline at end of file diff --git a/src/current/_includes/v25.3/prod-deployment/healthy-disk-ops-in-progress.md b/src/current/_includes/v25.3/prod-deployment/healthy-disk-ops-in-progress.md new file mode 100644 index 00000000000..e80714df120 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/healthy-disk-ops-in-progress.md @@ -0,0 +1 @@ +**Expected values for a healthy cluster**: This value should be 0 or single-digit values for short periods of time. If the values persist in double digits, you may have an I/O bottleneck. \ No newline at end of file diff --git a/src/current/_includes/v25.3/prod-deployment/healthy-lsm.md b/src/current/_includes/v25.3/prod-deployment/healthy-lsm.md new file mode 100644 index 00000000000..67ca6f36420 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/healthy-lsm.md @@ -0,0 +1 @@ +**Expected values for a healthy cluster**: An IO Overload value greater than 1.0 generally indicates an overload in the Pebble LSM tree. High values indicate heavy write load that is causing accumulation of files in level 0. These files are not being compacted quickly enough to lower levels, resulting in a [misshapen LSM]({% link {{ page.version.version }}/architecture/storage-layer.md %}#inverted-lsms). \ No newline at end of file diff --git a/src/current/_includes/v25.3/prod-deployment/healthy-node-heartbeat-latency.md b/src/current/_includes/v25.3/prod-deployment/healthy-node-heartbeat-latency.md new file mode 100644 index 00000000000..982514be9c9 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/healthy-node-heartbeat-latency.md @@ -0,0 +1 @@ +**Expected values for a healthy cluster**: Less than 100ms in addition to the [network latency]({% link {{ page.version.version }}/ui-network-latency-page.md %}) between nodes in the cluster. 
\ No newline at end of file diff --git a/src/current/_includes/v25.3/prod-deployment/healthy-read-amplification.md b/src/current/_includes/v25.3/prod-deployment/healthy-read-amplification.md new file mode 100644 index 00000000000..c7ffe9c6d17 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/healthy-read-amplification.md @@ -0,0 +1 @@
+**Expected values for a healthy cluster**: Read amplification factor should be in the single digits. A value exceeding 50 for 1 hour strongly suggests that the LSM tree has an unhealthy shape. \ No newline at end of file diff --git a/src/current/_includes/v25.3/prod-deployment/healthy-sql-memory.md b/src/current/_includes/v25.3/prod-deployment/healthy-sql-memory.md new file mode 100644 index 00000000000..968b79b0b61 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/healthy-sql-memory.md @@ -0,0 +1 @@
+**Expected values for a healthy cluster**: This value should not exceed the [`--max-sql-memory`]({% link {{ page.version.version }}/recommended-production-settings.md %}#cache-and-sql-memory-size) size. A healthy threshold is 75% of allocated `--max-sql-memory`. \ No newline at end of file diff --git a/src/current/_includes/v25.3/prod-deployment/healthy-storage-capacity.md b/src/current/_includes/v25.3/prod-deployment/healthy-storage-capacity.md new file mode 100644 index 00000000000..bd8c44e1a31 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/healthy-storage-capacity.md @@ -0,0 +1 @@
+**Expected values for a healthy cluster**: Used capacity should not persistently exceed 80% of the total capacity. diff --git a/src/current/_includes/v25.3/prod-deployment/healthy-workload-concurrency.md b/src/current/_includes/v25.3/prod-deployment/healthy-workload-concurrency.md new file mode 100644 index 00000000000..8c0c8e1ffc8 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/healthy-workload-concurrency.md @@ -0,0 +1 @@
+**Expected values for a healthy cluster**: At any time, the total number of connections actively executing SQL statements should not exceed 4 times the number of vCPUs in the cluster. You can find the number of connections actively executing SQL statements in the Active Executions view in the [DB Console]({% link {{ page.version.version }}/ui-statements-page.md %}) or [Cloud Console]({% link cockroachcloud/statements-page.md %}). You can find the number of open connections in the [DB Console]({% link {{ page.version.version }}/ui-sql-dashboard.md %}#open-sql-sessions) or [Cloud Console]({% link cockroachcloud/metrics-sql.md %}#open-sql-sessions). For more details on configuring connection pools, see [Size connection pools]({% link {{ page.version.version }}/connection-pooling.md %}#size-connection-pools). \ No newline at end of file diff --git a/src/current/_includes/v25.3/prod-deployment/insecure-flag.md b/src/current/_includes/v25.3/prod-deployment/insecure-flag.md new file mode 100644 index 00000000000..a13951ba4bc --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/insecure-flag.md @@ -0,0 +1,3 @@
+{{site.data.alerts.callout_danger}}
+The `--insecure` flag used in this tutorial is intended for non-production testing only. To run CockroachDB in production, use a secure cluster instead.
+{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/prod-deployment/insecure-initialize-cluster.md b/src/current/_includes/v25.3/prod-deployment/insecure-initialize-cluster.md new file mode 100644 index 00000000000..01cbad5a6ac --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/insecure-initialize-cluster.md @@ -0,0 +1,12 @@
+On your local machine, complete the node startup process and have the nodes join together as a cluster:
+
+1. [Install CockroachDB]({% link {{ page.version.version }}/install-cockroachdb.md %}) on your local machine, if you haven't already.
+
+1. Run the [`cockroach init`]({% link {{ page.version.version }}/cockroach-init.md %}) command, with the `--host` flag set to the address of any node:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ cockroach init --insecure --host=<address of any node>
+ ~~~ + + Each node then prints helpful details to the [standard output]({% link {{ page.version.version }}/cockroach-start.md %}#standard-output), such as the CockroachDB version, the URL for the DB Console, and the SQL URL for clients. diff --git a/src/current/_includes/v25.3/prod-deployment/insecure-recommendations.md b/src/current/_includes/v25.3/prod-deployment/insecure-recommendations.md new file mode 100644 index 00000000000..5ca9e9d4175 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/insecure-recommendations.md @@ -0,0 +1,13 @@ +- Consider using a [secure cluster]({% link {{ page.version.version }}/manual-deployment.md %}) instead. Using an insecure cluster comes with risks: + - Your cluster is open to any client that can access any node's IP addresses. + - Any user, even `root`, can log in without providing a password. + - Any user, connecting as `root`, can read or write any data in your cluster. + - There is no network encryption or authentication, and thus no confidentiality. + +- Decide how you want to access your DB Console: + + Access Level | Description + -------------|------------ + Partially open | Set a firewall rule to allow only specific IP addresses to communicate on port `8080`. + Completely open | Set a firewall rule to allow all IP addresses to communicate on port `8080`. + Completely closed | Set a firewall rule to disallow all communication on port `8080`. In this case, a machine with SSH access to a node could use an SSH tunnel to access the DB Console. diff --git a/src/current/_includes/v25.3/prod-deployment/insecure-requirements.md b/src/current/_includes/v25.3/prod-deployment/insecure-requirements.md new file mode 100644 index 00000000000..3334d2955b9 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/insecure-requirements.md @@ -0,0 +1,9 @@ +- You must have [SSH access]({{page.ssh-link}}) to each machine. This is necessary for distributing and starting CockroachDB binaries. + +- Your network configuration must allow TCP communication on the following ports: + - `26257` for intra-cluster and client-cluster communication + - `8080` to expose your DB Console + +- Carefully review the [Production Checklist]({% link {{ page.version.version }}/recommended-production-settings.md %}) and recommended [Topology Patterns]({% link {{ page.version.version }}/topology-patterns.md %}). + +{% include {{ page.version.version }}/prod-deployment/topology-recommendations.md %} \ No newline at end of file diff --git a/src/current/_includes/v25.3/prod-deployment/insecure-scale-cluster.md b/src/current/_includes/v25.3/prod-deployment/insecure-scale-cluster.md new file mode 100644 index 00000000000..aaf0e5f7688 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/insecure-scale-cluster.md @@ -0,0 +1,120 @@ +You can start the nodes manually or automate the process using [systemd](https://www.freedesktop.org/wiki/Software/systemd/). + +
+<div class="filters filters-big clearfix">
+  <button class="filter-button" data-scope="manual">Manual</button>
+  <button class="filter-button" data-scope="systemd">systemd</button>
+</div>
+
+<section class="filter-content" markdown="1" data-scope="manual">
+
+For each additional node you want to add to the cluster, complete the following steps:
+
+1. SSH to the machine where you want the node to run.
+
+1. Download the [CockroachDB archive](https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz) for Linux, and extract the binary:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz \
+    | tar -xz
+    ~~~
+
+1. Copy the binary into the `PATH`:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ cp -i cockroach-{{ page.release_info.version }}.linux-amd64/cockroach /usr/local/bin/
+    ~~~
+
+    If you get a permissions error, prefix the command with `sudo`.
+
+1. Run the [`cockroach start`]({% link {{ page.version.version }}/cockroach-start.md %}) command, passing the new node's address as the `--advertise-addr` flag and pointing `--join` to the three existing nodes (also include `--locality` if you set it earlier).
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ cockroach start \
+    --insecure \
+    --advertise-addr=<node4 address> \
+    --join=<node1 address>,<node2 address>,<node3 address> \
+    --cache=.25 \
+    --max-sql-memory=.25 \
+    --background
+    ~~~
+
+1. Update your load balancer to recognize the new node.
+
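+To verify that the new node has joined the cluster, you can run [`cockroach node status`]({% link {{ page.version.version }}/cockroach-node.md %}) against any node; a minimal sketch, where the address is a placeholder:
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+$ cockroach node status --insecure --host=<address of any node>
+~~~
+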
+</section>
+
+<section class="filter-content" markdown="1" data-scope="systemd">
+
+For each additional node you want to add to the cluster, complete the following steps:
+
+1. SSH to the machine where you want the node to run. Ensure you are logged in as the `root` user.
+
+1. Download the [CockroachDB archive](https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz) for Linux, and extract the binary:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    curl -o cockroach-{{ page.release_info.version }}.linux-amd64.tgz https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz; tar xzvf cockroach-{{ page.release_info.version }}.linux-amd64.tgz
+    ~~~
+
+1. Copy the binary into the `PATH`:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    cp -i cockroach-{{ page.release_info.version }}.linux-amd64/cockroach /usr/local/bin/
+    ~~~
+
+    If you get a permissions error, prefix the command with `sudo`.
+
+1. Create the Cockroach directory:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    mkdir /var/lib/cockroach
+    ~~~
+
+1. Create a Unix user named `cockroach`:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    useradd cockroach
+    ~~~
+
+1. Change the ownership of the `cockroach` directory to the user `cockroach`:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    chown cockroach /var/lib/cockroach
+    ~~~
+
+1. Download the [sample configuration template](https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/_includes/{{ page.version.version }}/prod-deployment/insecurecockroachdb.service):
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    curl -o insecurecockroachdb.service https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/_includes/{{ page.version.version }}/prod-deployment/insecurecockroachdb.service
+    ~~~
+
+    Alternatively, you can create the file yourself and copy the script into it:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    {% include {{ page.version.version }}/prod-deployment/insecurecockroachdb.service %}
+    ~~~
+
+    {{site.data.alerts.callout_info}}
+    Previously, the sample configuration file set `TimeoutStopSec` to 60 seconds. This recommendation has been lengthened to 300 seconds, to give the `cockroach` process more time to stop gracefully.
+    {{site.data.alerts.end}}
+
+    Save the file in the `/etc/systemd/system/` directory.
+
+1. Customize the sample configuration template for your deployment:
+
+    Specify values for the following flags:
+
+    {% include {{ page.version.version }}/prod-deployment/advertise-addr-join.md %}
+
+1. Repeat these steps for each additional node that you want in your cluster.
+
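+Once the configuration template is in place and customized, the new node's service can be started and enabled like the initial nodes; a minimal sketch, assuming the `insecurecockroachdb.service` unit downloaded above:
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+# Start the node now, and start it automatically after future reboots.
+systemctl start insecurecockroachdb
+systemctl enable insecurecockroachdb
+~~~
+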
diff --git a/src/current/_includes/v25.3/prod-deployment/insecure-start-nodes.md b/src/current/_includes/v25.3/prod-deployment/insecure-start-nodes.md new file mode 100644 index 00000000000..75d0de816b5 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/insecure-start-nodes.md @@ -0,0 +1,165 @@ +You can start the nodes manually or automate the process using [systemd](https://www.freedesktop.org/wiki/Software/systemd/). + +
+<div class="filters filters-big clearfix">
+  <button class="filter-button" data-scope="manual">Manual</button>
+  <button class="filter-button" data-scope="systemd">systemd</button>
+</div>
+
+<section class="filter-content" markdown="1" data-scope="manual">
+
+For each initial node of your cluster, complete the following steps:
+
+{{site.data.alerts.callout_info}}
+After completing these steps, nodes will not yet be live. They will complete the startup process and join together to form a cluster as soon as the cluster is initialized in the next step.
+{{site.data.alerts.end}}
+
+1. Visit [Releases]({% link releases/index.md %}) and download the full binary of CockroachDB to the node.
+
+1. On the node, follow the instructions to [install CockroachDB]({% link {{ page.version.version }}/install-cockroachdb.md %}).
+
+1. Run the [`cockroach start`]({% link {{ page.version.version }}/cockroach-start.md %}) command:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ cockroach start \
+    --insecure \
+    --advertise-addr=<node1 address> \
+    --join=<node1 address>,<node2 address>,<node3 address> \
+    --cache=.25 \
+    --max-sql-memory=.25 \
+    --background
+    ~~~
+
+    This command primes the node to start, using the following flags:
+
+    Flag | Description
+    -----|------------
+    `--insecure` | Indicates that the cluster is insecure, with no network encryption or authentication.
+    `--advertise-addr` | Specifies the IP address/hostname and port to tell other nodes to use. The port number can be omitted, in which case it defaults to `26257`.

<br><br>This value must route to an IP address the node is listening on (with `--listen-addr` unspecified, the node listens on all IP addresses).<br><br>In some networking scenarios, you may need to use `--advertise-addr` and/or `--listen-addr` differently. For more details, see [Networking]({% link {{ page.version.version }}/recommended-production-settings.md %}#networking).
+    `--join` | Identifies the address of 3-5 of the initial nodes of the cluster. These addresses should match the addresses that the target nodes are advertising.
+    `--cache`<br>
`--max-sql-memory` | Increases the node's cache size to 25% of available system memory to improve read performance. The capacity for in-memory SQL processing defaults to 25% of system memory but can be raised, if necessary, to increase the number of simultaneous client connections allowed by the node as well as the node's capacity for in-memory processing of rows when using `ORDER BY`, `GROUP BY`, `DISTINCT`, joins, and window functions. For more details, see [Cache and SQL Memory Size]({% link {{ page.version.version }}/recommended-production-settings.md %}#cache-and-sql-memory-size).
+    `--background` | Starts the node in the background so you gain control of the terminal to issue more commands.
+
+    When deploying across multiple datacenters, or when there is otherwise high latency between nodes, it is recommended to set `--locality` as well; the flag is also required by certain enterprise features. For more details, see [Locality]({% link {{ page.version.version }}/cockroach-start.md %}#locality). An illustrative example follows these steps.
+
+    For other flags not explicitly set, the command uses default values. For example, the node stores data in `--store=cockroach-data` and binds DB Console HTTP requests to `--http-addr=localhost:8080`. To set these options manually, see [Start a Node]({% link {{ page.version.version }}/cockroach-start.md %}).
+
+1. Repeat these steps for each additional node that you want in your cluster.
+
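+As an illustration of the `--locality` recommendation above, the following sketch starts a node with example locality tiers; the region and zone values are placeholders, not prescriptive:
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+$ cockroach start \
+--insecure \
+--advertise-addr=<node1 address> \
+--join=<node1 address>,<node2 address>,<node3 address> \
+--locality=region=us-east-1,zone=us-east-1a \
+--cache=.25 \
+--max-sql-memory=.25 \
+--background
+~~~
+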
+</section>
+
+<section class="filter-content" markdown="1" data-scope="systemd">
+ +For each initial node of your cluster, complete the following steps: + +{{site.data.alerts.callout_info}} +After completing these steps, nodes will not yet be live. They will complete the startup process and join together to form a cluster as soon as the cluster is initialized in the next step. +{{site.data.alerts.end}} + +1. SSH to the machine where you want the node to run. Ensure you are logged in as the `root` user. + +1. Download the [CockroachDB archive](https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz) for Linux, and extract the binary: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz \ + | tar -xz + ~~~ + +1. Copy the binary into the `PATH`: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cp -i cockroach-{{ page.release_info.version }}.linux-amd64/cockroach /usr/local/bin/ + ~~~ + + If you get a permissions error, prefix the command with `sudo`. + +1. CockroachDB uses custom-built versions of the [GEOS]({% link {{ page.version.version }}/architecture/glossary.md %}#geos) libraries. Copy these libraries to the location where CockroachDB expects to find them: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ mkdir -p /usr/local/lib/cockroach + ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cp -i cockroach-{{ page.release_info.version }}.linux-amd64/lib/libgeos.so /usr/local/lib/cockroach/ + ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cp -i cockroach-{{ page.release_info.version }}.linux-amd64/lib/libgeos_c.so /usr/local/lib/cockroach/ + ~~~ + + If you get a permissions error, prefix the command with `sudo`. + +1. Create the Cockroach directory: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ mkdir /var/lib/cockroach + ~~~ + +1. Create a Unix user named `cockroach`: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ useradd cockroach + ~~~ + +1. Change the ownership of the `cockroach` directory to the user `cockroach`: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ chown cockroach /var/lib/cockroach + ~~~ + +1. Download the [sample configuration template](https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/_includes/{{ page.version.version }}/prod-deployment/insecurecockroachdb.service) and save the file in the `/etc/systemd/system/` directory: + + {% include_cached copy-clipboard.html %} + ~~~ shell + curl -o insecurecockroachdb.service https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/_includes/{{ page.version.version }}/prod-deployment/insecurecockroachdb.service + ~~~ + + Alternatively, you can create the file yourself and copy the script into it: + + {% include_cached copy-clipboard.html %} + ~~~ shell + {% include {{ page.version.version }}/prod-deployment/insecurecockroachdb.service %} + ~~~ + + {{site.data.alerts.callout_info}} + Previously, the sample configuration file set `TimeoutStopSec` to 60 seconds. This recommendation has been lengthened to 300 seconds, to give the `cockroach` process more time to stop gracefully. + {{site.data.alerts.end}} + +1. In the sample configuration template, specify values for the following flags: + + {% include {{ page.version.version }}/prod-deployment/advertise-addr-join.md %} + + When deploying across multiple datacenters, or when there is otherwise high latency between nodes, it is recommended to set `--locality` as well. 
Setting `--locality` is also required by certain enterprise features. For more details, see [Locality]({% link {{ page.version.version }}/cockroach-start.md %}#locality).
+
+    For other flags not explicitly set, the command uses default values. For example, the node stores data in `--store=cockroach-data` and binds DB Console HTTP requests to `--http-addr=localhost:8080`. To set these options manually, see [Start a Node]({% link {{ page.version.version }}/cockroach-start.md %}).
+
+1. Start the CockroachDB cluster:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ systemctl start insecurecockroachdb
+    ~~~
+
+1. Configure `systemd` to start CockroachDB automatically after a reboot:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    systemctl enable insecurecockroachdb
+    ~~~
+
+1. Repeat these steps for each additional node that you want in your cluster.
+
+{{site.data.alerts.callout_info}}
+`systemd` handles node restarts in case of node failure. To stop a node without `systemd` restarting it, run `systemctl stop insecurecockroachdb`.
+{{site.data.alerts.end}}
+
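+A quick way to confirm that a node's service is healthy after starting or enabling it (standard `systemd` tooling, not CockroachDB-specific):
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+# Show whether the unit is active, plus its recent log lines.
+systemctl status insecurecockroachdb
+~~~
+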
diff --git a/src/current/_includes/v25.3/prod-deployment/insecure-test-cluster.md b/src/current/_includes/v25.3/prod-deployment/insecure-test-cluster.md new file mode 100644 index 00000000000..b67b97cde01 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/insecure-test-cluster.md @@ -0,0 +1,41 @@ +CockroachDB replicates and distributes data behind-the-scenes and uses a [Gossip protocol](https://wikipedia.org/wiki/Gossip_protocol) to enable each node to locate data across the cluster. Once a cluster is live, any node can be used as a SQL gateway. + +When using a load balancer, you should issue commands directly to the load balancer, which then routes traffic to the nodes. + +Use the [built-in SQL client]({% link {{ page.version.version }}/cockroach-sql.md %}) locally as follows: + +1. On your local machine, launch the built-in SQL client, with the `--host` flag set to the address of the load balancer: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --insecure --host=
+    ~~~
+
+1. Create an `insecurenodetest` database:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ sql
+    > CREATE DATABASE insecurenodetest;
+    ~~~
+
+1. View the cluster's databases, which will include `insecurenodetest`:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ sql
+    > SHOW DATABASES;
+    ~~~
+
+    ~~~
+    +--------------------+
+    |      Database      |
+    +--------------------+
+    | crdb_internal      |
+    | information_schema |
+    | insecurenodetest   |
+    | pg_catalog         |
+    | system             |
+    +--------------------+
+    (5 rows)
+    ~~~
+
+1. Use `\q` to exit the SQL shell. diff --git a/src/current/_includes/v25.3/prod-deployment/insecure-test-load-balancing.md b/src/current/_includes/v25.3/prod-deployment/insecure-test-load-balancing.md new file mode 100644 index 00000000000..aaa2873a2ba --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/insecure-test-load-balancing.md @@ -0,0 +1,79 @@
+CockroachDB comes with a number of [built-in workloads]({% link {{ page.version.version }}/cockroach-workload.md %}) for simulating client traffic. This step features CockroachDB's version of the [TPC-C](http://www.tpc.org/tpcc/) workload.
+
+{{site.data.alerts.callout_info}}
+Be sure that you have configured your network to allow traffic from the application to the load balancer. In this case, you will run the sample workload on one of your machines. The traffic source should therefore be the **internal (private)** IP address of that machine.
+{{site.data.alerts.end}}
+
+{{site.data.alerts.callout_success}}
+For comprehensive guidance on benchmarking CockroachDB with TPC-C, see [Performance Benchmarking]({% link {{ page.version.version }}/performance-benchmarking-with-tpcc-local.md %}).
+{{site.data.alerts.end}}
+
+1. SSH to the machine where you want to run the sample TPC-C workload.
+
+    This should be a machine that is not running a CockroachDB node.
+
+1. Download the [CockroachDB archive](https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz) for Linux, and extract the binary:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz \
+    | tar -xz
+    ~~~
+
+1. Copy the binary into the `PATH`:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ cp -i cockroach-{{ page.release_info.version }}.linux-amd64/cockroach /usr/local/bin/
+    ~~~
+
+    If you get a permissions error, prefix the command with `sudo`.
+
+1. Use the [`cockroach workload`]({% link {{ page.version.version }}/cockroach-workload.md %}) command to load the initial schema and data, pointing it at the IP address of the load balancer:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ cockroach workload init tpcc \
+    'postgresql://root@<address of load balancer>:26257/tpcc?sslmode=disable'
+    ~~~
+
+1. 
Use the `cockroach workload` command to run the workload for 10 minutes:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ cockroach workload run tpcc \
+    --duration=10m \
+    'postgresql://root@<address of load balancer>:26257/tpcc?sslmode=disable'
+    ~~~
+
+    You'll see per-operation statistics print to standard output every second:
+
+    ~~~
+    _elapsed___errors__ops/sec(inst)___ops/sec(cum)__p50(ms)__p95(ms)__p99(ms)_pMax(ms)
+    1s 0 1443.4 1494.8 4.7 9.4 27.3 67.1 transfer
+    2s 0 1686.5 1590.9 4.7 8.1 15.2 28.3 transfer
+    3s 0 1735.7 1639.0 4.7 7.3 11.5 28.3 transfer
+    4s 0 1542.6 1614.9 5.0 8.9 12.1 21.0 transfer
+    5s 0 1695.9 1631.1 4.7 7.3 11.5 22.0 transfer
+    6s 0 1569.2 1620.8 5.0 8.4 11.5 15.7 transfer
+    7s 0 1614.6 1619.9 4.7 8.1 12.1 16.8 transfer
+    8s 0 1344.4 1585.6 5.8 10.0 15.2 31.5 transfer
+    9s 0 1351.9 1559.5 5.8 10.0 16.8 54.5 transfer
+    10s 0 1514.8 1555.0 5.2 8.1 12.1 16.8 transfer
+    ...
+    ~~~
+
+    After the specified duration (10 minutes in this case), the workload will stop and you'll see totals printed to standard output:
+
+    ~~~
+    _elapsed___errors_____ops(total)___ops/sec(cum)__avg(ms)__p50(ms)__p95(ms)__p99(ms)_pMax(ms)__result
+    600.0s 0 823902 1373.2 5.8 5.5 10.0 15.2 209.7
+    ~~~
+
+    {{site.data.alerts.callout_success}}
+    For more `tpcc` options, use `cockroach workload run tpcc --help`. For details about other workloads built into the `cockroach` binary, use `cockroach workload --help`.
+    {{site.data.alerts.end}}
+
+1. To monitor the load generator's progress, open the [DB Console]({% link {{ page.version.version }}/ui-overview.md %}) by pointing a browser to the address in the `admin` field in the standard output of any node on startup.
+
+    Since the load generator is pointed at the load balancer, the connections will be evenly distributed across nodes. To verify this, click **Metrics** on the left, select the **SQL** dashboard, and then check the **SQL Connections** graph. You can use the **Graph** menu to filter the graph for specific nodes. diff --git a/src/current/_includes/v25.3/prod-deployment/insecurecockroachdb.service b/src/current/_includes/v25.3/prod-deployment/insecurecockroachdb.service new file mode 100644 index 00000000000..54d5ea2047a --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/insecurecockroachdb.service @@ -0,0 +1,16 @@
+[Unit]
+Description=Cockroach Database cluster node
+Requires=network.target
+[Service]
+Type=notify
+WorkingDirectory=/var/lib/cockroach
+ExecStart=/usr/local/bin/cockroach start --insecure --advertise-addr=<node1 address> --join=<node1 address>,<node2 address>,<node3 address> --cache=.25 --max-sql-memory=.25
+TimeoutStopSec=300
+Restart=always
+RestartSec=10
+StandardOutput=syslog
+StandardError=syslog
+SyslogIdentifier=cockroach
+User=cockroach
+[Install]
+WantedBy=default.target diff --git a/src/current/_includes/v25.3/prod-deployment/join-flag-multi-region.md b/src/current/_includes/v25.3/prod-deployment/join-flag-multi-region.md new file mode 100644 index 00000000000..6c07cf7abe0 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/join-flag-multi-region.md @@ -0,0 +1 @@
+When starting a multi-region cluster, set more than one `--join` address per region, and select nodes that are spread across failure domains. This ensures [high availability]({% link {{ page.version.version }}/architecture/replication-layer.md %}#overview).
\ No newline at end of file diff --git a/src/current/_includes/v25.3/prod-deployment/join-flag-single-region.md b/src/current/_includes/v25.3/prod-deployment/join-flag-single-region.md new file mode 100644 index 00000000000..99250cdfee9 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/join-flag-single-region.md @@ -0,0 +1 @@
+For a cluster in a single region, set 3-5 `--join` addresses. Each starting node will attempt to contact one of the join hosts. If a join host cannot be reached, the node tries another address on the list until it can join the gossip network. \ No newline at end of file diff --git a/src/current/_includes/v25.3/prod-deployment/monitor-cluster.md b/src/current/_includes/v25.3/prod-deployment/monitor-cluster.md new file mode 100644 index 00000000000..b6c1fcbf609 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/monitor-cluster.md @@ -0,0 +1,3 @@
+Despite CockroachDB's various [built-in safeguards against failure]({% link {{ page.version.version }}/frequently-asked-questions.md %}#how-does-cockroachdb-survive-failures), it is critical to actively monitor the overall health and performance of a cluster running in production and to create alerting rules that promptly send notifications when there are events that require investigation or intervention.
+
+For details about available monitoring options and the most important events and metrics to alert on, see [Monitoring and Alerting]({% link {{ page.version.version }}/monitoring-and-alerting.md %}). diff --git a/src/current/_includes/v25.3/prod-deployment/process-termination.md b/src/current/_includes/v25.3/prod-deployment/process-termination.md new file mode 100644 index 00000000000..25dcba0fe50 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/process-termination.md @@ -0,0 +1,11 @@
+{{site.data.alerts.callout_danger}}
+Cockroach Labs does not recommend terminating the `cockroach` process by sending a `SIGKILL` signal, because it bypasses CockroachDB's [node shutdown logic](#node-shutdown-sequence) and degrades the cluster's health. From the point of view of other cluster nodes, the node will be suddenly unavailable.
+
+- If a decommissioning node is forcibly terminated before decommission completes, [ranges will be under-replicated]({% link {{ page.version.version }}/monitoring-and-alerting.md %}#critical-nodes-endpoint) and the cluster is at risk of [loss of quorum]({% link {{ page.version.version }}/architecture/replication-layer.md %}#overview) if an additional node experiences an outage in the window before up-replication completes.
+- If a draining or decommissioning node is forcibly terminated before the operation completes, it can corrupt log files and, in certain edge cases, can result in temporary data unavailability, latency spikes, [uncertainty errors]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}#readwithinuncertaintyintervalerror), [ambiguous commit errors]({% link {{ page.version.version }}/common-errors.md %}#result-is-ambiguous), or query timeouts.
+
+{{site.data.alerts.end}}
+
+- On production deployments, use the process manager, orchestration system, or other deployment tooling to send `SIGTERM` to the process. For example, with [`systemd`](https://www.freedesktop.org/wiki/Software/systemd/), run `systemctl stop {systemd config filename}`.
+
+- If you run CockroachDB in the foreground for local testing, you can use `ctrl-c` in the terminal to terminate the process.
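+As a concrete sketch of the guidance above, assuming the `insecurecockroachdb.service` unit used elsewhere in these docs:
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+# systemd sends SIGTERM, then waits up to TimeoutStopSec for a graceful drain.
+systemctl stop insecurecockroachdb
+~~~
+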
diff --git a/src/current/_includes/v25.3/prod-deployment/prod-guidance-cache-max-sql-memory.md b/src/current/_includes/v25.3/prod-deployment/prod-guidance-cache-max-sql-memory.md new file mode 100644 index 00000000000..4cc0a947a21 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/prod-guidance-cache-max-sql-memory.md @@ -0,0 +1,12 @@ +{% capture formula %}{% include_cached copy-clipboard.html %}
+~~~
+(2 * --max-sql-memory) + --cache <= 80% of system RAM
+~~~
+{% endcapture %}
+The default value for `--cache` is 128 MiB. For production deployments, set `--cache` to `25%` or higher. To determine appropriate settings for `--cache` and `--max-sql-memory`, use the following formula: {{ formula }} For example, on a machine with 64 GiB of RAM, `--cache=.25` and `--max-sql-memory=.25` each resolve to 16 GiB, and (2 * 16 GiB) + 16 GiB = 48 GiB, which is within 80% of system RAM (51.2 GiB).
+
+To help guard against [OOM events]({% link {{ page.version.version }}/cluster-setup-troubleshooting.md %}#out-of-memory-oom-crash), CockroachDB sets a soft memory limit using mechanisms in Go. Depending on your hardware and workload, you may not need to manually tune `--max-sql-memory`.
+
+Test the configuration with a reasonable workload before deploying it to production.
+
+{{site.data.alerts.callout_info}}
+On startup, if CockroachDB detects that `--max-sql-memory` or `--cache` are set too aggressively, a warning is logged.
+{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/prod-deployment/prod-guidance-connection-pooling.md b/src/current/_includes/v25.3/prod-deployment/prod-guidance-connection-pooling.md new file mode 100644 index 00000000000..17b87a9988b --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/prod-guidance-connection-pooling.md @@ -0,0 +1 @@
+The total number of workload connections across all connection pools **should not greatly exceed 4 times the number of vCPUs** in the cluster. \ No newline at end of file diff --git a/src/current/_includes/v25.3/prod-deployment/prod-guidance-disable-swap.md b/src/current/_includes/v25.3/prod-deployment/prod-guidance-disable-swap.md new file mode 100644 index 00000000000..f988eb016d4 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/prod-guidance-disable-swap.md @@ -0,0 +1 @@
+Disable Linux memory swapping. Over-allocating memory on production machines can lead to unexpected performance issues when pages have to be read back into memory. \ No newline at end of file diff --git a/src/current/_includes/v25.3/prod-deployment/prod-guidance-larger-nodes.md b/src/current/_includes/v25.3/prod-deployment/prod-guidance-larger-nodes.md new file mode 100644 index 00000000000..c165a0130b7 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/prod-guidance-larger-nodes.md @@ -0,0 +1 @@
+To optimize for throughput, use larger nodes with up to 32 vCPUs. To further increase throughput, add more nodes to the cluster instead of increasing node size. \ No newline at end of file diff --git a/src/current/_includes/v25.3/prod-deployment/prod-guidance-log-volume.md b/src/current/_includes/v25.3/prod-deployment/prod-guidance-log-volume.md new file mode 100644 index 00000000000..f6c7d8eb633 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/prod-guidance-log-volume.md @@ -0,0 +1 @@
+Determine where CockroachDB [log files]({% link {{ page.version.version }}/configure-logs.md %}#logging-directory) will be stored: either on the same volume as the main data store or on a separate volume. Refer to [Storage considerations for file sinks]({% link {{ page.version.version }}/logging-best-practices.md %}#storage-considerations-for-file-sinks) in Logging Best Practices. \ No newline at end of file diff --git a/src/current/_includes/v25.3/prod-deployment/prod-guidance-lvm.md b/src/current/_includes/v25.3/prod-deployment/prod-guidance-lvm.md new file mode 100644 index 00000000000..383f2a5d536 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/prod-guidance-lvm.md @@ -0,0 +1 @@
+Do not use LVM in the I/O path. Dynamically resizing CockroachDB store volumes can result in significant performance degradation.
Using LVM snapshots in lieu of CockroachDB backup and restore is also not supported. \ No newline at end of file diff --git a/src/current/_includes/v25.3/prod-deployment/prod-guidance-store-volume.md b/src/current/_includes/v25.3/prod-deployment/prod-guidance-store-volume.md new file mode 100644 index 00000000000..2f1bcd9cf5a --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/prod-guidance-store-volume.md @@ -0,0 +1 @@ +Use dedicated volumes for the CockroachDB store. Do not share the store volume with any other I/O activity. \ No newline at end of file diff --git a/src/current/_includes/v25.3/prod-deployment/prod-see-also.md b/src/current/_includes/v25.3/prod-deployment/prod-see-also.md new file mode 100644 index 00000000000..88d81e565c9 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/prod-see-also.md @@ -0,0 +1,7 @@ +- [Production Checklist]({% link {{ page.version.version }}/recommended-production-settings.md %}) +- [Manual Deployment]({% link {{ page.version.version }}/manual-deployment.md %}) +- [Orchestrated Deployment]({% link {{ page.version.version }}/kubernetes-overview.md %}) +- [Monitoring and Alerting]({% link {{ page.version.version }}/monitoring-and-alerting.md %}) +- [Performance Benchmarking]({% link {{ page.version.version }}/performance-benchmarking-with-tpcc-small.md %}) +- [Performance Tuning]({% link {{ page.version.version }}/performance-best-practices-overview.md %}) +- [Local Deployment]({% link {{ page.version.version }}/start-a-local-cluster.md %}) diff --git a/src/current/_includes/v25.3/prod-deployment/provision-cpu.md b/src/current/_includes/v25.3/prod-deployment/provision-cpu.md new file mode 100644 index 00000000000..48896a432cd --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/provision-cpu.md @@ -0,0 +1 @@ +{% if include.threshold == "absolute_minimum" %}**4 vCPUs**{% elsif include.threshold == "minimum" %}**8 vCPUs**{% elsif include.threshold == "maximum" %}**32 vCPUs**{% endif %} diff --git a/src/current/_includes/v25.3/prod-deployment/provision-disk-io.md b/src/current/_includes/v25.3/prod-deployment/provision-disk-io.md new file mode 100644 index 00000000000..dadd7113e01 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/provision-disk-io.md @@ -0,0 +1 @@ +500 IOPS and 30 MB/s per vCPU diff --git a/src/current/_includes/v25.3/prod-deployment/provision-memory.md b/src/current/_includes/v25.3/prod-deployment/provision-memory.md new file mode 100644 index 00000000000..98136337374 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/provision-memory.md @@ -0,0 +1 @@ +**4 GiB of RAM per vCPU** \ No newline at end of file diff --git a/src/current/_includes/v25.3/prod-deployment/provision-storage.md b/src/current/_includes/v25.3/prod-deployment/provision-storage.md new file mode 100644 index 00000000000..745ebc8dace --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/provision-storage.md @@ -0,0 +1 @@ +320 GiB per vCPU \ No newline at end of file diff --git a/src/current/_includes/v25.3/prod-deployment/recommended-instances-aws.md b/src/current/_includes/v25.3/prod-deployment/recommended-instances-aws.md new file mode 100644 index 00000000000..87d0f53e95c --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/recommended-instances-aws.md @@ -0,0 +1,7 @@ +- Use general-purpose [`m6i` or `m6a`](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/general-purpose-instances.html) VMs with SSD-backed [EBS 
volumes](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html). For example, Cockroach Labs has used `m6i.2xlarge` for performance benchmarking. If your workload requires high throughput, use network-optimized `m5n` instances. To simulate bare-metal deployments, use `m5d` with [SSD Instance Store volumes](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ssd-instance-store.html).
+
+  - `m5` and `m5a` instances, and [compute-optimized `c5`, `c5a`, and `c5n`](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/compute-optimized-instances.html) instances, are also acceptable.
+
+  {{site.data.alerts.callout_danger}}
+  **Do not** use [burstable performance instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html), which limit the load on a single core.
+  {{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/prod-deployment/recommended-instances-azure.md b/src/current/_includes/v25.3/prod-deployment/recommended-instances-azure.md new file mode 100644 index 00000000000..2712b3542ef --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/recommended-instances-azure.md @@ -0,0 +1,7 @@
+- Use general-purpose [Dsv5-series](https://docs.microsoft.com/azure/virtual-machines/dv5-dsv5-series) and [Dasv5-series](https://docs.microsoft.com/azure/virtual-machines/dasv5-dadsv5-series) or memory-optimized [Ev5-series](https://docs.microsoft.com/azure/virtual-machines/ev5-esv5-series) and [Easv5-series](https://docs.microsoft.com/azure/virtual-machines/easv5-eadsv5-series#easv5-series) VMs. For example, Cockroach Labs has used `Standard_D8s_v5`, `Standard_D8as_v5`, `Standard_E8s_v5`, and `Standard_E8as_v5` for performance benchmarking.
+
+  - Compute-optimized [F-series](https://docs.microsoft.com/azure/virtual-machines/fsv2-series) VMs are also acceptable.
+
+  {{site.data.alerts.callout_danger}}
+  Do not use ["burstable" B-series](https://docs.microsoft.com/azure/virtual-machines/linux/b-series-burstable) VMs, which limit the load on CPU resources. Also, Cockroach Labs has experienced data corruption issues on A-series VMs, so we recommend avoiding those as well.
+  {{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/prod-deployment/recommended-instances-gcp.md b/src/current/_includes/v25.3/prod-deployment/recommended-instances-gcp.md new file mode 100644 index 00000000000..6dbe048cd16 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/recommended-instances-gcp.md @@ -0,0 +1,5 @@
+- Use general-purpose [`t2d-standard`, `n2-standard`, or `n2d-standard`](https://cloud.google.com/compute/pricing#predefined_machine_types) VMs, or use [custom VMs](https://cloud.google.com/compute/docs/instances/creating-instance-with-custom-machine-type). For example, Cockroach Labs has used `t2d-standard-8`, `n2-standard-8`, and `n2d-standard-8` for performance benchmarking.
+
+  {{site.data.alerts.callout_danger}}
+  Do not use `f1` or `g1` [shared-core machines](https://cloud.google.com/compute/docs/machine-types#sharedcore), which limit the load on CPU resources.
+  {{site.data.alerts.end}} \ No newline at end of file diff --git a/src/current/_includes/v25.3/prod-deployment/resolution-excessive-concurrency.md b/src/current/_includes/v25.3/prod-deployment/resolution-excessive-concurrency.md new file mode 100644 index 00000000000..01d54228e53 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/resolution-excessive-concurrency.md @@ -0,0 +1 @@
+To prevent issues with workload concurrency, [provision sufficient CPU]({% link {{ page.version.version }}/recommended-production-settings.md %}#sizing) and use [connection pooling]({% link {{ page.version.version }}/recommended-production-settings.md %}#connection-pooling) for the workload. \ No newline at end of file diff --git a/src/current/_includes/v25.3/prod-deployment/resolution-inverted-lsm.md b/src/current/_includes/v25.3/prod-deployment/resolution-inverted-lsm.md new file mode 100644 index 00000000000..75693d3bb35 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/resolution-inverted-lsm.md @@ -0,0 +1 @@
+If compaction has fallen behind and caused an [inverted LSM]({% link {{ page.version.version }}/architecture/storage-layer.md %}#inverted-lsms), throttle your workload concurrency to allow [compaction]({% link {{ page.version.version }}/architecture/storage-layer.md %}#compaction) to catch up and restore a healthy LSM shape. {% include {{ page.version.version }}/prod-deployment/prod-guidance-connection-pooling.md %} If a node is severely impacted, you can [start a new node]({% link {{ page.version.version }}/cockroach-start.md %}) and then [decommission the problematic node](node-shutdown.html?filters=decommission#remove-nodes). {% include {{page.version.version}}/storage/compaction-concurrency.md %} diff --git a/src/current/_includes/v25.3/prod-deployment/resolution-oom-crash.md b/src/current/_includes/v25.3/prod-deployment/resolution-oom-crash.md new file mode 100644 index 00000000000..e407039f21c --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/resolution-oom-crash.md @@ -0,0 +1 @@
+To prevent OOM crashes, [provision sufficient memory]({% link {{ page.version.version }}/recommended-production-settings.md %}#memory). If all CockroachDB machines are provisioned and configured correctly, either run the CockroachDB process on another node with sufficient memory, or [reduce the memory allocated to CockroachDB]({% link {{ page.version.version }}/recommended-production-settings.md %}#cache-and-sql-memory-size). \ No newline at end of file diff --git a/src/current/_includes/v25.3/prod-deployment/resolution-untuned-query.md b/src/current/_includes/v25.3/prod-deployment/resolution-untuned-query.md new file mode 100644 index 00000000000..3db116e41c1 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/resolution-untuned-query.md @@ -0,0 +1 @@
+If you find queries that are consuming too much memory, [cancel the queries]({% link {{ page.version.version }}/manage-long-running-queries.md %}#cancel-long-running-queries) to free up memory. For information on optimizing query performance, see [SQL Performance Best Practices]({% link {{ page.version.version }}/performance-best-practices-overview.md %}).
\ No newline at end of file diff --git a/src/current/_includes/v25.3/prod-deployment/secure-generate-certificates.md b/src/current/_includes/v25.3/prod-deployment/secure-generate-certificates.md new file mode 100644 index 00000000000..adaf90ca0b0 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/secure-generate-certificates.md @@ -0,0 +1,201 @@
+You can use `cockroach cert` commands or [`openssl` commands]({% link {{ page.version.version }}/create-security-certificates-openssl.md %}) to generate security certificates. This section features the `cockroach cert` commands.
+
+Locally, you'll need to [create the following certificates and keys]({% link {{ page.version.version }}/cockroach-cert.md %}):
+
+- A certificate authority (CA) key pair (`ca.crt` and `ca.key`).
+- A node key pair for each node, issued to its IP addresses and any common names the machine uses, as well as to the IP addresses and common names for machines running load balancers.
+- A client key pair for the `root` user. You'll use this to run a sample workload against the cluster as well as some `cockroach` client commands from your local machine.
+
+{{site.data.alerts.callout_success}}Before beginning, it's useful to collect each of your machines' internal and external IP addresses, as well as any server names you want to issue certificates for.{{site.data.alerts.end}}
+
+1. [Install CockroachDB]({% link {{ page.version.version }}/install-cockroachdb.md %}) on your local machine, if you haven't already.
+
+1. Create two directories:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ mkdir certs
+    ~~~
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ mkdir my-safe-directory
+    ~~~
+    - `certs`: You'll generate your CA certificate and all node and client certificates and keys in this directory and then upload some of the files to your nodes.
+    - `my-safe-directory`: You'll generate your CA key in this directory and then reference the key when generating node and client certificates. After that, you'll keep the key safe and secret; you will not upload it to your nodes.
+
+1. Create the CA certificate and key:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ cockroach cert create-ca \
+    --certs-dir=certs \
+    --ca-key=my-safe-directory/ca.key
+    ~~~
+
+1. Create the certificate and key for the first node, issued to all common names you might use to refer to the node as well as to the load balancer instances:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ cockroach cert create-node \
+    <node1 internal IP> \
+    <node1 external IP> \
+    <node1 hostname> \
+    <other common names for node1> \
+    localhost \
+    127.0.0.1 \
+    <load balancer IP address> \
+    <load balancer hostname> \
+    <other common names for load balancer instances> \
+    --certs-dir=certs \
+    --ca-key=my-safe-directory/ca.key
+    ~~~
+
+1. Upload the CA certificate and node certificate and key to the first node:
+
+    {% if page.title contains "Google" %}
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ gcloud compute ssh <node1 name> \
+    --project <project name> \
+    --command "mkdir certs"
+    ~~~
+
+    {{site.data.alerts.callout_info}}
+    `gcloud compute ssh` associates your public SSH key with the GCP project and is only needed when connecting to the first node. See the [GCP docs](https://cloud.google.com/sdk/gcloud/reference/compute/ssh) for more details.
+    {{site.data.alerts.end}}
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ scp certs/ca.crt \
+    certs/node.crt \
+    certs/node.key \
+    <username>@<node1 address>:~/certs
+    ~~~
+
+    {% elsif page.title contains "AWS" %}
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ ssh-add /path/<key file>.pem
+    ~~~
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ ssh <username>@<node1 address> "mkdir certs"
+    ~~~
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ scp certs/ca.crt \
+    certs/node.crt \
+    certs/node.key \
+    <username>@<node1 address>:~/certs
+    ~~~
+
+    {% else %}
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ ssh <username>@<node1 address> "mkdir certs"
+    ~~~
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ scp certs/ca.crt \
+    certs/node.crt \
+    certs/node.key \
+    <username>@<node1 address>:~/certs
+    ~~~
+    {% endif %}
+
+1. Delete the local copy of the node certificate and key:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ rm certs/node.crt certs/node.key
+    ~~~
+
+    {{site.data.alerts.callout_info}}
+    This is necessary because the certificates and keys for additional nodes will also be named `node.crt` and `node.key`. As an alternative to deleting these files, you can run the next `cockroach cert create-node` commands with the `--overwrite` flag.
+    {{site.data.alerts.end}}
+
+1. Create the certificate and key for the second node, issued to all common names you might use to refer to the node as well as to the load balancer instances:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ cockroach cert create-node \
+    <node2 internal IP> \
+    <node2 external IP> \
+    <node2 hostname> \
+    <other common names for node2> \
+    localhost \
+    127.0.0.1 \
+    <load balancer IP address> \
+    <load balancer hostname> \
+    <other common names for load balancer instances> \
+    --certs-dir=certs \
+    --ca-key=my-safe-directory/ca.key
+    ~~~
+
+1. Upload the CA certificate and node certificate and key to the second node:
+
+    {% if page.title contains "AWS" %}
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ ssh <username>@<node2 address> "mkdir certs"
+    ~~~
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ scp certs/ca.crt \
+    certs/node.crt \
+    certs/node.key \
+    <username>@<node2 address>:~/certs
+    ~~~
+
+    {% else %}
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ ssh <username>@<node2 address> "mkdir certs"
+    ~~~
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ scp certs/ca.crt \
+    certs/node.crt \
+    certs/node.key \
+    <username>@<node2 address>:~/certs
+    ~~~
+    {% endif %}
+
+1. Repeat steps 6 - 8 for each additional node.
+
+1. Create a client certificate and key for the `root` user:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ cockroach cert create-client \
+    root \
+    --certs-dir=certs \
+    --ca-key=my-safe-directory/ca.key
+    ~~~
+
+1. Upload the CA certificate and client certificate and key to the machine where you will run a sample workload:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ ssh <username>@<workload address> "mkdir certs"
+    ~~~
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ scp certs/ca.crt \
+    certs/client.root.crt \
+    certs/client.root.key \
+    <username>@<workload address>:~/certs
+    ~~~
+
+    In later steps, you'll also use the `root` user's certificate to run [`cockroach`]({% link {{ page.version.version }}/cockroach-commands.md %}) client commands from your local machine. If you might also want to run `cockroach` client commands directly on a node (e.g., for local debugging), you'll need to copy the `root` user's certificate and key to that node as well.
+
+{{site.data.alerts.callout_info}}
+On accessing the DB Console in a later step, your browser will consider the CockroachDB-created certificate invalid and you’ll need to click through a warning message to get to the UI.
You can avoid this issue by [using a certificate issued by a public CA]({% link {{ page.version.version }}/create-security-certificates-custom-ca.md %}#accessing-the-db-console-for-a-secure-cluster).
+{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/prod-deployment/secure-initialize-cluster.md b/src/current/_includes/v25.3/prod-deployment/secure-initialize-cluster.md new file mode 100644 index 00000000000..5efa831e6f7 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/secure-initialize-cluster.md @@ -0,0 +1,8 @@
+On your local machine, run the [`cockroach init`]({% link {{ page.version.version }}/cockroach-init.md %}) command to complete the node startup process and have the nodes join together as a cluster:
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+$ cockroach init --certs-dir=certs --host=<address of any node>
+~~~ + +After running this command, each node prints helpful details to the [standard output]({% link {{ page.version.version }}/cockroach-start.md %}#standard-output), such as the CockroachDB version, the URL for the DB Console, and the SQL URL for clients. diff --git a/src/current/_includes/v25.3/prod-deployment/secure-recommendations.md b/src/current/_includes/v25.3/prod-deployment/secure-recommendations.md new file mode 100644 index 00000000000..528850dbbb0 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/secure-recommendations.md @@ -0,0 +1,7 @@ +- Decide how you want to access your DB Console: + + Access Level | Description + -------------|------------ + Partially open | Set a firewall rule to allow only specific IP addresses to communicate on port `8080`. + Completely open | Set a firewall rule to allow all IP addresses to communicate on port `8080`. + Completely closed | Set a firewall rule to disallow all communication on port `8080`. In this case, a machine with SSH access to a node could use an SSH tunnel to access the DB Console. diff --git a/src/current/_includes/v25.3/prod-deployment/secure-requirements.md b/src/current/_includes/v25.3/prod-deployment/secure-requirements.md new file mode 100644 index 00000000000..f27496dd612 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/secure-requirements.md @@ -0,0 +1,11 @@ +- You must have [CockroachDB installed]({% link {{ page.version.version }}/install-cockroachdb.md %}) locally. This is necessary for generating and managing your deployment's certificates. + +- You must have [SSH access]({{page.ssh-link}}) to each machine. This is necessary for distributing and starting CockroachDB binaries. + +- Your network configuration must allow TCP communication on the following ports: + - `26257` for intra-cluster and client-cluster communication + - `8080` to expose your DB Console + +- Carefully review the [Production Checklist]({% link {{ page.version.version }}/recommended-production-settings.md %}), including supported hardware and software, and the recommended [Topology Patterns]({% link {{ page.version.version }}/topology-patterns.md %}). + +{% include {{ page.version.version }}/prod-deployment/topology-recommendations.md %} \ No newline at end of file diff --git a/src/current/_includes/v25.3/prod-deployment/secure-scale-cluster.md b/src/current/_includes/v25.3/prod-deployment/secure-scale-cluster.md new file mode 100644 index 00000000000..8c980b018a3 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/secure-scale-cluster.md @@ -0,0 +1,123 @@ +You can start the nodes manually or automate the process using [systemd](https://www.freedesktop.org/wiki/Software/systemd/). + +
+<div class="filters filters-big clearfix">
+  <button class="filter-button" data-scope="manual">Manual</button>
+  <button class="filter-button" data-scope="systemd">systemd</button>
+</div>
+
+<section class="filter-content" markdown="1" data-scope="manual">
+
+For each additional node you want to add to the cluster, complete the following steps:
+
+1. SSH to the machine where you want the node to run.
+
+1. Download the [CockroachDB archive](https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz) for Linux, and extract the binary:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz \
+    | tar -xz
+    ~~~
+
+1. Copy the binary into the `PATH`:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ cp -i cockroach-{{ page.release_info.version }}.linux-amd64/cockroach /usr/local/bin/
+    ~~~
+
+    If you get a permissions error, prefix the command with `sudo`.
+
+1. Run the [`cockroach start`]({% link {{ page.version.version }}/cockroach-start.md %}) command, passing the new node's address as the `--advertise-addr` flag and pointing `--join` to the three existing nodes (also include `--locality` if you set it earlier).
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ cockroach start \
+    --certs-dir=certs \
+    --advertise-addr=<node4 address> \
+    --join=<node1 address>,<node2 address>,<node3 address> \
+    --cache=.25 \
+    --max-sql-memory=.25 \
+    --background
+    ~~~
+
+1. Update your load balancer to recognize the new node.
+
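+To verify that the new node has joined the cluster, you can run [`cockroach node status`]({% link {{ page.version.version }}/cockroach-node.md %}) from a machine holding the `root` client certificate; a minimal sketch, where the address is a placeholder:
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+$ cockroach node status --certs-dir=certs --host=<address of any node>
+~~~
+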
+</section>
+
+<section class="filter-content" markdown="1" data-scope="systemd">
+
+For each additional node you want to add to the cluster, complete the following steps:
+
+1. SSH to the machine where you want the node to run. Ensure you are logged in as the `root` user.
+
+1. Download the [CockroachDB archive](https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz) for Linux, and extract the binary:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    curl -o cockroach-{{ page.release_info.version }}.linux-amd64.tgz https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz; tar xzvf cockroach-{{ page.release_info.version }}.linux-amd64.tgz
+    ~~~
+
+1. Copy the binary into the `PATH`:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    cp -i cockroach-{{ page.release_info.version }}.linux-amd64/cockroach /usr/local/bin/
+    ~~~
+
+    If you get a permissions error, prefix the command with `sudo`.
+
+1. Create the Cockroach directory:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    mkdir /var/lib/cockroach
+    ~~~
+
+1. Create a Unix user named `cockroach`:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    useradd cockroach
+    ~~~
+
+1. Move the `certs` directory to the `cockroach` directory:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    mv certs /var/lib/cockroach/
+    ~~~
+
+1. Change the ownership of the `cockroach` directory to the user `cockroach`:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    chown -R cockroach /var/lib/cockroach
+    ~~~
+
+1. Download the [sample configuration template](https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/_includes/{{ page.version.version }}/prod-deployment/securecockroachdb.service):
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    curl -o securecockroachdb.service https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/_includes/{{ page.version.version }}/prod-deployment/securecockroachdb.service
+    ~~~
+
+    Alternatively, you can create the file yourself and copy the script into it:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    {% include {{ page.version.version }}/prod-deployment/securecockroachdb.service %}
+    ~~~
+
+    Save the file in the `/etc/systemd/system/` directory.
+
+1. Customize the sample configuration template for your deployment:
+
+    Specify values for the following flags:
+
+    {% include {{ page.version.version }}/prod-deployment/advertise-addr-join.md %}
+
+1. Repeat these steps for each additional node that you want in your cluster.
+
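+A minimal sketch for bringing the new node up under `systemd`, assuming the `securecockroachdb.service` unit downloaded above:
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+# Start the node now, and start it automatically after future reboots.
+systemctl start securecockroachdb
+systemctl enable securecockroachdb
+~~~
+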
diff --git a/src/current/_includes/v25.3/prod-deployment/secure-start-nodes.md b/src/current/_includes/v25.3/prod-deployment/secure-start-nodes.md new file mode 100644 index 00000000000..f3c554f4f19 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/secure-start-nodes.md @@ -0,0 +1,168 @@ +You can start the nodes manually or automate the process using [systemd](https://www.freedesktop.org/wiki/Software/systemd/). + +
+ + +
+

+ +
+
+For each initial node of your cluster, complete the following steps:
+
+{{site.data.alerts.callout_info}}
+After completing these steps, nodes will not yet be live. They will complete the startup process and join together to form a cluster as soon as the cluster is initialized in the next step.
+{{site.data.alerts.end}}
+
+1. Visit [Releases]({% link releases/index.md %}) and download the full binary of CockroachDB to the node.
+
+1. On the node, follow the instructions to [install CockroachDB]({% link {{ page.version.version }}/install-cockroachdb.md %}).
+
+1. Run the [`cockroach start`]({% link {{ page.version.version }}/cockroach-start.md %}) command:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ cockroach start \
+    --certs-dir=certs \
+    --advertise-addr=<node1 address> \
+    --join=<node1 address>,<node2 address>,<node3 address> \
+    --cache=.25 \
+    --max-sql-memory=.25 \
+    --background
+    ~~~
+
+    This command primes the node to start, using the following flags:
+
+    Flag | Description
+    -----|------------
+    `--certs-dir` | Specifies the directory where you placed the `ca.crt` file and the `node.crt` and `node.key` files for the node.
+    `--advertise-addr` | Specifies the IP address/hostname and port to tell other nodes to use. The port number can be omitted, in which case it defaults to `26257`.<br><br>This value must route to an IP address the node is listening on (with `--listen-addr` unspecified, the node listens on all IP addresses).<br><br>In some networking scenarios, you may need to use `--advertise-addr` and/or `--listen-addr` differently. For more details, see [Networking]({% link {{ page.version.version }}/recommended-production-settings.md %}#networking).
+    `--join` | Identifies the address of 3-5 of the initial nodes of the cluster. These addresses should match the addresses that the target nodes are advertising.
+    `--cache`<br>`--max-sql-memory` | Increases the node's cache size to 25% of available system memory to improve read performance. The capacity for in-memory SQL processing defaults to 25% of system memory but can be raised, if necessary, to increase the number of simultaneous client connections allowed by the node as well as the node's capacity for in-memory processing of rows when using `ORDER BY`, `GROUP BY`, `DISTINCT`, joins, and window functions. For more details, see [Cache and SQL Memory Size]({% link {{ page.version.version }}/recommended-production-settings.md %}#cache-and-sql-memory-size).
+    `--background` | Starts the node in the background so you gain control of the terminal to issue more commands.
+
+    When deploying across multiple datacenters, or when there is otherwise high latency between nodes, it is recommended to set `--locality` as well. For more details, see [Locality]({% link {{ page.version.version }}/cockroach-start.md %}#locality).
+
+    For other flags not explicitly set, the command uses default values. For example, the node stores data in `--store=cockroach-data` and binds DB Console HTTP requests to `--http-addr=:8080`. To set these options manually, see [Start a Node]({% link {{ page.version.version }}/cockroach-start.md %}). A filled-in example of the start command follows these steps.
+
+Repeat these steps for each additional node that you want in your cluster.
+
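+For illustration, a minimal sketch of the start command for the first node, assuming three initial nodes at the hypothetical internal addresses `10.0.0.1`, `10.0.0.2`, and `10.0.0.3`:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    # On node 1: advertise this node's own (hypothetical) address and list all three initial nodes in --join.
+    # On a machine with 32 GiB of RAM, --cache=.25 and --max-sql-memory=.25 each reserve roughly 8 GiB.
+    $ cockroach start \
+    --certs-dir=certs \
+    --advertise-addr=10.0.0.1 \
+    --join=10.0.0.1,10.0.0.2,10.0.0.3 \
+    --cache=.25 \
+    --max-sql-memory=.25 \
+    --background
+    ~~~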
+ +
+
+For each initial node of your cluster, complete the following steps:
+
+{{site.data.alerts.callout_info}}
+After completing these steps, nodes will not yet be live. They will complete the startup process and join together to form a cluster as soon as the cluster is initialized in the next step.
+{{site.data.alerts.end}}
+
+1. SSH to the machine where you want the node to run. Ensure you are logged in as the `root` user.
+
+1. Download the [CockroachDB archive](https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz) for Linux, and extract the binary:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz \
+    | tar -xz
+    ~~~
+
+1. Copy the binary into your `PATH`:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ cp -i cockroach-{{ page.release_info.version }}.linux-amd64/cockroach /usr/local/bin/
+    ~~~
+
+    If you get a permissions error, prefix the command with `sudo`.
+
+1. CockroachDB uses custom-built versions of the [GEOS]({% link {{ page.version.version }}/architecture/glossary.md %}#geos) libraries. Copy these libraries to the location where CockroachDB expects to find them:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ mkdir -p /usr/local/lib/cockroach
+    ~~~
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ cp -i cockroach-{{ page.release_info.version }}.linux-amd64/lib/libgeos.so /usr/local/lib/cockroach/
+    ~~~
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ cp -i cockroach-{{ page.release_info.version }}.linux-amd64/lib/libgeos_c.so /usr/local/lib/cockroach/
+    ~~~
+
+    If you get a permissions error, prefix the command with `sudo`.
+
+1. Create the Cockroach directory:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ mkdir /var/lib/cockroach
+    ~~~
+
+1. Create a Unix user named `cockroach`:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ useradd cockroach
+    ~~~
+
+1. Move the `certs` directory to the `cockroach` directory:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ mv certs /var/lib/cockroach/
+    ~~~
+
+1. Change the ownership of the `cockroach` directory to the user `cockroach`:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ chown -R cockroach /var/lib/cockroach
+    ~~~
+
+1. Download the [sample configuration template](https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/_includes/{{ page.version.version }}/prod-deployment/securecockroachdb.service) and save the file in the `/etc/systemd/system/` directory:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    curl -o securecockroachdb.service https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/_includes/{{ page.version.version }}/prod-deployment/securecockroachdb.service
+    ~~~
+
+    Alternatively, you can create the file yourself and copy the script into it:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    {% include {{ page.version.version }}/prod-deployment/securecockroachdb.service %}
+    ~~~
+
+1. In the sample configuration template, specify values for the following flags:
+
+    {% include {{ page.version.version }}/prod-deployment/advertise-addr-join.md %}
+
+    When deploying across multiple datacenters, or when there is otherwise high latency between nodes, it is recommended to set `--locality` as well. For more details, see [Locality]({% link {{ page.version.version }}/cockroach-start.md %}#locality).
+
+    For other flags not explicitly set, the command uses default values. For example, the node stores data in `--store=cockroach-data` and binds DB Console HTTP requests to `--http-addr=localhost:8080`. To set these options manually, see [Start a Node]({% link {{ page.version.version }}/cockroach-start.md %}).
+
+1. Start the CockroachDB cluster (a sketch for verifying the service follows these steps):
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    systemctl start securecockroachdb
+    ~~~
+
+1. Configure `systemd` to start CockroachDB automatically after a reboot:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    systemctl enable securecockroachdb
+    ~~~
+
+1. Repeat these steps for each additional node that you want in your cluster.
+
+{{site.data.alerts.callout_info}}
+`systemd` handles node restarts in case of node failure. To stop a node without `systemd` restarting it, run `systemctl stop securecockroachdb`.
+{{site.data.alerts.end}}
+
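+Once the service is running, you can check its state with standard `systemd` tooling. A minimal sketch (the unit name `securecockroachdb` matches the template above; output will vary by system):
+
+~~~ shell
+# Confirm the service is active and see recent log lines.
+systemctl status securecockroachdb
+
+# Follow the node's logs through the journal.
+journalctl -u securecockroachdb -f
+~~~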
diff --git a/src/current/_includes/v25.3/prod-deployment/secure-test-cluster.md new file mode 100644 index 00000000000..2eef1f9ef4f --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/secure-test-cluster.md @@ -0,0 +1,41 @@
+CockroachDB replicates and distributes data behind the scenes and uses a [Gossip protocol](https://wikipedia.org/wiki/Gossip_protocol) to enable each node to locate data across the cluster. Once a cluster is live, any node can be used as a SQL gateway.
+
+When using a load balancer, you should issue commands directly to the load balancer, which then routes traffic to the nodes.
+
+Use the [built-in SQL client]({% link {{ page.version.version }}/cockroach-sql.md %}) locally as follows:
+
+1. On your local machine, launch the built-in SQL client, with the `--host` flag set to the address of the load balancer:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ cockroach sql --certs-dir=certs --host=<address of load balancer>
+    ~~~
+
+1. Create a `securenodetest` database:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ sql
+    > CREATE DATABASE securenodetest;
+    ~~~
+
+1. View the cluster's databases, which will include `securenodetest`:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ sql
+    > SHOW DATABASES;
+    ~~~
+
+    ~~~
+    +--------------------+
+    |      Database      |
+    +--------------------+
+    | crdb_internal      |
+    | information_schema |
+    | securenodetest     |
+    | pg_catalog         |
+    | system             |
+    +--------------------+
+    (5 rows)
+    ~~~
+
+1. Use `\q` to exit the SQL shell.
\ No newline at end of file
diff --git a/src/current/_includes/v25.3/prod-deployment/secure-test-load-balancing.md new file mode 100644 index 00000000000..ba1ecd90919 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/secure-test-load-balancing.md @@ -0,0 +1,77 @@
+CockroachDB comes with a number of [built-in workloads]({% link {{ page.version.version }}/cockroach-workload.md %}) for simulating client traffic. This step features CockroachDB's version of the [TPC-C](http://www.tpc.org/tpcc/) workload.
+
+{{site.data.alerts.callout_info}}
+Be sure that you have configured your network to allow traffic from the application to the load balancer. In this case, you will run the sample workload on one of your machines. The traffic source should therefore be the **internal (private)** IP address of that machine.
+{{site.data.alerts.end}}
+
+For comprehensive guidance on benchmarking CockroachDB with TPC-C, refer to [Performance Benchmarking]({% link {{ page.version.version }}/performance-benchmarking-with-tpcc-local.md %}).
+
+1. SSH to the machine where you want to run the sample TPC-C workload.
+
+    This should be a machine that is not running a CockroachDB node, and it should already have a `certs` directory containing `ca.crt`, `client.root.crt`, and `client.root.key` files.
+
+1. Download the [CockroachDB archive](https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz) for Linux, and extract the binary:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz \
+    | tar -xz
+    ~~~
+
+1. Copy the binary into your `PATH`:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ cp -i cockroach-{{ page.release_info.version }}.linux-amd64/cockroach /usr/local/bin/
+    ~~~
+
+    If you get a permissions error, prefix the command with `sudo`.
+
+1. Use the [`cockroach workload`]({% link {{ page.version.version }}/cockroach-workload.md %}) command to load the initial schema and data, pointing it at the IP address of the load balancer:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ cockroach workload init tpcc \
+    'postgresql://root@<address of load balancer>:26257/tpcc?sslmode=verify-full&sslrootcert=certs/ca.crt&sslcert=certs/client.root.crt&sslkey=certs/client.root.key'
+    ~~~
+
+1. Use the `cockroach workload` command to run the workload for 10 minutes:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ cockroach workload run tpcc \
+    --duration=10m \
+    'postgresql://root@<address of load balancer>:26257/tpcc?sslmode=verify-full&sslrootcert=certs/ca.crt&sslcert=certs/client.root.crt&sslkey=certs/client.root.key'
+    ~~~
+
+    You'll see per-operation statistics print to standard output every second:
+
+    ~~~
+    _elapsed___errors__ops/sec(inst)___ops/sec(cum)__p50(ms)__p95(ms)__p99(ms)_pMax(ms)
+      1s        0         1443.4         1494.8      4.7      9.4     27.3     67.1 transfer
+      2s        0         1686.5         1590.9      4.7      8.1     15.2     28.3 transfer
+      3s        0         1735.7         1639.0      4.7      7.3     11.5     28.3 transfer
+      4s        0         1542.6         1614.9      5.0      8.9     12.1     21.0 transfer
+      5s        0         1695.9         1631.1      4.7      7.3     11.5     22.0 transfer
+      6s        0         1569.2         1620.8      5.0      8.4     11.5     15.7 transfer
+      7s        0         1614.6         1619.9      4.7      8.1     12.1     16.8 transfer
+      8s        0         1344.4         1585.6      5.8     10.0     15.2     31.5 transfer
+      9s        0         1351.9         1559.5      5.8     10.0     16.8     54.5 transfer
+     10s        0         1514.8         1555.0      5.2      8.1     12.1     16.8 transfer
+    ...
+    ~~~
+
+    After the specified duration (10 minutes in this case), the workload will stop and you'll see totals printed to standard output:
+
+    ~~~
+    _elapsed___errors_____ops(total)___ops/sec(cum)__avg(ms)__p50(ms)__p95(ms)__p99(ms)_pMax(ms)__result
+     600.0s        0         823902         1373.2      5.8      5.5     10.0     15.2    209.7
+    ~~~
+
+    {{site.data.alerts.callout_success}}
+    For more `tpcc` options, use `cockroach workload run tpcc --help`. For details about other workloads built into the `cockroach` binary, use `cockroach workload --help`.
+    {{site.data.alerts.end}}
+
+1. To monitor the load generator's progress, open the [DB Console]({% link {{ page.version.version }}/ui-overview.md %}) by pointing a browser to the address in the `admin` field in the standard output of any node on startup.
+
+    Since the load generator is pointed at the load balancer, the connections will be evenly distributed across nodes. To verify this, click **Metrics** on the left, select the **SQL** dashboard, and then check the **SQL Connections** graph. You can use the **Graph** menu to filter the graph for specific nodes.
diff --git a/src/current/_includes/v25.3/prod-deployment/securecockroachdb.service new file mode 100644 index 00000000000..13658ae4cce --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/securecockroachdb.service @@ -0,0 +1,16 @@
+[Unit]
+Description=Cockroach Database cluster node
+Requires=network.target
+[Service]
+Type=notify
+WorkingDirectory=/var/lib/cockroach
+ExecStart=/usr/local/bin/cockroach start --certs-dir=certs --advertise-addr=<node1 address> --join=<node1 address>,<node2 address>,<node3 address> --cache=.25 --max-sql-memory=.25
+TimeoutStopSec=300
+Restart=always
+RestartSec=10
+StandardOutput=syslog
+StandardError=syslog
+SyslogIdentifier=cockroach
+User=cockroach
+[Install]
+WantedBy=default.target
diff --git a/src/current/_includes/v25.3/prod-deployment/synchronize-clocks.md new file mode 100644 index 00000000000..b120a3a735b --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/synchronize-clocks.md @@ -0,0 +1,179 @@
+CockroachDB requires moderate levels of [clock synchronization]({% link {{ page.version.version }}/recommended-production-settings.md %}#clock-synchronization) to preserve data consistency. For this reason, when a node detects that its clock is out of sync with at least half of the other nodes in the cluster by 80% of the maximum offset allowed (500ms by default), it spontaneously shuts down.
This avoids the risk of consistency anomalies, but it's best to prevent clocks from drifting too far in the first place by running clock synchronization software on each node. + +{% if page.title contains "Digital Ocean" or page.title contains "On-Premises" %} + +[`ntpd`](http://doc.ntp.org/) should keep offsets in the single-digit milliseconds, so that software is featured here, but other methods of clock synchronization are suitable as well. + +1. SSH to the first machine. + +1. Disable `timesyncd`, which tends to be active by default on some Linux distributions: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ sudo timedatectl set-ntp no + ~~~ + + Verify that `timesyncd` is off: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ timedatectl + ~~~ + + Look for `Network time on: no` or `NTP enabled: no` in the output. + +1. Install the `ntp` package: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ sudo apt-get install ntp + ~~~ + +1. Stop the NTP daemon: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ sudo service ntp stop + ~~~ + +1. Sync the machine's clock with Google's NTP service: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ sudo ntpd -b time.google.com + ~~~ + + To make this change permanent, in the `/etc/ntp.conf` file, remove or comment out any lines starting with `server` or `pool` and add the following lines: + + {% include_cached copy-clipboard.html %} + ~~~ + server time1.google.com iburst + server time2.google.com iburst + server time3.google.com iburst + server time4.google.com iburst + ~~~ + + Restart the NTP daemon: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ sudo service ntp start + ~~~ + + {{site.data.alerts.callout_info}} + We recommend Google's NTP service because it handles ["smearing" the leap second](https://developers.google.com/time/smear). If you use a different NTP service that doesn't smear the leap second, be sure to configure client-side smearing in the same way on each machine. See the [Production Checklist]({% link {{ page.version.version }}/recommended-production-settings.md %}#considerations) for details. + {{site.data.alerts.end}} + +1. Verify that the machine is using a Google NTP server: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ sudo ntpq -p + ~~~ + + The active NTP server will be marked with an asterisk. + +1. Repeat these steps for each machine where a CockroachDB node will run. + +{% elsif page.title contains "Google" %} + +Compute Engine instances are preconfigured to use [NTP](http://www.ntp.org/), which should keep offsets in the single-digit milliseconds. However, Google can’t predict how external NTP services, such as `pool.ntp.org`, will handle the leap second. Therefore, you should: + +- [Configure each GCE instance to use Google's internal NTP service](https://cloud.google.com/compute/docs/instances/configure-ntp#configure_ntp_for_your_instances). +- If you plan to run a hybrid cluster across GCE and other cloud providers or environments, note that all of the nodes must be synced to the same time source, or to different sources that implement leap second smearing in the same way. See the [Production Checklist]({% link {{ page.version.version }}/recommended-production-settings.md %}#considerations) for details. 
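+To confirm that an instance is using Google's internal NTP service, a quick check (assuming `chrony`, the default time daemon on many GCE images; `metadata.google.internal` is Google's documented internal NTP endpoint):
+
+~~~ shell
+# The preferred source, marked with *, should be metadata.google.internal (169.254.169.254).
+chronyc sources -v
+~~~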
+
+{% elsif page.title contains "AWS" %}
+
+Amazon provides the [Amazon Time Sync Service](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-time.html), which uses a fleet of satellite-connected and atomic reference clocks in each AWS Region to deliver accurate current time readings. The service also smears the leap second.
+
+- [Configure each AWS instance to use the internal Amazon Time Sync Service](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-time.html#configure-amazon-time-service).
+    - Per the above instructions, ensure that `/etc/chrony.conf` on the instance contains the line `server 169.254.169.123 prefer iburst minpoll 4 maxpoll 4` and that other `server` or `pool` lines are commented out.
+    - To verify that the Amazon Time Sync Service is being used, run `chronyc sources -v` and check for a line containing `* 169.254.169.123`. The `*` denotes the preferred time server.
+- If you plan to run a hybrid cluster across AWS and other cloud providers or environments, note that all of the nodes must be synced to the same time source, or to different sources that implement leap second smearing in the same way. See the [Production Checklist]({% link {{ page.version.version }}/recommended-production-settings.md %}#considerations) for details.
+
+{% elsif page.title contains "Azure" %}
+
+[`ntpd`](http://doc.ntp.org/) should keep offsets in the single-digit milliseconds, so that software is featured here. However, to run `ntpd` properly on Azure VMs, it's necessary to first unbind the Time Synchronization device used by the Hyper-V technology running Azure VMs; this device aims to synchronize time between the VM and its host operating system but has been known to cause problems.
+
+1. SSH to the first machine.
+
+1. Find the ID of the Hyper-V Time Synchronization device:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ curl -O https://raw.githubusercontent.com/torvalds/linux/master/tools/hv/lsvmbus
+    ~~~
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ python lsvmbus -vv | grep -w "Time Synchronization" -A 3
+    ~~~
+
+    ~~~
+    VMBUS ID 12: Class_ID = {9527e630-d0ae-497b-adce-e80ab0175caf} - [Time Synchronization]
+        Device_ID = {2dd1ce17-079e-403c-b352-a1921ee207ee}
+        Sysfs path: /sys/bus/vmbus/devices/2dd1ce17-079e-403c-b352-a1921ee207ee
+        Rel_ID=12, target_cpu=0
+    ~~~
+
+1. Unbind the device, using the `Device_ID` from the previous command's output:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ echo <Device_ID> | sudo tee /sys/bus/vmbus/drivers/hv_utils/unbind
+    ~~~
+
+1. Install the `ntp` package:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ sudo apt-get install ntp
+    ~~~
+
+1. Stop the NTP daemon:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ sudo service ntp stop
+    ~~~
+
+1.
Sync the machine's clock with Google's NTP service: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ sudo ntpd -b time.google.com + ~~~ + + To make this change permanent, in the `/etc/ntp.conf` file, remove or comment out any lines starting with `server` or `pool` and add the following lines: + + {% include_cached copy-clipboard.html %} + ~~~ + server time1.google.com iburst + server time2.google.com iburst + server time3.google.com iburst + server time4.google.com iburst + ~~~ + + Restart the NTP daemon: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ sudo service ntp start + ~~~ + + {{site.data.alerts.callout_info}} + We recommend Google's NTP service because it handles ["smearing" the leap second](https://developers.google.com/time/smear). If you use a different NTP service that doesn't smear the leap second, be sure to configure client-side smearing in the same way on each machine. See the [Production Checklist]({% link {{ page.version.version }}/recommended-production-settings.md %}#considerations) for details. + {{site.data.alerts.end}} + +1. Verify that the machine is using a Google NTP server: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ sudo ntpq -p + ~~~ + + The active NTP server will be marked with an asterisk. + +1. Repeat these steps for each machine where a CockroachDB node will run. + +{% endif %} diff --git a/src/current/_includes/v25.3/prod-deployment/terminology-vcpu.md b/src/current/_includes/v25.3/prod-deployment/terminology-vcpu.md new file mode 100644 index 00000000000..790ce37a2b9 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/terminology-vcpu.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_info}} +In our sizing and production guidance, 1 vCPU is considered equivalent to 1 core in the underlying hardware platform. +{{site.data.alerts.end}} \ No newline at end of file diff --git a/src/current/_includes/v25.3/prod-deployment/topology-recommendations.md b/src/current/_includes/v25.3/prod-deployment/topology-recommendations.md new file mode 100644 index 00000000000..b6cdfdb7510 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/topology-recommendations.md @@ -0,0 +1,26 @@ +- Do not run multiple node processes on the same VM or machine. This defeats CockroachDB's replication and causes the system to be a single point of failure. Instead, start each node on a separate VM or machine. +- To start a node with multiple disks or SSDs, you can use either of these approaches: + - Configure the disks or SSDs as a single RAID volume, then pass the RAID volume to the `--store` flag when starting the `cockroach` process on the node. + - Provide a separate `--store` flag for each disk when starting the `cockroach` process on the node. For more details about stores, see [Start a Node]({% link {{ page.version.version }}/cockroach-start.md %}#store). + + {{site.data.alerts.callout_danger}} + If you start a node with multiple `--store` flags, it is not possible to scale back down to only using a single store on the node. Instead, you must decommission the node and start a new node with the updated `--store`. + {{site.data.alerts.end}} + +- When starting each node, use the [`--locality`]({% link {{ page.version.version }}/cockroach-start.md %}#locality) flag to describe the node's location, for example, `--locality=region=west,zone=us-west-1`. The key-value pairs should be ordered from most to least inclusive, and the keys and order of key-value pairs must be the same on all nodes. 
+ +- When deploying in a single availability zone: + + - To be able to tolerate the failure of any 1 node, use at least 3 nodes with the [`default` 3-way replication factor]({% link {{ page.version.version }}/configure-replication-zones.md %}#view-the-default-replication-zone). In this case, if 1 node fails, each range retains 2 of its 3 replicas, a majority. + + - To be able to tolerate 2 simultaneous node failures, use at least 5 nodes and [increase the `default` replication factor for user data]({% link {{ page.version.version }}/configure-replication-zones.md %}#edit-the-default-replication-zone) to 5. The replication factor for [important internal data]({% link {{ page.version.version }}/configure-replication-zones.md %}#create-a-replication-zone-for-a-system-range) is 5 by default, so no adjustments are needed for internal data. In this case, if 2 nodes fail at the same time, each range retains 3 of its 5 replicas, a majority. + +- When deploying across multiple availability zones: + + - To be able to tolerate the failure of 1 entire AZ in a region, use at least 3 AZs per region and set `--locality` on each node to spread data evenly across regions and AZs. In this case, if 1 AZ goes offline, the 2 remaining AZs retain a majority of replicas. + + - To ensure that ranges are split evenly across nodes, use the same number of nodes in each AZ. This is to avoid overloading any nodes with excessive resource consumption. + +- When deploying across multiple regions: + + - To be able to tolerate the failure of 1 entire region, use at least 3 regions. diff --git a/src/current/_includes/v25.3/prod-deployment/use-cluster.md b/src/current/_includes/v25.3/prod-deployment/use-cluster.md new file mode 100644 index 00000000000..0230ff5e682 --- /dev/null +++ b/src/current/_includes/v25.3/prod-deployment/use-cluster.md @@ -0,0 +1,12 @@ +Now that your deployment is working, you can: + +1. [Implement your data model]({% link {{ page.version.version }}/sql-statements.md %}). +1. [Create users]({% link {{ page.version.version }}/create-user.md %}) and [grant them privileges]({% link {{ page.version.version }}/grant.md %}). +1. [Connect your application]({% link {{ page.version.version }}/install-client-drivers.md %}). Be sure to connect your application to the load balancer, not to a CockroachDB node. +1. [Take backups]({% link {{ page.version.version }}/take-full-and-incremental-backups.md %}) of your data. + +You may also want to adjust the way the cluster replicates data. For example, by default, a multi-node cluster replicates all data 3 times; you can change this replication factor or create additional rules for replicating individual databases and tables differently. For more information, see [Replication Controls]({% link {{ page.version.version }}/configure-replication-zones.md %}). + +{{site.data.alerts.callout_danger}} +When running a cluster of 5 nodes or more, it's safest to [increase the replication factor for important internal data]({% link {{ page.version.version }}/configure-replication-zones.md %}#create-a-replication-zone-for-a-system-range) to 5, even if you do not do so for user data. For the cluster as a whole to remain available, the ranges for this internal data must always retain a majority of their replicas. 
+{{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.3/resilience/dr-feature-table.md new file mode 100644 index 00000000000..2429d8b0675 --- /dev/null +++ b/src/current/_includes/v25.3/resilience/dr-feature-table.md @@ -0,0 +1,56 @@
+|  | Point-in-time backup & restore | Physical cluster replication (asynchronous) |
+|---|---|---|
+| RPO | >= 5 minutes | 10s of seconds |
+| RTO | Minutes to hours, depending on data size and number of nodes | Seconds to minutes, depending on cluster size and time of failover |
+| Write latency | No impact | No impact |
+| Recovery | Manual restore | Manual failover |
+| Fault tolerance | Not applicable | Zero RPO for node, availability zone (within a cluster), and region failures, with loss up to RPO in a two-region (or two-datacenter) setup |
+| Minimum regions to achieve fault tolerance | 1 | 2 |
diff --git a/src/current/_includes/v25.3/resilience/recovery-objectives-definition.md b/src/current/_includes/v25.3/resilience/recovery-objectives-definition.md new file mode 100644 index 00000000000..bcbd9102359 --- /dev/null +++ b/src/current/_includes/v25.3/resilience/recovery-objectives-definition.md @@ -0,0 +1,2 @@ +- **Recovery Point Objective (RPO)**: The maximum amount of data loss (measured by time) that an organization can tolerate. +- **Recovery Time Objective (RTO)**: The maximum length of time it should take to restore normal operations following an outage. \ No newline at end of file diff --git a/src/current/_includes/v25.3/scram-authentication-recommendations.md b/src/current/_includes/v25.3/scram-authentication-recommendations.md new file mode 100644 index 00000000000..2ad41f75cd8 --- /dev/null +++ b/src/current/_includes/v25.3/scram-authentication-recommendations.md @@ -0,0 +1,4 @@ +- Test and adjust your workloads in batches when migrating to SCRAM authentication. +- Start by enabling SCRAM authentication in a testing environment, and test the performance of your client application against the types of workloads you expect it to handle in production before rolling the changes out to production. +- Limit the maximum number of connections in the client driver's connection pool. +- Limit the maximum number of concurrent transactions the client application can issue. diff --git a/src/current/_includes/v25.3/see-zone-config-troubleshooting-guide.md b/src/current/_includes/v25.3/see-zone-config-troubleshooting-guide.md new file mode 100644 index 00000000000..838c8ee2cf1 --- /dev/null +++ b/src/current/_includes/v25.3/see-zone-config-troubleshooting-guide.md @@ -0,0 +1 @@ +For instructions showing how to troubleshoot replication zones that may be misconfigured, see [Troubleshoot Replication Zone Configurations]({% link {{ page.version.version}}/troubleshoot-replication-zones.md %}). diff --git a/src/current/_includes/v25.3/setup/create-a-free-cluster.md b/src/current/_includes/v25.3/setup/create-a-free-cluster.md new file mode 100644 index 00000000000..28fbf2e780b --- /dev/null +++ b/src/current/_includes/v25.3/setup/create-a-free-cluster.md @@ -0,0 +1,9 @@ +{% include cockroachcloud/free-cluster-limit.md %} + +1. If you haven't already, sign up for a CockroachDB {{ site.data.products.cloud }} account. +1. [Log in](https://cockroachlabs.cloud/) to your CockroachDB {{ site.data.products.cloud }} account. +1. On the **Clusters** page, click **Create Cluster**. +1. On the **Create your cluster** page, select **Serverless**. +1. Click **Create cluster**. + + Your cluster will be created in a few seconds and the **Create SQL user** dialog will display. \ No newline at end of file diff --git a/src/current/_includes/v25.3/setup/create-first-sql-user.md b/src/current/_includes/v25.3/setup/create-first-sql-user.md new file mode 100644 index 00000000000..21c080a0626 --- /dev/null +++ b/src/current/_includes/v25.3/setup/create-first-sql-user.md @@ -0,0 +1,8 @@ +The **Create SQL user** dialog allows you to create a new SQL user and password. + +1. Enter a username in the **SQL user** field or use the one provided by default. +1. Click **Generate & save password**. +1. Copy the generated password and save it in a secure location. +1. Click **Next**. + + Currently, all new SQL users are created with admin privileges. For more information and to change the default settings, see [Manage SQL users on a cluster]({% link cockroachcloud/managing-access.md %}#manage-sql-users-on-a-cluster). 
diff --git a/src/current/_includes/v25.3/setup/init-bank-sample.md b/src/current/_includes/v25.3/setup/init-bank-sample.md new file mode 100644 index 00000000000..534658659ef --- /dev/null +++ b/src/current/_includes/v25.3/setup/init-bank-sample.md @@ -0,0 +1,38 @@ +1. Set the `DATABASE_URL` environment variable to the connection string for your cluster: + +
+ + {% include_cached copy-clipboard.html %} + ~~~ shell + export DATABASE_URL="postgresql://root@localhost:26257?sslmode=disable" + ~~~ + +
+ +
+ + {% include_cached copy-clipboard.html %} + ~~~ shell + export DATABASE_URL="{connection-string}" + ~~~ + + Where `{connection-string}` is the connection string you copied earlier. + +
+ + +1. To initialize the example database, use the [`cockroach sql`]({% link {{ page.version.version }}/cockroach-sql.md %}) command to execute the SQL statements in the `dbinit.sql` file: + + {% include_cached copy-clipboard.html %} + ~~~ shell + cat dbinit.sql | cockroach sql --url $DATABASE_URL + ~~~ + + The SQL statement in the initialization file should execute: + + ~~~ + CREATE TABLE + + + Time: 102ms + ~~~ diff --git a/src/current/_includes/v25.3/setup/sample-setup-certs.md b/src/current/_includes/v25.3/setup/sample-setup-certs.md new file mode 100644 index 00000000000..e3c70dd385e --- /dev/null +++ b/src/current/_includes/v25.3/setup/sample-setup-certs.md @@ -0,0 +1,78 @@ + +
+ + +
+ +
+ + +

Choose your installation method

+ +You can create a CockroachDB {{ site.data.products.serverless }} cluster using either the CockroachDB Cloud Console, a web-based graphical user interface (GUI) tool, or ccloud, a command-line interface (CLI) tool. + +
+ + +
+ +
+ +### Create a free cluster + +{% include cockroachcloud/quickstart/create-a-free-cluster.md %} + +### Create a SQL user + +{% include {{ page.version.version }}/setup/create-first-sql-user.md %} + +### Get the root certificate + +The **Connect to cluster** dialog shows information about how to connect to your cluster. + +1. Select **General connection string** from the **Select option** dropdown. +1. Open a new terminal on your local machine, and run the **CA Cert download command** provided in the **Download CA Cert** section. The client driver used in this tutorial requires this certificate to connect to CockroachDB {{ site.data.products.cloud }}. + +### Get the connection string + +Open the **General connection string** section, then copy the connection string provided and save it in a secure location. + +{{site.data.alerts.callout_info}} +The connection string is pre-populated with your username, password, cluster name, and other details. Your password, in particular, will be provided *only once*. Save it in a secure place (Cockroach Labs recommends a password manager) to connect to your cluster in the future. If you forget your password, you can reset it by going to the **SQL Users** page for the cluster, found at `https://cockroachlabs.cloud/cluster//users`. +{{site.data.alerts.end}} + +
+ +
+ +Follow these steps to create a CockroachDB {{ site.data.products.serverless }} cluster using the ccloud CLI tool. + +{{site.data.alerts.callout_info}} +The ccloud CLI tool is in Preview. +{{site.data.alerts.end}} + +

Install ccloud

+ +{% include cockroachcloud/ccloud/install-ccloud.md %} + +### Run `ccloud quickstart` to create a new cluster, create a SQL user, and retrieve the connection string. + +{% include cockroachcloud/ccloud/quickstart.md %} + +Select **General connection string**, then copy the connection string displayed and save it in a secure location. The connection string is the line starting `postgresql://`. + +~~~ +? How would you like to connect? General connection string +Retrieving cluster info: succeeded + Downloading cluster cert to /Users/maxroach/.postgresql/root.crt: succeeded +postgresql://maxroach:ThisIsNotAGoodPassword@blue-dog-147.6wr.cockroachlabs.cloud:26257/defaultdb?sslmode=verify-full&sslrootcert=%2FUsers%2Fmaxroach%2F.postgresql%2Froot.crt +~~~ +
+ +
+ +
+ +{% include {{ page.version.version }}/setup/start-single-node-insecure.md %} + +
diff --git a/src/current/_includes/v25.3/setup/sample-setup-jdbc.md b/src/current/_includes/v25.3/setup/sample-setup-jdbc.md new file mode 100644 index 00000000000..f63207c09a0 --- /dev/null +++ b/src/current/_includes/v25.3/setup/sample-setup-jdbc.md @@ -0,0 +1,74 @@ + +
+ + +
+ +
+ +

Choose your installation method

+ +You can create a CockroachDB {{ site.data.products.serverless }} cluster using either the CockroachDB Cloud Console, a web-based graphical user interface (GUI) tool, or ccloud, a command-line interface (CLI) tool. + +
+ + +
+ +
+ +### Create a free cluster + +{% include cockroachcloud/quickstart/create-a-free-cluster.md %} + +### Create a SQL user + +{% include {{ page.version.version }}/setup/create-first-sql-user.md %} + +### Get the connection string + +The **Connect to cluster** dialog shows information about how to connect to your cluster. + +1. Select **Java** from the **Select option/language** dropdown. +1. Select **JDBC** from the **Select tool** dropdown. +1. Copy the command provided to set the `JDBC_DATABASE_URL` environment variable. + + {{site.data.alerts.callout_info}} + The JDBC connection URL is pre-populated with your username, password, cluster name, and other details. Your password, in particular, will be provided *only once*. Save it in a secure place (Cockroach Labs recommends a password manager) to connect to your cluster in the future. If you forget your password, you can reset it by going to the **SQL Users** page for the cluster, found at `https://cockroachlabs.cloud/cluster//users`. + {{site.data.alerts.end}} + +
+ +
+ +Follow these steps to create a CockroachDB {{ site.data.products.serverless }} cluster using the ccloud CLI tool. + +{{site.data.alerts.callout_info}} +The ccloud CLI tool is in Preview. +{{site.data.alerts.end}} + +

Install ccloud

+ +{% include cockroachcloud/ccloud/install-ccloud.md %} + +### Run `ccloud quickstart` to create a new cluster, create a SQL user, and retrieve the connection string. + +{% include cockroachcloud/ccloud/quickstart.md %} + +Select **General connection string**, then copy the connection string displayed and save it in a secure location. The connection string is the line starting `postgresql://`. + +~~~ +? How would you like to connect? General connection string +Retrieving cluster info: succeeded + Downloading cluster cert to /Users/maxroach/.postgresql/root.crt: succeeded +postgresql://maxroach:ThisIsNotAGoodPassword@blue-dog-147.6wr.cockroachlabs.cloud:26257/defaultdb?sslmode=verify-full&sslrootcert=%2FUsers%2Fmaxroach%2F.postgresql%2Froot.crt +~~~ +
+ +
+ +
+ +{% include {{ page.version.version }}/setup/start-single-node-insecure.md %} + +
\ No newline at end of file diff --git a/src/current/_includes/v25.3/setup/sample-setup-parameters-certs.md b/src/current/_includes/v25.3/setup/sample-setup-parameters-certs.md new file mode 100644 index 00000000000..7cbf4348f1a --- /dev/null +++ b/src/current/_includes/v25.3/setup/sample-setup-parameters-certs.md @@ -0,0 +1,85 @@ + +
+ + +
+ +
+ + +

Choose your installation method

+ +You can create a CockroachDB {{ site.data.products.serverless }} cluster using either the CockroachDB Cloud Console, a web-based graphical user interface (GUI) tool, or ccloud, a command-line interface (CLI) tool. + +
+ + +
+ +
+ +### Create a free cluster + +{% include cockroachcloud/quickstart/create-a-free-cluster.md %} + +### Create a SQL user + +{% include {{ page.version.version }}/setup/create-first-sql-user.md %} + +### Get the root certificate + +The **Connect to cluster** dialog shows information about how to connect to your cluster. + +1. Select **General connection string** from the **Select option** dropdown. +1. Open a new terminal on your local machine, and run the **CA Cert download command** provided in the **Download CA Cert** section. The client driver used in this tutorial requires this certificate to connect to CockroachDB {{ site.data.products.cloud }}. + +### Get the connection information + +1. Select **Parameters only** from the **Select option** dropdown. +1. Copy the connection information for each parameter displayed and save it in a secure location. + +
+ +
+ +Follow these steps to create a CockroachDB {{ site.data.products.serverless }} cluster using the ccloud CLI tool. + +{{site.data.alerts.callout_info}} +The ccloud CLI tool is in Preview. +{{site.data.alerts.end}} + +

Install ccloud

+ +{% include cockroachcloud/ccloud/install-ccloud.md %} + +### Run `ccloud quickstart` to create a new cluster, create a SQL user, and retrieve the connection string. + +{% include cockroachcloud/ccloud/quickstart.md %} + +Select **Parameters only** then copy the connection parameters displayed and save them in a secure location. + +~~~ +? How would you like to connect? Parameters only +Looking up cluster ID: succeeded +Creating SQL user: succeeded +Success! Created SQL user + name: maxroach + cluster: 37174250-b944-461f-b1c1-3a99edb6af32 +Retrieving cluster info: succeeded +Connection parameters + Database: defaultdb + Host: blue-dog-147.6wr.cockroachlabs.cloud + Password: ThisIsNotAGoodPassword + Port: 26257 + Username: maxroach +~~~ + +
+ +
+ +
+ +{% include {{ page.version.version }}/setup/start-single-node-insecure.md %} + +
diff --git a/src/current/_includes/v25.3/setup/sample-setup-parameters.md b/src/current/_includes/v25.3/setup/sample-setup-parameters.md new file mode 100644 index 00000000000..3b05dce081b --- /dev/null +++ b/src/current/_includes/v25.3/setup/sample-setup-parameters.md @@ -0,0 +1,79 @@ + +
+ + +
+ +
+ +

Choose your installation method

+ +You can create a CockroachDB {{ site.data.products.serverless }} cluster using either the CockroachDB Cloud Console, a web-based graphical user interface (GUI) tool, or ccloud, a command-line interface (CLI) tool. + +
+ + +
+ +
+ +### Create a free cluster + +{% include cockroachcloud/quickstart/create-a-free-cluster.md %} + +### Create a SQL user + +{% include {{ page.version.version }}/setup/create-first-sql-user.md %} + +### Get the connection information + +The **Connect to cluster** dialog shows information about how to connect to your cluster. + +1. Select **Parameters only** from the **Select option** dropdown. +1. Copy the connection information for each parameter displayed and save it in a secure location. + +
+ +
+ +Follow these steps to create a CockroachDB {{ site.data.products.serverless }} cluster using the ccloud CLI tool. + +{{site.data.alerts.callout_info}} +The ccloud CLI tool is in Preview. +{{site.data.alerts.end}} + +

Install ccloud

+ +{% include cockroachcloud/ccloud/install-ccloud.md %} + +### Run `ccloud quickstart` to create a new cluster, create a SQL user, and retrieve the connection string. + +{% include cockroachcloud/ccloud/quickstart.md %} + +Select **Parameters only** then copy the connection parameters displayed and save them in a secure location. + +~~~ +? How would you like to connect? Parameters only +Looking up cluster ID: succeeded +Creating SQL user: succeeded +Success! Created SQL user + name: maxroach + cluster: 37174250-b944-461f-b1c1-3a99edb6af32 +Retrieving cluster info: succeeded +Connection parameters + Database: defaultdb + Host: blue-dog-147.6wr.cockroachlabs.cloud + Password: ThisIsNotAGoodPassword + Port: 26257 + Username: maxroach +~~~ + +
+ +
+ +
+ +{% include {{ page.version.version }}/setup/start-single-node-insecure.md %} + +
diff --git a/src/current/_includes/v25.3/setup/sample-setup.md b/src/current/_includes/v25.3/setup/sample-setup.md new file mode 100644 index 00000000000..975b8a105a7 --- /dev/null +++ b/src/current/_includes/v25.3/setup/sample-setup.md @@ -0,0 +1,75 @@ + +
+ + +
+ +
+ +

Choose your installation method

+ +You can create a CockroachDB {{ site.data.products.serverless }} cluster using either the CockroachDB Cloud Console, a web-based graphical user interface (GUI) tool, or ccloud, a command-line interface (CLI) tool. + +
+ + +
+ +
+ +### Create a free cluster + +{% include cockroachcloud/quickstart/create-a-free-cluster.md %} + +### Create a SQL user + +{% include {{ page.version.version }}/setup/create-first-sql-user.md %} + +### Get the connection string + +The **Connect to cluster** dialog shows information about how to connect to your cluster. + +1. Select **General connection string** from the **Select option** dropdown. +1. Open the **General connection string** section, then copy the connection string provided and save it in a secure location. + + The sample application used in this tutorial uses system CA certificates for server certificate verification, so you can skip the **Download CA Cert** instructions. + + {{site.data.alerts.callout_info}} + The connection string is pre-populated with your username, password, cluster name, and other details. Your password, in particular, will be provided *only once*. Save it in a secure place (Cockroach Labs recommends a password manager) to connect to your cluster in the future. If you forget your password, you can reset it by going to the **SQL Users** page for the cluster, found at `https://cockroachlabs.cloud/cluster//users`. + {{site.data.alerts.end}} + +
+ +
+ +Follow these steps to create a CockroachDB {{ site.data.products.serverless }} cluster using the ccloud CLI tool. + +{{site.data.alerts.callout_info}} +The ccloud CLI tool is in Preview. +{{site.data.alerts.end}} + +

Install ccloud

+ +{% include cockroachcloud/ccloud/install-ccloud.md %} + +### Run `ccloud quickstart` to create a new cluster, create a SQL user, and retrieve the connection string. + +{% include cockroachcloud/ccloud/quickstart.md %} + +Select **General connection string**, then copy the connection string displayed and save it in a secure location. The connection string is the line starting `postgresql://`. + +~~~ +? How would you like to connect? General connection string +Retrieving cluster info: succeeded + Downloading cluster cert to /Users/maxroach/.postgresql/root.crt: succeeded +postgresql://maxroach:ThisIsNotAGoodPassword@blue-dog-147.6wr.cockroachlabs.cloud:26257/defaultdb?sslmode=verify-full&sslrootcert=%2FUsers%2Fmaxroach%2F.postgresql%2Froot.crt +~~~ +
+ +
+ +
+ +{% include {{ page.version.version }}/setup/start-single-node-insecure.md %} + +
\ No newline at end of file diff --git a/src/current/_includes/v25.3/setup/start-single-node-insecure.md b/src/current/_includes/v25.3/setup/start-single-node-insecure.md new file mode 100644 index 00000000000..abf9fdf9c17 --- /dev/null +++ b/src/current/_includes/v25.3/setup/start-single-node-insecure.md @@ -0,0 +1,22 @@ +1. If you haven't already, [download the CockroachDB binary]({% link {{ page.version.version }}/install-cockroachdb.md %}). +1. Run the [`cockroach start-single-node`]({% link {{ page.version.version }}/cockroach-start-single-node.md %}) command: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach start-single-node --advertise-addr 'localhost' --insecure + ~~~ + + This starts an insecure, single-node cluster. +1. Take note of the following connection information in the SQL shell welcome text: + + ~~~ + CockroachDB node starting at 2021-08-30 17:25:30.06524 +0000 UTC (took 4.3s) + build: CCL v21.1.6 @ 2021/07/20 15:33:43 (go1.15.11) + webui: http://localhost:8080 + sql: postgresql://root@localhost:26257?sslmode=disable + ~~~ + + You'll use the `sql` connection string to connect to the cluster later in this tutorial. + + +{% include {{ page.version.version }}/prod-deployment/insecure-flag.md %} \ No newline at end of file diff --git a/src/current/_includes/v25.3/sidebar-data/cloud-deployments.json b/src/current/_includes/v25.3/sidebar-data/cloud-deployments.json new file mode 100644 index 00000000000..925692dbdb5 --- /dev/null +++ b/src/current/_includes/v25.3/sidebar-data/cloud-deployments.json @@ -0,0 +1,614 @@ +{ + "title": "Cloud Deployments", + "is_top_level": true, + "items": [ + { + "title": "Overview", + "urls": [ + "/cockroachcloud/index.html" + ] + }, + { + "title": "Create an Account", + "urls": [ + "/cockroachcloud/create-an-account.html" + ] + }, + { + "title": "CockroachDB Cloud Costs", + "urls": [ + "/cockroachcloud/costs.html" + ] + }, + { + "title": "Basic Deployments", + "items": [ + { + "title": "Plan a Basic Cluster", + "urls": [ + "/cockroachcloud/plan-your-cluster-basic.html" + ] + }, + { + "title": "Create a Basic Cluster", + "urls": [ + "/cockroachcloud/create-a-basic-cluster.html" + ] + }, + { + "title": "Connect to a Basic Cluster", + "urls": [ + "/cockroachcloud/connect-to-a-basic-cluster.html" + ] + }, + { + "title": "Understand your Resource Usage", + "urls": [ + "/cockroachcloud/resource-usage-basic.html" + ] + }, + { + "title": "Manage a Basic Cluster", + "urls": [ + "/cockroachcloud/basic-cluster-management.html" + ] + }, + { + "title": "Use the Terraform provider", + "urls": [ + "/cockroachcloud/provision-a-cluster-with-terraform.html" + ] + }, + { + "title": "Change a Cluster's Plan Between Basic and Standard", + "urls": [ + "/cockroachcloud/change-plan-between-basic-and-standard.html" + ] + } + ] + }, + { + "title": "Standard Deployments (Preview)", + "items": [ + { + "title": "Plan a Standard Cluster", + "urls": [ + "/cockroachcloud/plan-your-cluster.html" + ] + }, + { + "title": "Create a Standard Cluster", + "urls": [ + "/cockroachcloud/create-your-cluster.html" + ] + }, + { + "title": "Connect to a Standard Cluster", + "urls": [ + "/cockroachcloud/connect-to-your-cluster.html" + ] + }, + { + "title": "Understand your Resource Usage", + "urls": [ + "/cockroachcloud/resource-usage.html" + ] + }, + { + "title": "Manage a Standard Cluster", + "urls": [ + "/cockroachcloud/cluster-management.html" + ] + }, + { + "title": "Move into Production", + "urls": [ + "/cockroachcloud/production-checklist.html" + ] + }, + { + 
"title": "Provision a Standard Cluster with Terraform", + "urls": [ + "/cockroachcloud/provision-a-cluster-with-terraform.html" + ] + }, + { + "title": "Migrate from Standard to Advanced", + "urls": [ + "/cockroachcloud/migrate-from-standard-to-advanced.html" + ] + }, + { + "title": "Change a Cluster's Plan Between Basic and Standard", + "urls": [ + "/cockroachcloud/change-plan-between-basic-and-standard.html" + ] + } + ] + }, + { + "title": "Advanced Deployments", + "items": [ + { + "title": "Plan an Advanced Cluster", + "urls": [ + "/cockroachcloud/plan-your-cluster-advanced.html" + ] + }, + { + "title": "Create an Advanced Cluster", + "urls": [ + "/cockroachcloud/create-an-advanced-cluster.html" + ] + }, + { + "title": "Connect to an Advanced Cluster", + "urls": [ + "/cockroachcloud/connect-to-an-advanced-cluster.html" + ] + }, + { + "title": "Manage an Advanced Cluster", + "urls": [ + "/cockroachcloud/advanced-cluster-management.html" + ] + }, + { + "title": "Move into Production", + "urls": [ + "/cockroachcloud/production-checklist.html" + ] + }, + { + "title": "Provision an Advanced Cluster with Terraform", + "urls": [ + "/cockroachcloud/provision-a-cluster-with-terraform.html?filters=advanced" + ] + }, + { + "title": "CockroachDB Advanced on Azure", + "urls": [ + "/cockroachcloud/cockroachdb-advanced-on-azure.html" + ] + }, + { + "title": "Migrate from Standard to Advanced", + "urls": [ + "/cockroachcloud/migrate-from-standard-to-advanced.html" + ] + } + ] + }, + { + "title": "CockroachDB Cloud Regions", + "urls": [ + "/cockroachcloud/regions.html" + ] + }, + { + "title": "Security", + "items": [ + { + "title": "Overview", + "urls": [ + "/cockroachcloud/security-overview.html" + ] + }, + + { + "title": "Manage AWS PrivateLink", + "urls": [ + "/cockroachcloud/aws-privatelink.html" + ] + }, + { + "title": "Customer-Managed Encryption Keys (CMEK)", + "items": [ + { + "title": "Overview", + "urls": [ + "/cockroachcloud/cmek.html" + ] + }, + { + "title": "Manage CMEK", + "urls": [ + "/cockroachcloud/managing-cmek.html" + ] + } + ] + }, + { + "title": "Compliance", + "items": [ + { + "title": "Compliance in Advanced", + "urls": [ + "/cockroachcloud/compliance.html" + ] + }, + { + "title": "PCI DSS", + "urls": [ + "/cockroachcloud/pci-dss.html" + ] + } + ] + }, + { + "title": "Authentication", + "items": [ + { + "title": "Overview", + "urls": [ + "/cockroachcloud/authentication.html" + ] + }, + { + "title": "SSO for Cloud Organizations", + "urls": [ + "/cockroachcloud/cloud-org-sso.html" + ] + }, + { + "title": "Configure Cloud Organization SSO", + "urls": [ + "/cockroachcloud/configure-cloud-org-sso.html" + ] + }, + { + "title": "Configure SCIM Provisioning", + "urls": [ + "/cockroachcloud/configure-scim-provisioning.html" + ] + }, + { + "title": "Cluster SSO using the Cloud Console", + "urls": [ + "/cockroachcloud/cloud-sso-sql.html" + ] + }, + { + "title": "SQL Client Certificate Authentication for Advanced Clusters", + "urls": [ + "/cockroachcloud/client-certs-advanced.html" + ] + } + ] + }, + { + "title": "Authorization", + "items": [ + { + "title": "Overview", + "urls": [ + "/cockroachcloud/authorization.html" + ] + }, + { + "title": "Manage Users, Roles, and Service Accounts", + "urls": [ + "/cockroachcloud/managing-access.html" + ] + }, + { + "title": "Organize CockroachDB Cloud Clusters Using Folders", + "urls": [ + "/cockroachcloud/folders.html" + ] + }, + { + "title": "Organize CockroachDB Cloud Resources Using Labels", + "urls": [ + "/cockroachcloud/labels.html" + ] + } + ] + 
}, + { + "title": "Network Authorization", + "items": [ + { + "title": "Overview", + "urls": [ + "/cockroachcloud/network-authorization.html" + ] + }, + { + "title": "Create Private Clusters", + "urls": [ + "/cockroachcloud/private-clusters.html" + ] + }, + { + "title": "Egress Perimeter Controls", + "urls": [ + "/cockroachcloud/egress-perimeter-controls.html" + ] + } + ] + }, + { + "title": "Audit Logs", + "items": [ + { + "title": "SQL Audit Logging", + "urls": [ + "/cockroachcloud/sql-audit-logging.html" + ] + }, + { + "title": "Export Cloud Organization Audit Logs", + "urls": [ + "/cockroachcloud/cloud-org-audit-logs.html" + ] + }, + { + "title": "Export Logs From CockroachDB {{ site.data.products.standard }}", + "urls": [ + "/cockroachcloud/export-logs.html" + ] + }, + { + "title": "Export Logs From CockroachDB {{ site.data.products.advanced }}", + "urls": [ + "/cockroachcloud/export-logs-advanced.html" + ] + } + ] + }, + { + "title": "Security Tutorials", + "items": [ + { + "title": "Configure SQL Authentication", + "urls": [ + "/${VERSION}/security-reference/config-secure-hba.html" + ] + }, + { + "title": "Satori", + "urls": [ + "/${VERSION}/satori-integration.html" + ] + }, + { + "title": "HashiCorp Vault", + "urls": [ + "/${VERSION}/hashicorp-integration.html" + ] + } + ] + } + ] + }, + { + "title": "Monitoring and Alerting", + "items": [ + { + "title": "Cluster Overview Page", + "urls": [ + "/cockroachcloud/cluster-overview-page.html" + ] + }, + { + "title": "Alerts Page", + "urls": [ + "/cockroachcloud/alerts-page.html" + ] + }, + { + "title": "Tools Page", + "urls": [ + "/cockroachcloud/tools-page.html" + ] + }, + { + "title": "Metrics", + "items": [ + { + "title": "Metrics Overview", + "urls": [ + "/cockroachcloud/metrics.html" + ] + }, + { + "title": "Overview tab", + "urls": [ + "/cockroachcloud/metrics-overview.html" + ] + }, + { + "title": "SQL", + "urls": [ + "/cockroachcloud/metrics-sql.html" + ] + }, + { + "title": "Changefeeds", + "urls": [ + "/cockroachcloud/metrics-changefeeds.html" + ] + }, + { + "title": "Row-Level TTL", + "urls": [ + "/cockroachcloud/metrics-row-level-ttl.html" + ] + }, + { + "title": "Request Units", + "urls": [ + "/cockroachcloud/metrics-request-units.html" + ] + }, + { + "title": "Custom", + "urls": [ + "/cockroachcloud/custom-metrics-chart-page.html" + ] + }, + { + "title": "Export Metrics from {{ site.data.products.standard }}", + "urls": [ + "/cockroachcloud/export-metrics.html" + ] + }, + { + "title": "Export Metrics from {{ site.data.products.advanced }}", + "urls": [ + "/cockroachcloud/export-metrics-advanced.html" + ] + }, + { + "title": "Essential Metrics for {{ site.data.products.standard }}", + "urls": [ + "/cockroachcloud/metrics-essential.html" + ] + }, + { + "title": "Essential Metrics for {{ site.data.products.advanced }}", + "urls": [ + "/${VERSION}/essential-metrics-advanced.html" + ] + }, + { + "title": "Essential Alerts for CockroachDB {{ site.data.products.advanced }} Deployments", + "urls": [ + "/${VERSION}/essential-alerts-advanced.html" + ] + } + ] + }, + { + "title": "SQL Activity Page", + "items": [ + { + "title": "Statements Page", + "urls": [ + "/cockroachcloud/statements-page.html" + ] + }, + { + "title": "Sessions Page", + "urls": [ + "/cockroachcloud/sessions-page.html" + ] + }, + { + "title": "Transactions Page", + "urls": [ + "/cockroachcloud/transactions-page.html" + ] + } + ] + }, + { + "title": "Insights Page", + "urls": [ + "/cockroachcloud/insights-page.html" + ] + }, + { + "title": "Databases Page", + 
"urls": [ + "/cockroachcloud/databases-page.html" + ] + }, + { + "title": "Jobs Page", + "urls": [ + "/cockroachcloud/jobs-page.html" + ] + } + ] + }, + { + "title": "Backups and Restores", + "items": [ + { + "title": "Overview", + "urls": [ + "/cockroachcloud/backup-and-restore-overview.html" + ] + }, + { + "title": "Managed Backups", + "items": [ + { + "title": "Managed Backups for Basic Clusters", + "urls": [ + "/cockroachcloud/managed-backups-basic.html" + ] + }, + { + "title": "Managed Backups for Standard Clusters", + "urls": [ + "/cockroachcloud/managed-backups.html" + ] + }, + { + "title": "Managed Backups for Advanced Clusters", + "urls": [ + "/cockroachcloud/managed-backups-advanced.html" + ] + } + ] + }, + { + "title": "Take and Restore Self-Managed Backups", + "urls": [ + "/cockroachcloud/take-and-restore-self-managed-backups.html" + ] + }, + { + "title": "Monitoring", + "urls": [ + "/cockroachcloud/backup-and-restore-monitoring.html" + ] + } + ] + }, + { + "title": "Billing Management", + "urls": [ + "/cockroachcloud/billing-management.html" + ] + }, + { + "title": "Upgrade", + "items": [ + { + "title": "CockroachDB Cloud Upgrade Policy", + "urls": [ + "/cockroachcloud/upgrade-policy.html" + ] + }, + { + "title": "Upgrade a cluster", + "urls": [ + "/cockroachcloud/upgrade-cockroach-version.html" + ] + } + ] + }, + { + "title": "SQL Shell", + "urls": [ + "/cockroachcloud/sql-shell.html" + ] + }, + { + "title": "CockroachDB Cloud API", + "urls": [ + "/cockroachcloud/cloud-api.html" + ] + }, + { + "title": "ccloud CLI", + "urls": [ + "/cockroachcloud/ccloud-get-started.html" + ] + } + ] +} diff --git a/src/current/_includes/v25.3/sidebar-data/connect-to-cockroachdb.json b/src/current/_includes/v25.3/sidebar-data/connect-to-cockroachdb.json new file mode 100644 index 00000000000..640358304ad --- /dev/null +++ b/src/current/_includes/v25.3/sidebar-data/connect-to-cockroachdb.json @@ -0,0 +1,30 @@ +{ + "title": "Connect to an Application", + "is_top_level": true, + "items": [ + { + "title": "Install a Driver or ORM Framework", + "urls": [ + "/${VERSION}/install-client-drivers.html" + ] + }, + { + "title": "Connect to a Cluster", + "urls": [ + "/${VERSION}/connect-to-the-database.html" + ] + }, + { + "title": "Client Connection Parameters", + "urls": [ + "/${VERSION}/connection-parameters.html" + ] + }, + { + "title": "Connection Pooling", + "urls": [ + "/${VERSION}/connection-pooling.html" + ] + } + ] +} \ No newline at end of file diff --git a/src/current/_includes/v25.3/sidebar-data/cross-cluster-replication.json b/src/current/_includes/v25.3/sidebar-data/cross-cluster-replication.json new file mode 100644 index 00000000000..925e3e2aca9 --- /dev/null +++ b/src/current/_includes/v25.3/sidebar-data/cross-cluster-replication.json @@ -0,0 +1,99 @@ +{ + "title": "Cross-Cluster Replication", + "is_top_level": true, + "items": [ + { + "title": "Logical Data Replication", + "items": [ + { + "title": "Overview", + "urls": [ + "/${VERSION}/logical-data-replication-overview.html" + ] + }, + { + "title": "Set Up Logical Data Replication", + "urls": [ + "/${VERSION}/set-up-logical-data-replication.html" + ] + }, + { + "title": "Manage Logical Data Replication", + "urls": [ + "/${VERSION}/manage-logical-data-replication.html" + ] + }, + { + "title": "Monitor Logical Data Replication", + "urls": [ + "/${VERSION}/logical-data-replication-monitoring.html" + ] + } + ] + }, + { + "title": "Physical Cluster Replication", + "items": [ + { + "title": "Overview", + "urls": [ + 
"/${VERSION}/physical-cluster-replication-overview.html" + ] + }, + { + "title": "Set Up Physical Cluster Replication", + "urls": [ + "/${VERSION}/set-up-physical-cluster-replication.html" + ] + }, + { + "title": "Fail Over from a Primary to a Standby Cluster", + "urls": [ + "/${VERSION}/failover-replication.html" + ] + }, + { + "title": "Monitor a Replication Stream", + "urls": [ + "/${VERSION}/physical-cluster-replication-monitoring.html" + ] + }, + { + "title": "Technical Overview", + "urls": [ + "/${VERSION}/physical-cluster-replication-technical-overview.html" + ] + }, + { + "title": "Cluster Virtualization", + "items": [ + { + "title": "Overview", + "urls": [ + "/${VERSION}/cluster-virtualization-overview.html" + ] + }, + { + "title": "Work with Virtual Clusters", + "urls": [ + "/${VERSION}/work-with-virtual-clusters.html" + ] + }, + { + "title": "Setting Scopes", + "urls": [ + "/${VERSION}/cluster-virtualization-setting-scopes.html" + ] + }, + { + "title": "Metric Scopes", + "urls": [ + "/${VERSION}/cluster-virtualization-metric-scopes.html" + ] + } + ] + } + ] + } + ] +} \ No newline at end of file diff --git a/src/current/_includes/v25.3/sidebar-data/faqs.json b/src/current/_includes/v25.3/sidebar-data/faqs.json new file mode 100644 index 00000000000..01887487d4b --- /dev/null +++ b/src/current/_includes/v25.3/sidebar-data/faqs.json @@ -0,0 +1,24 @@ +{ + "title": "FAQs", + "is_top_level": true, + "items": [ + { + "title": "CockroachDB FAQs", + "urls": [ + "/${VERSION}/frequently-asked-questions.html" + ] + }, + { + "title": "Operational FAQs", + "urls": [ + "/${VERSION}/operational-faqs.html" + ] + }, + { + "title": "Licensing FAQs", + "urls": [ + "/${VERSION}/licensing-faqs.html" + ] + } + ] + } diff --git a/src/current/_includes/v25.3/sidebar-data/feature-overview.json b/src/current/_includes/v25.3/sidebar-data/feature-overview.json new file mode 100644 index 00000000000..baf824af1ae --- /dev/null +++ b/src/current/_includes/v25.3/sidebar-data/feature-overview.json @@ -0,0 +1,65 @@ +{ + "title": "Feature Overview", + "is_top_level": true, + "items": [ + { + "title": "CockroachDB Basics", + "items": [ + { + "title": "Why CockroachDB?", + "urls": [ + "/${VERSION}/why-cockroachdb.html" + ] + }, + { + "title": "Replication & Rebalancing", + "urls": [ + "/${VERSION}/demo-replication-and-rebalancing.html" + ] + }, + { + "title": "CockroachDB Resilience", + "urls": [ + "/${VERSION}/demo-cockroachdb-resilience.html" + ] + }, + { + "title": "Serializable Transactions", + "urls": [ + "/${VERSION}/demo-serializable.html" + ] + }, + { + "title": "Multi-Active Availability", + "urls": [ + "/${VERSION}/multi-active-availability.html" + ] + } + ] + }, + { + "title": "CockroachDB SQL", + "items": [ + { + "title": "PostgreSQL Compatibility", + "urls": [ + "/${VERSION}/postgresql-compatibility.html" + ] + }, + { + "title": "SQL Features", + "urls": [ + "/${VERSION}/sql-feature-support.html" + ] + }, + { + "title": "SQL FAQs", + "urls": [ + "/${VERSION}/sql-faqs.html" + ] + } + ] + } + ] +} + diff --git a/src/current/_includes/v25.3/sidebar-data/get-started.json b/src/current/_includes/v25.3/sidebar-data/get-started.json new file mode 100644 index 00000000000..4a48e9e48b5 --- /dev/null +++ b/src/current/_includes/v25.3/sidebar-data/get-started.json @@ -0,0 +1,218 @@ +{ + "title": "Get Started", + "is_top_level": true, + "items": [{ + "title": "Quickstart", + "urls": [ + "/cockroachcloud/quickstart.html" + ] + }, + { + "title": "Install CockroachDB", + "urls": [ + 
"/${VERSION}/install-cockroachdb.html", + "/${VERSION}/install-cockroachdb-mac.html", + "/${VERSION}/install-cockroachdb-linux.html", + "/${VERSION}/fips.html", + "/${VERSION}/install-cockroachdb-windows.html" + ] + }, + { + "title": "Learn CockroachDB SQL", + "urls": [ + "/cockroachcloud/learn-cockroachdb-sql.html", + "/${VERSION}/learn-cockroachdb-sql.html" + ] + }, + { + "title": "Develop with CockroachDB", + "items": [ + { + "title": "Overview", + "urls": [ + "/${VERSION}/developer-guide-overview.html" + ] + }, + { + "title": "Developer Basics", + "urls": [ + "/${VERSION}/developer-basics.html" + ] + }, + { + "title": "JavaScript/TypeScript", + "urls": [ + "/${VERSION}/build-a-nodejs-app-with-cockroachdb.html", + "/${VERSION}/build-a-nodejs-app-with-cockroachdb-sequelize.html", + "/${VERSION}/build-a-nodejs-app-with-cockroachdb-knexjs.html", + "/${VERSION}/build-a-nodejs-app-with-cockroachdb-prisma.html", + "/${VERSION}/build-a-typescript-app-with-cockroachdb.html" + ] + }, + { + "title": "Python", + "urls": [ + "/${VERSION}/build-a-python-app-with-cockroachdb-psycopg3.html", + "/${VERSION}/build-a-python-app-with-cockroachdb.html", + "/${VERSION}/build-a-python-app-with-cockroachdb-sqlalchemy.html", + "/${VERSION}/build-a-python-app-with-cockroachdb-django.html", + "/${VERSION}/build-a-python-app-with-cockroachdb-asyncpg.html" + ] + }, + { + "title": "Golang", + "urls": [ + "/${VERSION}/build-a-go-app-with-cockroachdb.html", + "/${VERSION}/build-a-go-app-with-cockroachdb-gorm.html", + "/${VERSION}/build-a-go-app-with-cockroachdb-pq.html", + "/${VERSION}/build-a-go-app-with-cockroachdb-upperdb.html" + ] + }, + { + "title": "Java", + "urls": [ + "/${VERSION}/build-a-java-app-with-cockroachdb.html", + "/${VERSION}/build-a-java-app-with-cockroachdb-hibernate.html", + "/${VERSION}/build-a-java-app-with-cockroachdb-jooq.html", + "/${VERSION}/build-a-spring-app-with-cockroachdb-mybatis.html" + ] + }, + { + "title": "Ruby", + "urls": [ + "/${VERSION}/build-a-ruby-app-with-cockroachdb.html", + "/${VERSION}/build-a-ruby-app-with-cockroachdb-activerecord.html" + ] + }, + { + "title": "C# (.NET)", + "urls": [ + "/${VERSION}/build-a-csharp-app-with-cockroachdb.html" + ] + }, + { + "title": "Rust", + "urls": [ + "/${VERSION}/build-a-rust-app-with-cockroachdb.html" + ] + }, + { + "title": "Hasura (GraphQL)", + "urls": [ + "/${VERSION}/hasura-getting-started.html" + ] + } + ] + }, + { + "title": "Standard Examples", + "items": [ + { + "title": "AWS Lambda", + "urls": [ + "/${VERSION}/deploy-lambda-function.html" + ] + }, + { + "title": "Google Cloud Run", + "urls": [ + "/${VERSION}/deploy-app-gcr.html" + ] + }, + { + "title": "Netlify", + "urls": [ + "/${VERSION}/deploy-app-netlify.html" + ] + }, + { + "title": "Vercel", + "urls": [ + "/${VERSION}/deploy-app-vercel.html" + ] + }, + { + "title": "Serverless Function Best Practices", + "urls": [ + "/${VERSION}/serverless-function-best-practices.html" + ] + } + ] + }, + { + "title": "Advanced Example Applications", + "items": [ + { + "title": "Overview", + "urls": [ + "/${VERSION}/example-apps.html" + ] + }, + { + "title": "Spring Boot", + "items": [ + { + "title": "Spring Boot with JDBC", + "urls": [ + "/${VERSION}/build-a-spring-app-with-cockroachdb-jdbc.html" + ] + }, + { + "title": "Spring Boot with JPA", + "urls": [ + "/${VERSION}/build-a-spring-app-with-cockroachdb-jpa.html" + ] + } + ] + }, + { + "title": "MovR", + "items": [ + { + "title": "Overview", + "urls": [ + "/${VERSION}/movr.html" + ] + }, + { + "title": "Global Application Use Case", + 
"urls": [ + "/${VERSION}/movr-flask-use-case.html" + ] + }, + { + "title": "Multi-region Database Schema", + "urls": [ + "/${VERSION}/movr-flask-database.html" + ] + }, + { + "title": "Set up a Development Environment", + "urls": [ + "/${VERSION}/movr-flask-setup.html" + ] + }, + { + "title": "Develop a Global Application", + "urls": [ + "/${VERSION}/movr-flask-application.html" + ] + }, + { + "title": "Deploy a Global Application", + "urls": [ + "/${VERSION}/movr-flask-deployment.html" + ] + } + ] + }, + { + "title": "Deploy a Python To-Do App with Flask, Kubernetes, and CockroachDB Cloud", + "urls": [ + "/cockroachcloud/deploy-a-python-to-do-app-with-flask-kubernetes-and-cockroachcloud.html" + ] + } + ] + } + ] +} diff --git a/src/current/_includes/v25.3/sidebar-data/latest-releases.json b/src/current/_includes/v25.3/sidebar-data/latest-releases.json new file mode 100644 index 00000000000..e25f8c0fa7a --- /dev/null +++ b/src/current/_includes/v25.3/sidebar-data/latest-releases.json @@ -0,0 +1,7 @@ +{ + "title": "Latest Releases", + "is_top_level": true, + "items": [ + {% include_cached sidebar-latest-releases.json %} + ] + } diff --git a/src/current/_includes/v25.3/sidebar-data/migrate.json b/src/current/_includes/v25.3/sidebar-data/migrate.json new file mode 100644 index 00000000000..5448baf143e --- /dev/null +++ b/src/current/_includes/v25.3/sidebar-data/migrate.json @@ -0,0 +1,147 @@ +{ + "title": "Migrate", + "is_top_level": true, + "items": [ + { + "title": "Overview", + "urls": [ + "/molt/migration-overview.html" + ] + }, + { + "title": "Migration Strategy", + "urls": [ + "/molt/migration-strategy.html" + ] + }, + { + "title": "Migrate to CockroachDB", + "urls": [ + "/molt/migrate-to-cockroachdb.html" + ] + }, + { + "title": "Migrate in Phases", + "urls": [ + "/molt/migrate-in-phases.html" + ] + }, + { + "title": "Migration Failback", + "urls": [ + "/molt/migrate-failback.html" + ] + }, + { + "title": "MOLT Tools", + "items": [ + { + "title": "Schema Conversion Tool", + "urls": [ + "/cockroachcloud/migrations-page.html" + ] + }, + { + "title": "Fetch", + "urls": [ + "/molt/molt-fetch.html" + ] + }, + { + "title": "Verify", + "urls": [ + "/molt/molt-verify.html" + ] + } + ] + }, + { + "title": "Third-Party Migration Tools", + "items": [ + { + "title": "AWS DMS", + "urls": [ + "/${VERSION}/aws-dms.html" + ] + }, + { + "title": "Qlik Replicate", + "urls": [ + "/${VERSION}/qlik.html" + ] + }, + { + "title": "Striim", + "urls": [ + "/${VERSION}/striim.html" + ] + }, + { + "title": "Oracle GoldenGate", + "urls": [ + "/${VERSION}/goldengate.html" + ] + }, + { + "title": "Debezium", + "urls": [ + "/${VERSION}/debezium.html" + ] + } + ] + }, + { + "title": "Migrate Data Types", + "items": [ + { + "title": "Migrate from CSV", + "urls": [ + "/${VERSION}/migrate-from-csv.html" + ] + }, + { + "title": "Migrate from Avro", + "urls": [ + "/${VERSION}/migrate-from-avro.html" + ] + }, + { + "title": "Migrate from Shapefiles", + "urls": [ + "/${VERSION}/migrate-from-shapefiles.html" + ] + }, + { + "title": "Migrate from OpenStreetMap", + "urls": [ + "/${VERSION}/migrate-from-openstreetmap.html" + ] + }, + { + "title": "Migrate from GeoJSON", + "urls": [ + "/${VERSION}/migrate-from-geojson.html" + ] + }, + { + "title": "Migrate from GeoPackage", + "urls": [ + "/${VERSION}/migrate-from-geopackage.html" + ] + }, + { + "title": "Import Performance Best Practices", + "urls": [ + "/${VERSION}/import-performance-best-practices.html" + ] + } + ] + }, + { + "title": "Migrate from Oracle", + "urls": [ + 
"/${VERSION}/migrate-from-oracle.html" + ] + } + ] +} diff --git a/src/current/_includes/v25.3/sidebar-data/multi-region-capabilities.json b/src/current/_includes/v25.3/sidebar-data/multi-region-capabilities.json new file mode 100644 index 00000000000..6ba988ae925 --- /dev/null +++ b/src/current/_includes/v25.3/sidebar-data/multi-region-capabilities.json @@ -0,0 +1,76 @@ +{ + "title": "Multi-Region Capabilities", + "is_top_level": true, + "items": [ + { + "title": "Overview", + "urls": [ + "/${VERSION}/multiregion-overview.html" + ] + }, + { + "title": "Survival Goals", + "urls": [ + "/${VERSION}/multiregion-survival-goals.html" + ] + }, + { + "title": "Data Domiciling and Performance", + "items": [ + { + "title": "Table Localities", + "urls": [ + "/${VERSION}/table-localities.html" + ] + }, + { + "title": "Table Partitioning", + "urls": [ + "/${VERSION}/partitioning.html" + ] + } + ] + }, + { + "title": "How to Choose a Multi-Region Configuration", + "urls": [ + "/${VERSION}/choosing-a-multi-region-configuration.html" + ] + }, + { + "title": "Multi-Region Zone Config Extensions", + "urls": [ + "/${VERSION}/zone-config-extensions.html" + ] + }, + { + "title": "Multi-Region Tutorials", + "items": [ + { + "title": "Low Latency Reads and Writes", + "urls": [ + "/${VERSION}/demo-low-latency-multi-region-deployment.html" + ] + }, + { + "title": "Data Domiciling with CockroachDB", + "urls": [ + "/${VERSION}/data-domiciling.html" + ] + }, + { + "title": "Migrate to Multi-Region SQL with Replication Zones", + "urls": [ + "/${VERSION}/migrate-to-multiregion-sql.html" + ] + }, + { + "title": "Using GeoServer with CockroachDB", + "urls": [ + "/${VERSION}/geoserver.html" + ] + } + ] + } + ] +} diff --git a/src/current/_includes/v25.3/sidebar-data/optimize-performance.json b/src/current/_includes/v25.3/sidebar-data/optimize-performance.json new file mode 100644 index 00000000000..72b7be7b48b --- /dev/null +++ b/src/current/_includes/v25.3/sidebar-data/optimize-performance.json @@ -0,0 +1,96 @@ +{ + "title": "Optimize Performance", + "is_top_level": true, + "items": [ + { + "title": "Overview", + "urls": [ + "/${VERSION}/make-queries-fast.html" + ] + }, + { + "title": "SQL Performance Best Practices", + "urls": [ + "/${VERSION}/performance-best-practices-overview.html" + ] + }, + + { + "title": "Indexes", + "urls": [ + "/${VERSION}/indexes.html" + ] + }, + { + "title": "Map SQL Activity using an Application Name", + "urls": [ + "/${VERSION}/map-sql-activity-to-app.html" + ] + }, + { + "title": "Cost-Based Optimizer", + "urls": [ + "/${VERSION}/cost-based-optimizer.html" + ] + }, + { + "title": "Vectorized Execution Engine", + "urls": [ + "/${VERSION}/vectorized-execution.html" + ] + }, + { + "title": "Load-Based Splitting", + "urls": [ + "/${VERSION}/load-based-splitting.html" + ] + }, + { + "title": "Replication Controls", + "urls": [ + "/${VERSION}/configure-replication-zones.html" + ] + }, + { + "title": "Admission Control", + "urls": [ + "/${VERSION}/admission-control.html" + ] + }, + { + "title": "Monitor and Analyze Transaction Contention", + "urls": [ + "/${VERSION}/monitor-and-analyze-transaction-contention.html" + ] + }, + { + "title": "Performance Tuning Recipes", + "urls": [ + "/${VERSION}/performance-recipes.html" + ] + }, + { + "title": "Performance Tuning Tutorials", + "items": [ + { + "title": "Statement Tuning with EXPLAIN", + "urls": [ + "/${VERSION}/sql-tuning-with-explain.html" + ] + }, + { + "title": "Apply SQL Statement Performance Rules", + "urls": [ + 
"/${VERSION}/apply-statement-performance-rules.html" + ] + }, + { + "title": "Troubleshoot Lock Contention", + "urls": [ + "/${VERSION}/troubleshoot-lock-contention.html" + ] + } + ] + } + ] +} \ No newline at end of file diff --git a/src/current/_includes/v25.3/sidebar-data/reads-and-writes.json b/src/current/_includes/v25.3/sidebar-data/reads-and-writes.json new file mode 100644 index 00000000000..5dd310e5f1d --- /dev/null +++ b/src/current/_includes/v25.3/sidebar-data/reads-and-writes.json @@ -0,0 +1,189 @@ +{ + "title": "Reads and Writes", + "is_top_level": true, + "items": [ + { + "title": "Overview", + "urls": [ + "/${VERSION}/architecture/reads-and-writes-overview.html" + ] + }, + { + "title": "Read Data", + "items": [ + { + "title": "Query Data", + "urls": [ + "/${VERSION}/query-data.html" + ] + }, + { + "title": "Reusable Views", + "urls": [ + "/${VERSION}/views.html" + ] + }, + { + "title": "Subqueries", + "urls": [ + "/${VERSION}/subqueries.html" + ] + }, + { + "title": "Temporary Tables", + "urls": [ + "/${VERSION}/temporary-tables.html" + ] + }, + { + "title": "Paginate Results", + "urls": [ + "/${VERSION}/pagination.html" + ] + }, + { + "title": "Follower Reads", + "urls": [ + "/${VERSION}/follower-reads.html" + ] + }, + { + "title": "AS OF SYSTEM TIME", + "urls": [ + "/${VERSION}/as-of-system-time.html" + ] + }, + { + "title": "Query Spatial Data", + "urls": [ + "/${VERSION}/query-spatial-data.html" + ] + }, + { + "title": "Export Spatial Data", + "urls": [ + "/${VERSION}/export-spatial-data.html" + ] + } + ] + }, + { + "title": "Write Data", + "items": [ + { + "title": "Insert Data", + "urls": [ + "/${VERSION}/insert-data.html" + ] + }, + { + "title": "Update Data", + "urls": [ + "/${VERSION}/update-data.html" + ] + }, + { + "title": "Bulk-update Data", + "urls": [ + "/${VERSION}/bulk-update-data.html" + ] + }, + { + "title": "Delete Data", + "urls": [ + "/${VERSION}/delete-data.html" + ] + }, + { + "title": "Bulk-delete Data", + "urls": [ + "/${VERSION}/bulk-delete-data.html" + ] + }, + { + "title": "Batch Delete Expired Data with Row-Level TTL", + "urls": [ + "/${VERSION}/row-level-ttl.html" + ] + } + ] + }, + { + "title": "Transactions", + "items": [ + { + "title": "Overview", + "urls": [ + "/${VERSION}/transactions.html" + ] + }, + { + "title": "Advanced Client-side Transaction Retries", + "urls": [ + "/${VERSION}/advanced-client-side-transaction-retries.html" + ] + }, + { + "title": "Life of a Distributed Transaction", + "urls": [ + "/${VERSION}/architecture/life-of-a-distributed-transaction.html" + ] + }, + { + "title": "Read Committed Transactions", + "urls": [ + "/${VERSION}/read-committed.html" + ] + } + ] + }, + { + "title": "Test Your Application Locally", + "urls": [ + "/${VERSION}/local-testing.html" + ] + }, + { + "title": "JSON Support", + "urls": [ + "/${VERSION}/demo-json-support.html" + ] + }, + { + "title": "Spatial Data", + "urls": [ + "/${VERSION}/spatial-tutorial.html" + ] + }, + { + "title": "Cross-Cloud Migration", + "urls": [ + "/${VERSION}/demo-automatic-cloud-migration.html" + ] + }, + { + "title": "SQL Playground", + "is_top_level": true, + "urls": [ + "https://www.cockroachlabs.com/docs/tutorials/sql-playground" + ] + }, + { + "title": "Database Management Tools", + "items": [ + { + "title": "DBeaver GUI", + "urls": [ + "/${VERSION}/dbeaver.html" + ] + }, + { + "title": "IntelliJ IDEA", + "urls": [ + "/${VERSION}/intellij-idea.html" + ] + } + ] + } + ] +} diff --git a/src/current/_includes/v25.3/sidebar-data/reference.json 
b/src/current/_includes/v25.3/sidebar-data/reference.json new file mode 100644 index 00000000000..efcd6296367 --- /dev/null +++ b/src/current/_includes/v25.3/sidebar-data/reference.json @@ -0,0 +1,435 @@ +{ + "title": "Reference", + "is_top_level": true, + "items": [ + { + "title": "Glossary", + "urls": [ + "/${VERSION}/architecture/glossary.html" + ] + }, + { + "title": "Architecture", + "items": [ + { + "title": "Overview", + "urls": [ + "/${VERSION}/architecture/overview.html" + ] + }, + { + "title": "SQL Layer", + "urls": [ + "/${VERSION}/architecture/sql-layer.html" + ] + }, + { + "title": "Transaction Layer", + "urls": [ + "/${VERSION}/architecture/transaction-layer.html" + ] + }, + { + "title": "Distribution Layer", + "urls": [ + "/${VERSION}/architecture/distribution-layer.html" + ] + }, + { + "title": "Replication Layer", + "urls": [ + "/${VERSION}/architecture/replication-layer.html" + ] + }, + { + "title": "Storage Layer", + "urls": [ + "/${VERSION}/architecture/storage-layer.html" + ] + }, + { + "title": "Backups", + "urls": [ + "/${VERSION}/backup-architecture.html" + ] + } + ] + }, + { + "title": "Cloud API", + "urls": [ + "https://www.cockroachlabs.com/docs/api/cloud/v1" + ] + }, + { + "title": "Cluster API", + "urls": [ + "https://www.cockroachlabs.com/docs/api/cluster/v2" + ] + }, + { + "title": "Cluster Settings", + "urls": [ + "/${VERSION}/cluster-settings.html" + ] + }, + { + "title": "Cockroach Commands", + "items": [ + { + "title": "Overview", + "urls": [ + "/${VERSION}/cockroach-commands.html" + ] + }, + { + "title": "cockroach start", + "urls": [ + "/${VERSION}/cockroach-start.html" + ] + }, + { + "title": "cockroach init", + "urls": [ + "/${VERSION}/cockroach-init.html" + ] + }, + { + "title": "cockroach start-single-node", + "urls": [ + "/${VERSION}/cockroach-start-single-node.html" + ] + }, + { + "title": "cockroach cert", + "urls": [ + "/${VERSION}/cockroach-cert.html" + ] + }, + { + "title": "cockroach sql", + "urls": [ + "/${VERSION}/cockroach-sql.html" + ] + }, + { + "title": "cockroach sqlfmt", + "urls": [ + "/${VERSION}/cockroach-sqlfmt.html" + ] + }, + { + "title": "cockroach node", + "urls": [ + "/${VERSION}/cockroach-node.html" + ] + }, + { + "title": "cockroach nodelocal upload", + "urls": [ + "/${VERSION}/cockroach-nodelocal-upload.html" + ] + }, + { + "title": "cockroach auth-session", + "urls": [ + "/${VERSION}/cockroach-auth-session.html" + ] + }, + { + "title": "cockroach demo", + "urls": [ + "/${VERSION}/cockroach-demo.html" + ] + }, + { + "title": "cockroach debug ballast", + "urls": [ + "/${VERSION}/cockroach-debug-ballast.html" + ] + }, + { + "title": "cockroach debug encryption-active-key", + "urls": [ + "/${VERSION}/cockroach-debug-encryption-active-key.html" + ] + }, + { + "title": "cockroach debug encryption-decrypt", + "urls": [ + "/${VERSION}/cockroach-debug-encryption-decrypt.html" + ] + }, + { + "title": "cockroach debug job-trace", + "urls": [ + "/${VERSION}/cockroach-debug-job-trace.html" + ] + }, + { + "title": "cockroach debug list-files", + "urls": [ + "/${VERSION}/cockroach-debug-list-files.html" + ] + }, + { + "title": "cockroach debug merge-logs", + "urls": [ + "/${VERSION}/cockroach-debug-merge-logs.html" + ] + }, + { + "title": "cockroach debug tsdump", + "urls": [ + "/${VERSION}/cockroach-debug-tsdump.html" + ] + }, + { + "title": "cockroach debug zip", + "urls": [ + "/${VERSION}/cockroach-debug-zip.html" + ] + }, + { + "title": "cockroach statement-diag", + "urls": [ + "/${VERSION}/cockroach-statement-diag.html" + ] + }, + { 
+ "title": "cockroach gen", + "urls": [ + "/${VERSION}/cockroach-gen.html" + ] + }, + { + "title": "cockroach userfile upload", + "urls": [ + "/${VERSION}/cockroach-userfile-upload.html" + ] + }, + { + "title": "cockroach userfile list", + "urls": [ + "/${VERSION}/cockroach-userfile-list.html" + ] + }, + { + "title": "cockroach userfile get", + "urls": [ + "/${VERSION}/cockroach-userfile-get.html" + ] + }, + { + "title": "cockroach userfile delete", + "urls": [ + "/${VERSION}/cockroach-userfile-delete.html" + ] + }, + { + "title": "cockroach version", + "urls": [ + "/${VERSION}/cockroach-version.html" + ] + }, + { + "title": "cockroach workload", + "urls": [ + "/${VERSION}/cockroach-workload.html" + ] + }, + { + "title": "The cockroach-sql command", + "urls": [ + "/${VERSION}/cockroach-sql-binary.html" + ] + } + ] + }, + { + "title": "Diagnostics Reporting", + "urls": [ + "/${VERSION}/diagnostics-reporting.html" + ] + }, + { + "title": "Logs", + "items": [ + { + "title": "Logging Levels and Channels", + "urls": [ + "/${VERSION}/logging.html" + ] + }, + { + "title": "Log Formats", + "urls": [ + "/${VERSION}/log-formats.html" + ] + }, + { + "title": "Notable Event Types", + "urls": [ + "/${VERSION}/eventlog.html" + ] + } + ] + }, + { + "title": "Metrics", + "items": [ + { + "title": "Available Metrics", + "urls": [ + "/${VERSION}/metrics.html" + ] + }, + { + "title": "Multi-Dimensional Metrics", + "urls": [ + "/${VERSION}/multi-dimensional-metrics.html" + ] + } + ] + }, + { + "title": "Policies", + "items": [ + { + "title": "CockroachDB Feature Availability", + "urls": [ + "/${VERSION}/cockroachdb-feature-availability.html" + ] + }, + { + "title": "API Support Policy", + "urls": [ + "/${VERSION}/api-support-policy.html" + ] + }, + { + "title": "Telemetry Collected by CockroachDB", + "urls": [ + "/${VERSION}/telemetry.html" + ] + } + ] + }, + { + "title": "Third-Party Support", + "items": [ + { + "title": "Supported Tools", + "urls": [ + "/${VERSION}/third-party-database-tools.html" + ] + }, + { + "title": "Monitoring Integrations", + "urls": [ + "/${VERSION}/third-party-monitoring-tools.html" + ] + }, + { + "title": "Community-supported Tools", + "urls": [ + "/${VERSION}/community-tooling.html" + ] + } + ] + }, + { + "title": "Security", + "items": [ + { + "title": "Overview", + "urls": [ + "/${VERSION}/security-reference/security-overview.html" + ] + }, + { + "title": "Authentication", + "items": [ + { + "title": "SQL Authentication", + "urls": [ + "/${VERSION}/security-reference/authentication.html" + ] + }, + { + "title": "SASL/SCRAM-SHA-256 Secure Password-based Authentication", + "urls": [ + "/${VERSION}/security-reference/scram-authentication.html" + ] + } + ] + }, + { + "title": "Authorization", + "urls": [ + "/${VERSION}/security-reference/authorization.html" + ] + }, + { + "title": "Encryption", + "urls": [ + "/${VERSION}/security-reference/encryption.html" + ] + }, + { + "title": "Column Level Encryption", + "urls": [ + "/${VERSION}/column-level-encryption.html" + ] + }, + { + "title": "Row-level Security", + "urls": [ + "/${VERSION}/row-level-security.html" + ] + }, + { + "title": "PKI and TLS", + "urls": [ + "/${VERSION}/security-reference/transport-layer-security.html" + ] + } + ] + }, + { + "title": "System Catalogs", + "items": [ + { + "title": "Overview", + "urls": [ + "/${VERSION}/system-catalogs.html" + ] + }, + { + "title": "crdb_internal", + "urls": [ + "/${VERSION}/crdb-internal.html" + ] + }, + { + "title": "information_schema", + "urls": [ + 
"/${VERSION}/information-schema.html" + ] + }, + { + "title": "pg_catalog", + "urls": [ + "/${VERSION}/pg-catalog.html" + ] + }, + { + "title": "pg_extension", + "urls": [ + "/${VERSION}/pg-extension.html" + ] + } + ] + } + ] + } diff --git a/src/current/_includes/v25.3/sidebar-data/releases.json b/src/current/_includes/v25.3/sidebar-data/releases.json new file mode 100644 index 00000000000..fb49f7c9acc --- /dev/null +++ b/src/current/_includes/v25.3/sidebar-data/releases.json @@ -0,0 +1,7 @@ +{ + "title": "CockroachDB Releases", + "is_top_level": true, + "items": [ + {% include_cached sidebar-all-releases.json %} + ] + } diff --git a/src/current/_includes/v25.3/sidebar-data/resilience.json b/src/current/_includes/v25.3/sidebar-data/resilience.json new file mode 100644 index 00000000000..df6d97fbd9b --- /dev/null +++ b/src/current/_includes/v25.3/sidebar-data/resilience.json @@ -0,0 +1,18 @@ +{ + "title": "Data Resilience", + "is_top_level": true, + "items": [ + { + "title": "Overview", + "urls": [ + "/${VERSION}/data-resilience.html" + ] + }, + { + "title": "Disaster Recovery", + "urls": [ + "/${VERSION}/disaster-recovery-overview.html" + ] + } + ] +} \ No newline at end of file diff --git a/src/current/_includes/v25.3/sidebar-data/schema-design.json b/src/current/_includes/v25.3/sidebar-data/schema-design.json new file mode 100644 index 00000000000..905508f98e6 --- /dev/null +++ b/src/current/_includes/v25.3/sidebar-data/schema-design.json @@ -0,0 +1,141 @@ +{ + "title": "Schema Design", + "items": [ + { + "title": "Database Schemas", + "urls": [ + "/${VERSION}/schema-design-overview.html" + ] + }, + { + "title": "Create a Database", + "urls": [ + "/${VERSION}/schema-design-database.html" + ] + }, + { + "title": "Create a User-defined Schema", + "urls": [ + "/${VERSION}/schema-design-schema.html" + ] + }, + { + "title": "Create a Table", + "urls": [ + "/${VERSION}/schema-design-table.html" + ] + }, + { + "title": "Computed Columns", + "urls": [ + "/${VERSION}/computed-columns.html" + ] + }, + { + "title": "Column Families", + "urls": [ + "/${VERSION}/column-families.html" + ] + }, + { + "title": "Scale to Multiple Regions", + "urls": [ + "/${VERSION}/multiregion-scale-application.html" + ] + }, + { + "title": "Indexes", + "items": [ + { + "title": "Secondary Indexes", + "urls": [ + "/${VERSION}/schema-design-indexes.html" + ] + }, + { + "title": "Partial Indexes", + "urls": [ + "/${VERSION}/partial-indexes.html" + ] + }, + { + "title": "Hash-sharded Indexes", + "urls": [ + "/${VERSION}/hash-sharded-indexes.html" + ] + }, + { + "title": "Generalized Inverted Indexes", + "urls": [ + "/${VERSION}/inverted-indexes.html" + ] + }, + { + "title": "Full-Text Search", + "urls": [ + "/${VERSION}/full-text-search.html" + ] + }, + { + "title": "Trigram Indexes", + "urls": [ + "/${VERSION}/trigram-indexes.html" + ] + }, + { + "title": "Expression Indexes", + "urls": [ + "/${VERSION}/expression-indexes.html" + ] + }, + { + "title": "Spatial Indexes", + "urls": [ + "/${VERSION}/spatial-indexes.html" + ] + }, + { + "title": "Vector Indexes", + "urls": [ + "/${VERSION}/vector-indexes.html" + ] + } + ] + }, + { + "title": "Update a Database Schema", + "items": [ + { + "title": "Change and Remove Objects", + "urls": [ + "/${VERSION}/schema-design-update.html" + ] + }, + { + "title": "Online Schema Changes", + "urls": [ + "/${VERSION}/online-schema-changes.html" + ] + }, + { + "title": "Use Alembic", + "urls": [ + "/${VERSION}/alembic.html" + ] + }, + { + "title": "Use Flyway", + "urls": [ + 
"/${VERSION}/flyway.html" + ] + }, + { + "title": "Use Liquibase", + "urls": [ + "/${VERSION}/liquibase.html" + ] + } + ] + } + ] +} diff --git a/src/current/_includes/v25.3/sidebar-data/self-hosted-deployments.json b/src/current/_includes/v25.3/sidebar-data/self-hosted-deployments.json new file mode 100644 index 00000000000..d5ce5fe0975 --- /dev/null +++ b/src/current/_includes/v25.3/sidebar-data/self-hosted-deployments.json @@ -0,0 +1,763 @@ +{ + "title": "Self-Hosted Deployments", + "is_top_level": true, + "items": [ + { + "title": "Production Checklist", + "urls": [ + "/${VERSION}/recommended-production-settings.html" + ] + }, + { + "title": "Deployment and Operations Skills Taxonomy", + "urls": [ + "/${VERSION}/deployment-operations-skills-taxonomy.html" + ] + }, + { + "title": "Deploy Locally", + "items": [ + { + "title": "Deploy from Binary", + "urls": [ + "/${VERSION}/secure-a-cluster.html", + "/${VERSION}/start-a-local-cluster.html" + ] + }, + { + "title": "Deploy in Kubernetes", + "urls": [ + "/${VERSION}/orchestrate-a-local-cluster-with-kubernetes.html", + "/${VERSION}/orchestrate-a-local-cluster-with-kubernetes-insecure.html" + ] + }, + { + "title": "Deploy in Docker", + "urls": [ + "/${VERSION}/start-a-local-cluster-in-docker-mac.html", + "/${VERSION}/start-a-local-cluster-in-docker-linux.html", + "/${VERSION}/start-a-local-cluster-in-docker-windows.html" + ] + }, + { + "title": "Simulate a Multi-Region Cluster Locally", + "urls": [ + "/${VERSION}/simulate-a-multi-region-cluster-on-localhost.html" + ] + } + ] + }, + { + "title": "Deploy Manually", + "items": [ + { + "title": "Overview", + "urls": [ + "/${VERSION}/manual-deployment.html" + ] + }, + { + "title": "Deploy On-Premises", + "urls": [ + "/${VERSION}/deploy-cockroachdb-on-premises.html", + "/${VERSION}/deploy-cockroachdb-on-premises-insecure.html" + ] + }, + { + "title": "Deploy on AWS", + "urls": [ + "/${VERSION}/deploy-cockroachdb-on-aws.html", + "/${VERSION}/deploy-cockroachdb-on-aws-insecure.html" + ] + }, + { + "title": "Deploy on Azure", + "urls": [ + "/${VERSION}/deploy-cockroachdb-on-microsoft-azure.html", + "/${VERSION}/deploy-cockroachdb-on-microsoft-azure-insecure.html" + ] + }, + { + "title": "Deploy on Google Cloud Platform GCE", + "urls": [ + "/${VERSION}/deploy-cockroachdb-on-google-cloud-platform.html", + "/${VERSION}/deploy-cockroachdb-on-google-cloud-platform-insecure.html" + ] + }, + { + "title": "Deploy on Digital Ocean", + "urls": [ + "/${VERSION}/deploy-cockroachdb-on-digital-ocean.html", + "/${VERSION}/deploy-cockroachdb-on-digital-ocean-insecure.html" + ] + } + ] + }, + { + "title": "Deploy on Kubernetes", + "items": [ + { + "title": "Overview", + "urls": [ + "/${VERSION}/kubernetes-overview.html" + ] + }, + { + "title": "Single-Cluster Deployment", + "urls": [ + "/${VERSION}/deploy-cockroachdb-with-kubernetes.html", + "/${VERSION}/deploy-cockroachdb-with-kubernetes-insecure.html" + ] + }, + { + "title": "OpenShift Deployment", + "urls": [ + "/${VERSION}/deploy-cockroachdb-with-kubernetes-openshift.html" + ] + }, + { + "title": "Multi-Cluster Deployment", + "urls": [ + "/${VERSION}/orchestrate-cockroachdb-with-kubernetes-multi-cluster.html" + ] + }, + { + "title": "Operate on Kubernetes", + "items": [ + { + "title": "Pod Scheduling", + "urls": [ + "/${VERSION}/schedule-cockroachdb-kubernetes.html" + ] + }, + { + "title": "Resource Management", + "urls": [ + "/${VERSION}/configure-cockroachdb-kubernetes.html" + ] + }, + { + "title": "Certificate Management", + "urls": [ + 
"/${VERSION}/secure-cockroachdb-kubernetes.html" + ] + }, + { + "title": "Cluster Scaling", + "urls": [ + "/${VERSION}/scale-cockroachdb-kubernetes.html" + ] + }, + { + "title": "Cluster Monitoring", + "urls": [ + "/${VERSION}/monitor-cockroachdb-kubernetes.html" + ] + }, + { + "title": "Cluster Upgrades", + "urls": [ + "/${VERSION}/upgrade-cockroachdb-kubernetes.html" + ] + }, + { + "title": "Kubernetes Performance", + "urls": [ + "/${VERSION}/kubernetes-performance.html" + ] + } + ] + } + ] + }, + { + "title": "Multi-Region for Self-Hosted Deployments", + "items": [ + { + "title": "Topology Patterns Overview", + "urls": [ + "/${VERSION}/topology-patterns.html" + ] + }, + { + "title": "Development Topology", + "urls": [ + "/${VERSION}/topology-development.html" + ] + }, + { + "title": "Basic Production Topology", + "urls": [ + "/${VERSION}/topology-basic-production.html" + ] + }, + { + "title": "Follower Reads Topology", + "urls": [ + "/${VERSION}/topology-follower-reads.html" + ] + }, + { + "title": "Follow-the-Workload Topology", + "urls": [ + "/${VERSION}/topology-follow-the-workload.html" + ] + }, + { + "title": "Regional Tables", + "urls": [ + "/${VERSION}/regional-tables.html" + ] + }, + { + "title": "Global Tables", + "urls": [ + "/${VERSION}/global-tables.html" + ] + } + ] + }, + { + "title": "Security", + "items": [ + { + "title": "Certificates", + "items": [ + { + "title": "Use the CockroachDB CLI to provision a development cluster", + "urls": [ + "/${VERSION}/manage-certs-cli.html" + ] + }, + { + "title": "Manage PKI certificates with HashiCorp Vault", + "urls": [ + "/${VERSION}/manage-certs-vault.html" + ] + }, + { + "title": "Create Security Certificates using OpenSSL", + "urls": [ + "/${VERSION}/create-security-certificates-openssl.html" + ] + }, + { + "title": "Use Online Certificate Status Protocol (OCSP)", + "urls": [ + "/${VERSION}/manage-certs-revoke-ocsp.html" + ] + }, + { + "title": "Certificate-based authentication using multiple values from the X.509 SUBJECT field", + "urls": [ + "/${VERSION}/certificate-based-authentication-using-the-x509-subject-field.html" + ] + } + ] + }, + { + "title": "Authentication", + "urls": [ + "/${VERSION}/authentication.html" + ] + }, + { + "title": "Encryption", + "urls": [ + "/${VERSION}/encryption.html" + ] + }, + { + "title": "Authorization", + "urls": [ + "/${VERSION}/authorization.html" + ] + }, + { + "title": "Table-based SQL Audit Logging", + "urls": [ + "/${VERSION}/sql-audit-logging.html" + ] + }, + { + "title": "Role-based SQL Audit Logging", + "urls": [ + "/${VERSION}/role-based-audit-logging.html" + ] + }, + { + "title": "LDAP Authentication", + "urls": [ + "/${VERSION}/ldap-authentication.html" + ] + }, + { + "title": "LDAP Authorization", + "urls": [ + "/${VERSION}/ldap-authorization.html" + ] + }, + { + "title": "GSSAPI Authentication", + "urls": [ + "/${VERSION}/gssapi_authentication.html" + ] + }, + { + "title": "Cluster SSO using JWT", + "urls": [ + "/${VERSION}/sso-sql.html" + ] + }, + { + "title": "SSO for DB Console", + "urls": [ + "/${VERSION}/sso-db-console.html" + ] + }, + { + "title": "Rotate Security Certificates", + "urls": [ + "/${VERSION}/rotate-certificates.html" + ] + }, + { + "title": "Security Tutorials", + "items": [ + { + "title": "Use Hashicorp Vault's Dynamic Secrets", + "urls": [ + "/${VERSION}/vault-db-secrets-tutorial.html" + ] + } + ] + } + ] + }, + { + "title": "Monitoring and Alerting", + "items": [ + { + "title": "Overview", + "urls": [ + "/${VERSION}/monitoring-and-alerting.html" + ] + }, + 
{ + "title": "Common Issues to Monitor", + "urls": [ + "/${VERSION}/common-issues-to-monitor.html" + ] + }, + { + "title": "Enable the Node Map", + "urls": [ + "/${VERSION}/enable-node-map.html" + ] + }, + { + "title": "Use Prometheus and Alertmanager", + "urls": [ + "/${VERSION}/monitor-cockroachdb-with-prometheus.html" + ] + }, + { + "title": "Monitor CockroachDB {{ site.data.products.core }} with Datadog", + "urls": [ + "/${VERSION}/datadog.html" + ] + }, + { + "title": "Monitor CockroachDB {{ site.data.products.core }} with DBmarlin", + "urls": [ + "/${VERSION}/dbmarlin.html" + ] + }, + { + "title": "Monitor CockroachDB {{ site.data.products.core }} with Kibana", + "urls": [ + "/${VERSION}/kibana.html" + ] + }, + { + "title": "Essential Metrics for CockroachDB {{ site.data.products.core }} Deployments", + "urls": [ + "/${VERSION}/essential-metrics-self-hosted.html" + ] + }, + { + "title": "Essential Alerts for CockroachDB {{ site.data.products.core }} Deployments", + "urls": [ + "/${VERSION}/essential-alerts-self-hosted.html" + ] + } + ] + }, + { + "title": "Administration", + "items": [ + { + "title": "DB Console Overview", + "urls": [ + "/${VERSION}/ui-overview.html" + ] + }, + { + "title": "Cluster Overview Page", + "urls": [ + "/${VERSION}/ui-cluster-overview-page.html" + ] + }, + { + "title": "Metrics Dashboards", + "items": [ + { + "title": "Overview", + "urls": [ + "/${VERSION}/ui-overview-dashboard.html" + ] + }, + { + "title": "Hardware", + "urls": [ + "/${VERSION}/ui-hardware-dashboard.html" + ] + }, + { + "title": "Runtime", + "urls": [ + "/${VERSION}/ui-runtime-dashboard.html" + ] + }, + { + "title": "Networking", + "urls": [ + "/${VERSION}/ui-networking-dashboard.html" + ] + }, + { + "title": "SQL", + "urls": [ + "/${VERSION}/ui-sql-dashboard.html" + ] + }, + { + "title": "Storage", + "urls": [ + "/${VERSION}/ui-storage-dashboard.html" + ] + }, + { + "title": "Replication", + "urls": [ + "/${VERSION}/ui-replication-dashboard.html" + ] + }, + { + "title": "Distributed", + "urls": [ + "/${VERSION}/ui-distributed-dashboard.html" + ] + }, + { + "title": "Queues", + "urls": [ + "/${VERSION}/ui-queues-dashboard.html" + ] + }, + { + "title": "Slow Requests", + "urls": [ + "/${VERSION}/ui-slow-requests-dashboard.html" + ] + }, + { + "title": "Changefeeds", + "urls": [ + "/${VERSION}/ui-cdc-dashboard.html" + ] + }, + { + "title": "Overload", + "urls": [ + "/${VERSION}/ui-overload-dashboard.html" + ] + }, + { + "title": "TTL", + "urls": [ + "/${VERSION}/ui-ttl-dashboard.html" + ] + }, + { + "title": "Physical Cluster Replication", + "urls": [ + "/${VERSION}/ui-physical-cluster-replication-dashboard.html" + ] + }, + { + "title": "Logical Data Replication", + "urls": [ + "/${VERSION}/ui-logical-data-replication-dashboard.html" + ] + }, + { + "title": "Custom Chart", + "urls": [ + "/${VERSION}/ui-custom-chart-debug-page.html" + ] + } + ] + }, + { + "title": "Databases Page", + "urls": [ + "/${VERSION}/ui-databases-page.html" + ] + }, + { + "title": "Sessions Page", + "urls": [ + "/${VERSION}/ui-sessions-page.html" + ] + }, + { + "title": "Statements Page", + "urls": [ + "/${VERSION}/ui-statements-page.html" + ] + }, + { + "title": "Transactions Page", + "urls": [ + "/${VERSION}/ui-transactions-page.html" + ] + }, + { + "title": "Insights Page", + "urls": [ + "/${VERSION}/ui-insights-page.html" + ] + }, + { + "title": "Network Page", + "urls": [ + "/${VERSION}/ui-network-latency-page.html" + ] + }, + { + "title": "Hot Ranges Page", + "urls": [ + "/${VERSION}/ui-hot-ranges-page.html" + ] 
+ }, + { + "title": "Jobs Page", + "urls": [ + "/${VERSION}/ui-jobs-page.html" + ] + }, + { + "title": "Schedules Page", + "urls": [ + "/${VERSION}/ui-schedules-page.html" + ] + }, + { + "title": "Advanced Debug Page", + "urls": [ + "/${VERSION}/ui-debug-pages.html" + ] + }, + { + "title": "Key Visualizer", + "urls": [ + "/${VERSION}/ui-key-visualizer.html" + ] + }, + { + "title": "WAL Failover", + "urls": [ + "/${VERSION}/wal-failover.html" + ] + } + ] + }, + { + "title": "Backups and Restores", + "items": [ + { + "title": "Overview", + "urls": [ + "/${VERSION}/backup-and-restore-overview.html" + ] + }, + { + "title": "Full and Incremental Backups", + "urls": [ + "/${VERSION}/take-full-and-incremental-backups.html" + ] + }, + { + "title": "Backups with Revision History and Point-in-time Restore", + "urls": [ + "/${VERSION}/take-backups-with-revision-history-and-restore-from-a-point-in-time.html" + ] + }, + { + "title": "Encrypted Backup and Restore", + "urls": [ + "/${VERSION}/take-and-restore-encrypted-backups.html" + ] + }, + { + "title": "Locality-restricted Backup Execution", + "urls": [ + "/${VERSION}/take-locality-restricted-backups.html" + ] + }, + { + "title": "Locality-aware Backup and Restore", + "urls": [ + "/${VERSION}/take-and-restore-locality-aware-backups.html" + ] + }, + { + "title": "Scheduled Backups", + "urls": [ + "/${VERSION}/manage-a-backup-schedule.html" + ] + }, + { + "title": "Backup Validation", + "urls": [ + "/${VERSION}/backup-validation.html" + ] + }, + { + "title": "Expire Past Backups", + "urls": [ + "/${VERSION}/expire-past-backups.html" + ] + }, + { + "title": "Backup and Restore Monitoring", + "urls": [ + "/${VERSION}/backup-and-restore-monitoring.html" + ] + } + ] + }, + { + "title": "File Storage", + "items": [ + { + "title": "Cloud Storage", + "urls": [ + "/${VERSION}/use-cloud-storage.html" + ] + }, + { + "title": "Cloud Storage Authentication", + "urls": [ + "/${VERSION}/cloud-storage-authentication.html" + ] + }, + { + "title": "Userfile Storage", + "urls": [ + "/${VERSION}/use-userfile-storage.html" + ] + }, + { + "title": "Local File Server", + "urls": [ + "/${VERSION}/use-a-local-file-server.html" + ] + } + ] + }, + { + "title": "Cluster Maintenance", + "items": [ + { + "title": "Upgrade CockroachDB", + "urls": [ + "/${VERSION}/upgrade-cockroach-version.html" + ] + }, + { + "title": "Disaster Recovery Planning", + "urls": [ + "/${VERSION}/disaster-recovery-planning.html" + ] + }, + { + "title": "Restoring Backups Across Versions", + "urls": [ + "/${VERSION}/restoring-backups-across-versions.html" + ] + }, + { + "title": "Manage Long-Running Queries", + "urls": [ + "/${VERSION}/manage-long-running-queries.html" + ] + }, + { + "title": "Node Shutdown", + "urls": [ + "/${VERSION}/node-shutdown.html" + ] + } + ] + }, + { + "title": "Logs", + "items": [ + { + "title": "Overview", + "urls": [ + "/${VERSION}/logging-overview.html" + ] + }, + { + "title": "Configure Logs", + "urls": [ + "/${VERSION}/configure-logs.html" + ] + }, + { + "title": "Logging Use Cases", + "urls": [ + "/${VERSION}/logging-use-cases.html" + ] + }, + { + "title": "Log SQL Activity to Datadog", + "urls": [ + "/${VERSION}/log-sql-activity-to-datadog.html" + ] + }, + { + "title": "Logging Best Practices", + "urls": [ + "/${VERSION}/logging-best-practices.html" + ] + }, + { + "title": "Critical Log Messages", + "urls": [ + "/${VERSION}/critical-log-messages.html" + ] + } + ] + }, + { + "title": "Cluster API", + "urls": [ + "/${VERSION}/cluster-api.html" + ] + } + ] +} diff --git 
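Some pages are deliberately cross-listed in these trees, e.g. `provision-a-cluster-with-terraform.html` appears under both Standard and Advanced deployments (once with `?filters=advanced`), as does `migrate-from-standard-to-advanced.html`. So an audit for accidental duplicates has to normalize away query strings and report repeats as warnings to review rather than errors. A sketch under those assumptions:

```python
import json
from collections import Counter
from pathlib import Path
from urllib.parse import urlsplit

def all_urls(node):
    """Yield every URL path in a sidebar tree, dropping ?filters=... so
    tab variants of one page count as the same page."""
    for u in node.get("urls", []):
        yield urlsplit(u).path
    for child in node.get("items", []):
        yield from all_urls(child)

def audit(sidebar_files):
    """Return URL paths listed more than once across the given files."""
    counts = Counter()
    for f in sidebar_files:
        counts.update(all_urls(json.loads(Path(f).read_text())))
    return {url: n for url, n in counts.items() if n > 1}
```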
a/src/current/_includes/v25.3/sidebar-data/sql.json b/src/current/_includes/v25.3/sidebar-data/sql.json new file mode 100644 index 00000000000..b4b9e2a71b6 --- /dev/null +++ b/src/current/_includes/v25.3/sidebar-data/sql.json @@ -0,0 +1,1320 @@ +{ + "title": "SQL", + "is_top_level": true, + "items": [{ + "title": "Statements", + "items": [ + { + "title": "Overview", + "urls": [ + "/${VERSION}/sql-statements.html" + ] + }, + { + "title": "ALTER BACKUP", + "urls": [ + "/${VERSION}/alter-backup.html" + ] + }, + { + "title": "ALTER BACKUP SCHEDULE", + "urls": [ + "/${VERSION}/alter-backup-schedule.html" + ] + }, + { + "title": "ALTER CHANGEFEED", + "urls": [ + "/${VERSION}/alter-changefeed.html" + ] + }, + { + "title": "ALTER DATABASE", + "urls": [ + "/${VERSION}/alter-database.html" + ] + }, + { + "title": "ALTER DEFAULT PRIVILEGES", + "urls": [ + "/${VERSION}/alter-default-privileges.html" + ] + }, + { + "title": "ALTER FUNCTION", + "urls": [ + "/${VERSION}/alter-function.html" + ] + }, + { + "title": "ALTER INDEX", + "urls": [ + "/${VERSION}/alter-index.html" + ] + }, + { + "title": "ALTER JOB", + "urls": [ + "/${VERSION}/alter-job.html" + ] + }, + { + "title": "ALTER PARTITION", + "urls": [ + "/${VERSION}/alter-partition.html" + ] + }, + { + "title": "ALTER POLICY", + "urls": [ + "/${VERSION}/alter-policy.html" + ] + }, + { + "title": "ALTER PROCEDURE", + "urls": [ + "/${VERSION}/alter-procedure.html" + ] + }, + { + "title": "ALTER RANGE", + "urls": [ + "/${VERSION}/alter-range.html" + ] + }, + { + "title": "ALTER ROLE", + "urls": [ + "/${VERSION}/alter-role.html" + ] + }, + { + "title": "ALTER SCHEMA", + "urls": [ + "/${VERSION}/alter-schema.html" + ] + }, + { + "title": "ALTER SEQUENCE", + "urls": [ + "/${VERSION}/alter-sequence.html" + ] + }, + { + "title": "ALTER TABLE", + "urls": [ + "/${VERSION}/alter-table.html" + ] + }, + { + "title": "ALTER TYPE", + "urls": [ + "/${VERSION}/alter-type.html" + ] + }, + { + "title": "ALTER USER", + "urls": [ + "/${VERSION}/alter-user.html" + ] + }, + { + "title": "ALTER VIEW", + "urls": [ + "/${VERSION}/alter-view.html" + ] + }, + { + "title": "ALTER VIRTUAL CLUSTER", + "urls": [ + "/${VERSION}/alter-virtual-cluster.html" + ] + }, + { + "title": "BACKUP", + "urls": [ + "/${VERSION}/backup.html" + ] + }, + { + "title": "BEGIN", + "urls": [ + "/${VERSION}/begin-transaction.html" + ] + }, + { + "title": "CALL", + "urls": [ + "/${VERSION}/call.html" + ] + }, + { + "title": "CANCEL JOB", + "urls": [ + "/${VERSION}/cancel-job.html" + ] + }, + { + "title": "CANCEL QUERY", + "urls": [ + "/${VERSION}/cancel-query.html" + ] + }, + { + "title": "CANCEL SESSION", + "urls": [ + "/${VERSION}/cancel-session.html" + ] + }, + { + "title": "COMMENT ON", + "urls": [ + "/${VERSION}/comment-on.html" + ] + }, + { + "title": "COMMIT", + "urls": [ + "/${VERSION}/commit-transaction.html" + ] + }, + { + "title": "COPY", + "urls": [ + "/${VERSION}/copy.html" + ] + }, + { + "title": "CREATE CHANGEFEED", + "urls": [ + "/${VERSION}/create-changefeed.html" + ] + }, + { + "title": "CREATE DATABASE", + "urls": [ + "/${VERSION}/create-database.html" + ] + }, + { + "title": "CREATE EXTERNAL CONNECTION", + "urls": [ + "/${VERSION}/create-external-connection.html" + ] + }, + { + "title": "CREATE FUNCTION", + "urls": [ + "/${VERSION}/create-function.html" + ] + }, + { + "title": "CREATE INDEX", + "urls": [ + "/${VERSION}/create-index.html" + ] + }, + { + "title": "CREATE LOGICALLY REPLICATED", + "urls": [ + "/${VERSION}/create-logically-replicated.html" + ] + }, + { + "title": "CREATE 
LOGICAL REPLICATION STREAM", + "urls": [ + "/${VERSION}/create-logical-replication-stream.html" + ] + }, + { + "title": "CREATE POLICY", + "urls": [ + "/${VERSION}/create-policy.html" + ] + }, + { + "title": "CREATE PROCEDURE", + "urls": [ + "/${VERSION}/create-procedure.html" + ] + }, + { + "title": "CREATE ROLE", + "urls": [ + "/${VERSION}/create-role.html" + ] + }, + { + "title": "CREATE SCHEDULE FOR BACKUP", + "urls": [ + "/${VERSION}/create-schedule-for-backup.html" + ] + }, + { + "title": "CREATE SCHEDULE FOR CHANGEFEED", + "urls": [ + "/${VERSION}/create-schedule-for-changefeed.html" + ] + }, + { + "title": "CREATE SCHEMA", + "urls": [ + "/${VERSION}/create-schema.html" + ] + }, + { + "title": "CREATE SEQUENCE", + "urls": [ + "/${VERSION}/create-sequence.html" + ] + }, + { + "title": "CREATE STATISTICS", + "urls": [ + "/${VERSION}/create-statistics.html" + ] + }, + { + "title": "CREATE TABLE", + "urls": [ + "/${VERSION}/create-table.html" + ] + }, + { + "title": "CREATE TABLE AS", + "urls": [ + "/${VERSION}/create-table-as.html" + ] + }, + { + "title": "CREATE TRIGGER", + "urls": [ + "/${VERSION}/create-trigger.html" + ] + }, + { + "title": "CREATE TYPE", + "urls": [ + "/${VERSION}/create-type.html" + ] + }, + { + "title": "CREATE USER", + "urls": [ + "/${VERSION}/create-user.html" + ] + }, + { + "title": "CREATE VIEW", + "urls": [ + "/${VERSION}/create-view.html" + ] + }, + { + "title": "CREATE VIRTUAL CLUSTER", + "urls": [ + "/${VERSION}/create-virtual-cluster.html" + ] + }, + { + "title": "DELETE", + "urls": [ + "/${VERSION}/delete.html" + ] + }, + { + "title": "DO", + "urls": [ + "/${VERSION}/do.html" + ] + }, + { + "title": "DROP DATABASE", + "urls": [ + "/${VERSION}/drop-database.html" + ] + }, + { + "title": "DROP EXTERNAL CONNECTION", + "urls": [ + "/${VERSION}/drop-external-connection.html" + ] + }, + { + "title": "DROP FUNCTION", + "urls": [ + "/${VERSION}/drop-function.html" + ] + }, + { + "title": "DROP OWNED BY", + "urls": [ + "/${VERSION}/drop-owned-by.html" + ] + }, + { + "title": "DROP POLICY", + "urls": [ + "/${VERSION}/drop-policy.html" + ] + }, + { + "title": "DROP TRIGGER", + "urls": [ + "/${VERSION}/drop-trigger.html" + ] + }, + { + "title": "DROP TYPE", + "urls": [ + "/${VERSION}/drop-type.html" + ] + }, + { + "title": "DROP INDEX", + "urls": [ + "/${VERSION}/drop-index.html" + ] + }, + { + "title": "DROP PROCEDURE", + "urls": [ + "/${VERSION}/drop-procedure.html" + ] + }, + { + "title": "DROP ROLE", + "urls": [ + "/${VERSION}/drop-role.html" + ] + }, + { + "title": "DROP SCHEDULES", + "urls": [ + "/${VERSION}/drop-schedules.html" + ] + }, + { + "title": "DROP SCHEMA", + "urls": [ + "/${VERSION}/drop-schema.html" + ] + }, + { + "title": "DROP SEQUENCE", + "urls": [ + "/${VERSION}/drop-sequence.html" + ] + }, + { + "title": "DROP TABLE", + "urls": [ + "/${VERSION}/drop-table.html" + ] + }, + { + "title": "DROP USER", + "urls": [ + "/${VERSION}/drop-user.html" + ] + }, + { + "title": "DROP VIEW", + "urls": [ + "/${VERSION}/drop-view.html" + ] + }, + { + "title": "DROP VIRTUAL CLUSTER", + "urls": [ + "/${VERSION}/drop-virtual-cluster.html" + ] + }, + { + "title": "EXPERIMENTAL CHANGEFEED FOR", + "urls": [ + "/${VERSION}/changefeed-for.html" + ] + }, + { + "title": "EXPLAIN", + "urls": [ + "/${VERSION}/explain.html" + ] + }, + { + "title": "EXPLAIN ANALYZE", + "urls": [ + "/${VERSION}/explain-analyze.html" + ] + }, + { + "title": "EXPORT", + "urls": [ + "/${VERSION}/export.html" + ] + }, + { + "title": "GRANT", + "urls": [ + "/${VERSION}/grant.html" + ] + }, + { + 
"title": "IMPORT INTO", + "urls": [ + "/${VERSION}/import-into.html" + ] + }, + { + "title": "INSERT", + "urls": [ + "/${VERSION}/insert.html" + ] + }, + { + "title": "JOIN", + "urls": [ + "/${VERSION}/joins.html" + ] + }, + { + "title": "LIMIT/OFFSET", + "urls": [ + "/${VERSION}/limit-offset.html" + ] + }, + { + "title": "ORDER BY", + "urls": [ + "/${VERSION}/order-by.html" + ] + }, + { + "title": "PAUSE JOB", + "urls": [ + "/${VERSION}/pause-job.html" + ] + }, + { + "title": "PAUSE SCHEDULES", + "urls": [ + "/${VERSION}/pause-schedules.html" + ] + }, + { + "title": "REASSIGN OWNED", + "urls": [ + "/${VERSION}/reassign-owned.html" + ] + }, + { + "title": "REFRESH", + "urls": [ + "/${VERSION}/refresh.html" + ] + }, + { + "title": "RELEASE SAVEPOINT", + "urls": [ + "/${VERSION}/release-savepoint.html" + ] + }, + { + "title": "RESET CLUSTER SETTING", + "urls": [ + "/${VERSION}/reset-cluster-setting.html" + ] + }, + { + "title": "RESET {session variable}", + "urls": [ + "/${VERSION}/reset-vars.html" + ] + }, + { + "title": "RESTORE", + "urls": [ + "/${VERSION}/restore.html" + ] + }, + { + "title": "RESUME JOB", + "urls": [ + "/${VERSION}/resume-job.html" + ] + }, + { + "title": "RESUME SCHEDULES", + "urls": [ + "/${VERSION}/resume-schedules.html" + ] + }, + { + "title": "REVOKE", + "urls": [ + "/${VERSION}/revoke.html" + ] + }, + { + "title": "ROLLBACK", + "urls": [ + "/${VERSION}/rollback-transaction.html" + ] + }, + { + "title": "SAVEPOINT", + "urls": [ + "/${VERSION}/savepoint.html" + ] + }, + { + "title": "SELECT", + "urls": [ + "/${VERSION}/select-clause.html" + ] + }, + { + "title": "FOR UPDATE and FOR SHARE", + "urls": [ + "/${VERSION}/select-for-update.html" + ] + }, + { + "title": "SET CLUSTER SETTING", + "urls": [ + "/${VERSION}/set-cluster-setting.html" + ] + }, + { + "title": "SET {session variable}", + "urls": [ + "/${VERSION}/set-vars.html" + ] + }, + { + "title": "SET TRANSACTION", + "urls": [ + "/${VERSION}/set-transaction.html" + ] + }, + { + "title": "SHOW BACKUP", + "urls": [ + "/${VERSION}/show-backup.html" + ] + }, + { + "title": "SHOW CLUSTER SETTING", + "urls": [ + "/${VERSION}/show-cluster-setting.html" + ] + }, + { + "title": "SHOW COLUMNS", + "urls": [ + "/${VERSION}/show-columns.html" + ] + }, + { + "title": "SHOW CONSTRAINTS", + "urls": [ + "/${VERSION}/show-constraints.html" + ] + }, + { + "title": "SHOW CREATE", + "urls": [ + "/${VERSION}/show-create.html" + ] + }, + { + "title": "SHOW CREATE EXTERNAL CONNECTION", + "urls": [ + "/${VERSION}/show-create-external-connection.html" + ] + }, + { + "title": "SHOW CREATE SCHEDULE", + "urls": [ + "/${VERSION}/show-create-schedule.html" + ] + }, + { + "title": "SHOW DATABASES", + "urls": [ + "/${VERSION}/show-databases.html" + ] + }, + { + "title": "SHOW DEFAULT PRIVILEGES", + "urls": [ + "/${VERSION}/show-default-privileges.html" + ] + }, + { + "title": "SHOW DEFAULT SESSION VARIABLES FOR ROLE", + "urls": [ + "/${VERSION}/show-default-session-variables-for-role.html" + ] + }, + { + "title": "SHOW ENUMS", + "urls": [ + "/${VERSION}/show-enums.html" + ] + }, + { + "title": "SHOW EXTERNAL CONNECTION", + "urls": [ + "/${VERSION}/show-external-connection.html" + ] + }, + { + "title": "SHOW FULL TABLE SCANS", + "urls": [ + "/${VERSION}/show-full-table-scans.html" + ] + }, + { + "title": "SHOW GRANTS", + "urls": [ + "/${VERSION}/show-grants.html" + ] + }, + { + "title": "SHOW INDEX", + "urls": [ + "/${VERSION}/show-index.html" + ] + }, + { + "title": "SHOW JOBS", + "urls": [ + "/${VERSION}/show-jobs.html" + ] + }, + { + 
"title": "SHOW LOCALITY", + "urls": [ + "/${VERSION}/show-locality.html" + ] + }, + { + "title": "SHOW LOGICAL REPLICATION JOBS", + "urls": [ + "/${VERSION}/show-logical-replication-jobs.html" + ] + }, + { + "title": "SHOW PARTITIONS", + "urls": [ + "/${VERSION}/show-partitions.html" + ] + }, + { + "title": "SHOW POLICIES", + "urls": [ + "/${VERSION}/show-policies.html" + ] + }, + { + "title": "SHOW RANGES", + "urls": [ + "/${VERSION}/show-ranges.html" + ] + }, + { + "title": "SHOW RANGE FOR ROW", + "urls": [ + "/${VERSION}/show-range-for-row.html" + ] + }, + { + "title": "SHOW REGIONS", + "urls": [ + "/${VERSION}/show-regions.html" + ] + }, + { + "title": "SHOW {session variable}", + "urls": [ + "/${VERSION}/show-vars.html" + ] + }, + { + "title": "SHOW SUPER REGIONS", + "urls": [ + "/${VERSION}/show-super-regions.html" + ] + }, + { + "title": "SHOW SYSTEM GRANTS", + "urls": [ + "/${VERSION}/show-system-grants.html" + ] + }, + { + "title": "SHOW ROLES", + "urls": [ + "/${VERSION}/show-roles.html" + ] + }, + { + "title": "SHOW SCHEDULES", + "urls": [ + "/${VERSION}/show-schedules.html" + ] + }, + { + "title": "SHOW SCHEMAS", + "urls": [ + "/${VERSION}/show-schemas.html" + ] + }, + { + "title": "SHOW SEQUENCES", + "urls": [ + "/${VERSION}/show-sequences.html" + ] + }, + { + "title": "SHOW SESSIONS", + "urls": [ + "/${VERSION}/show-sessions.html" + ] + }, + { + "title": "SHOW STATEMENTS", + "urls": [ + "/${VERSION}/show-statements.html" + ] + }, + { + "title": "SHOW STATISTICS", + "urls": [ + "/${VERSION}/show-statistics.html" + ] + }, + { + "title": "SHOW SAVEPOINT STATUS", + "urls": [ + "/${VERSION}/show-savepoint-status.html" + ] + }, + { + "title": "SHOW TABLES", + "urls": [ + "/${VERSION}/show-tables.html" + ] + }, + { + "title": "SHOW TRACE FOR SESSION", + "urls": [ + "/${VERSION}/show-trace.html" + ] + }, + { + "title": "SHOW TRANSACTIONS", + "urls": [ + "/${VERSION}/show-transactions.html" + ] + }, + { + "title": "SHOW TYPES", + "urls": [ + "/${VERSION}/show-types.html" + ] + }, + { + "title": "SHOW USERS", + "urls": [ + "/${VERSION}/show-users.html" + ] + }, + { + "title": "SHOW VIRTUAL CLUSTER", + "urls": [ + "/${VERSION}/show-virtual-cluster.html" + ] + }, + { + "title": "SHOW ZONE CONFIGURATIONS", + "urls": [ + "/${VERSION}/show-zone-configurations.html" + ] + }, + { + "title": "TRUNCATE", + "urls": [ + "/${VERSION}/truncate.html" + ] + }, + { + "title": "UPDATE", + "urls": [ + "/${VERSION}/update.html" + ] + }, + { + "title": "UPSERT", + "urls": [ + "/${VERSION}/upsert.html" + ] + }, + { + "title": "WITH {storage parameter}", + "urls": [ + "/${VERSION}/with-storage-parameter.html" + ] + } + ] + }, + { + "title": "Syntax", + "items": [ + { + "title": "Keywords & Identifiers", + "urls": [ + "/${VERSION}/keywords-and-identifiers.html" + ] + }, + { + "title": "Constants", + "urls": [ + "/${VERSION}/sql-constants.html" + ] + }, + { + "title": "Selection Queries", + "urls": [ + "/${VERSION}/selection-queries.html" + ] + }, + { + "title": "Cursors", + "urls": [ + "/${VERSION}/cursors.html" + ] + }, + { + "title": "Table Expressions", + "urls": [ + "/${VERSION}/table-expressions.html" + ] + }, + { + "title": "Common Table Expressions", + "urls": [ + "/${VERSION}/common-table-expressions.html" + ] + }, + { + "title": "JSONPath Queries", + "urls": [ + "/${VERSION}/jsonpath.html" + ] + }, + { + "title": "Name Resolution", + "urls": [ + "/${VERSION}/sql-name-resolution.html" + ] + }, + { + "title": "NULL Handling", + "urls": [ + "/${VERSION}/null-handling.html" + ] + }, + { + "title": 
"Scalar Expressions", + "urls": [ + "/${VERSION}/scalar-expressions.html" + ] + }, + { + "title": "User-Defined Functions", + "urls": [ + "/${VERSION}/user-defined-functions.html" + ] + }, + { + "title": "Stored Procedures", + "urls": [ + "/${VERSION}/stored-procedures.html" + ] + }, + { + "title": "Triggers", + "urls": [ + "/${VERSION}/triggers.html" + ] + }, + { + "title": "Window Functions", + "urls": [ + "/${VERSION}/window-functions.html" + ] + }, + { + "title": "Full SQL Grammar", + "urls": [ + "/${VERSION}/sql-grammar.html" + ] + } + ] + }, + { + "title": "Data Types", + "items": [ + { + "title": "Overview", + "urls": [ + "/${VERSION}/data-types.html" + ] + }, + { + "title": "ARRAY", + "urls": [ + "/${VERSION}/array.html" + ] + }, + { + "title": "BIT", + "urls": [ + "/${VERSION}/bit.html" + ] + }, + { + "title": "BOOL", + "urls": [ + "/${VERSION}/bool.html" + ] + }, + { + "title": "BYTES", + "urls": [ + "/${VERSION}/bytes.html" + ] + }, + { + "title": "COLLATE", + "urls": [ + "/${VERSION}/collate.html" + ] + }, + { + "title": "DATE", + "urls": [ + "/${VERSION}/date.html" + ] + }, + { + "title": "DECIMAL", + "urls": [ + "/${VERSION}/decimal.html" + ] + }, + { + "title": "ENUM", + "urls": [ + "/${VERSION}/enum.html" + ] + }, + { + "title": "FLOAT", + "urls": [ + "/${VERSION}/float.html" + ] + }, + { + "title": "INET", + "urls": [ + "/${VERSION}/inet.html" + ] + }, + { + "title": "INT", + "urls": [ + "/${VERSION}/int.html" + ] + }, + { + "title": "INTERVAL", + "urls": [ + "/${VERSION}/interval.html" + ] + }, + { + "title": "JSONB", + "urls": [ + "/${VERSION}/jsonb.html" + ] + }, + { + "title": "OID", + "urls": [ + "/${VERSION}/oid.html" + ] + }, + { + "title": "SERIAL", + "urls": [ + "/${VERSION}/serial.html" + ] + }, + { + "title": "STRING", + "urls": [ + "/${VERSION}/string.html" + ] + }, + { + "title": "TIME", + "urls": [ + "/${VERSION}/time.html" + ] + }, + { + "title": "TIMESTAMP", + "urls": [ + "/${VERSION}/timestamp.html" + ] + }, + { + "title": "TSQUERY", + "urls": [ + "/${VERSION}/tsquery.html" + ] + }, + { + "title": "TSVECTOR", + "urls": [ + "/${VERSION}/tsvector.html" + ] + }, + { + "title": "UUID", + "urls": [ + "/${VERSION}/uuid.html" + ] + }, + { + "title": "VECTOR", + "urls": [ + "/${VERSION}/vector.html" + ] + } + ] + }, + { + "title": "Spatial Data", + "items": [ + { + "title": "Overview", + "urls": [ + "/${VERSION}/spatial-data-overview.html" + ] + }, + { + "title": "POINT", + "urls": [ + "/${VERSION}/point.html" + ] + }, + { + "title": "LINESTRING", + "urls": [ + "/${VERSION}/linestring.html" + ] + }, + { + "title": "POLYGON", + "urls": [ + "/${VERSION}/polygon.html" + ] + }, + { + "title": "MULTIPOINT", + "urls": [ + "/${VERSION}/multipoint.html" + ] + }, + { + "title": "MULTILINESTRING", + "urls": [ + "/${VERSION}/multilinestring.html" + ] + }, + { + "title": "MULTIPOLYGON", + "urls": [ + "/${VERSION}/multipolygon.html" + ] + }, + { + "title": "GEOMETRYCOLLECTION", + "urls": [ + "/${VERSION}/geometrycollection.html" + ] + }, + { + "title": "Well Known Text (WKT)", + "urls": [ + "/${VERSION}/well-known-text.html" + ] + }, + { + "title": "Well Known Binary (WKB)", + "urls": [ + "/${VERSION}/well-known-binary.html" + ] + }, + { + "title": "GeoJSON", + "urls": [ + "/${VERSION}/geojson.html" + ] + }, + { + "title": "SRID 4326 - longitude and latitude", + "urls": [ + "/${VERSION}/srid-4326.html" + ] + }, + { + "title": "ST_Contains", + "urls": [ + "/${VERSION}/st_contains.html" + ] + }, + { + "title": "ST_Within", + "urls": [ + "/${VERSION}/st_within.html" + ] + }, + { 
+ "title": "ST_Intersects", + "urls": [ + "/${VERSION}/st_intersects.html" + ] + }, + { + "title": "ST_CoveredBy", + "urls": [ + "/${VERSION}/st_coveredby.html" + ] + }, + { + "title": "ST_Covers", + "urls": [ + "/${VERSION}/st_covers.html" + ] + }, + { + "title": "ST_Disjoint", + "urls": [ + "/${VERSION}/st_disjoint.html" + ] + }, + { + "title": "ST_Equals", + "urls": [ + "/${VERSION}/st_equals.html" + ] + }, + { + "title": "ST_Overlaps", + "urls": [ + "/${VERSION}/st_overlaps.html" + ] + }, + { + "title": "ST_Touches", + "urls": [ + "/${VERSION}/st_touches.html" + ] + }, + { + "title": "ST_ConvexHull", + "urls": [ + "/${VERSION}/st_convexhull.html" + ] + }, + { + "title": "ST_Union", + "urls": [ + "/${VERSION}/st_union.html" + ] + } + ] + }, + { + "title": "Constraints", + "items": [ + { + "title": "Overview", + "urls": [ + "/${VERSION}/constraints.html" + ] + }, + { + "title": "Check", + "urls": [ + "/${VERSION}/check.html" + ] + }, + { + "title": "Default Value", + "urls": [ + "/${VERSION}/default-value.html" + ] + }, + { + "title": "Foreign Key", + "urls": [ + "/${VERSION}/foreign-key.html" + ] + }, + { + "title": "Not Null", + "urls": [ + "/${VERSION}/not-null.html" + ] + }, + { + "title": "Primary Key", + "urls": [ + "/${VERSION}/primary-key.html" + ] + }, + { + "title": "Unique", + "urls": [ + "/${VERSION}/unique.html" + ] + } + ] + }, + { + "title": "Functions & Operators", + "urls": [ + "/${VERSION}/functions-and-operators.html" + ] + }, + { + "title": "Session Variables", + "urls": [ + "/${VERSION}/session-variables.html" + ] + }, + { + "title": "PL/pgSQL", + "urls": [ + "/${VERSION}/plpgsql.html" + ] + } + ] + } diff --git a/src/current/_includes/v25.3/sidebar-data/stream-data.json b/src/current/_includes/v25.3/sidebar-data/stream-data.json new file mode 100644 index 00000000000..020861b68be --- /dev/null +++ b/src/current/_includes/v25.3/sidebar-data/stream-data.json @@ -0,0 +1,168 @@ +{ + "title": "Stream Data", + "is_top_level": true, + "items": [ + { + "title": "Change Data Capture Overview", + "urls": [ + "/${VERSION}/change-data-capture-overview.html" + ] + }, + { + "title": "Get Started with Changefeeds", + "items": [ + { + "title": "Create and Configure Changefeeds", + "urls": [ + "/${VERSION}/create-and-configure-changefeeds.html" + ] + }, + { + "title": "Changefeed Best Practices", + "urls": [ + "/${VERSION}/changefeed-best-practices.html" + ] + }, + { + "title": "Changefeed Messages", + "items": [ + { + "title": "Overview", + "urls": [ + "/${VERSION}/changefeed-messages.html" + ] + }, + { + "title": "Message Envelope", + "urls":[ + "/${VERSION}/changefeed-message-envelopes.html" + ] + } + ] + }, + { + "title": "Changefeed Sinks", + "urls": [ + "/${VERSION}/changefeed-sinks.html" + ] + }, + { + "title": "Changefeed Examples", + "urls": [ + "/${VERSION}/changefeed-examples.html" + ] + } + ] + }, + { + "title": "Monitor Changefeeds", + "items": [ + { + "title": "Overview", + "urls": [ + "/${VERSION}/monitor-and-debug-changefeeds.html" + ] + }, + { + "title": "Monitoring Guide", + "urls": [ + "/${VERSION}/changefeed-monitoring-guide.html" + ] + }, + { + "title": "Protect Changefeed Data", + "urls": [ + "/${VERSION}/protect-changefeed-data.html" + ] + } + ] + }, + { + "title": "Optimize Changefeeds", + "items": [ + { + "title": "Change Data Capture Queries", + "urls": [ + "/${VERSION}/cdc-queries.html" + ] + }, + { + "title": "Changefeeds on Tables with Column Families", + "urls": [ + "/${VERSION}/changefeeds-on-tables-with-column-families.html" + ] + }, + { + "title": 
"Export Data with Changefeeds", + "urls": [ + "/${VERSION}/export-data-with-changefeeds.html" + ] + }, + { + "title": "Changefeeds in Multi-Region Deployments", + "urls": [ + "/${VERSION}/changefeeds-in-multi-region-deployments.html" + ] + } + ] + }, + { + "title": "Changefeed Tutorials", + "items": [ + { + "title": "Stream a Changefeed to an Amazon MSK Cluster", + "items": [ + { + "title": "Amazon MSK", + "urls": [ + "/${VERSION}/stream-a-changefeed-to-amazon-msk.html" + ] + }, + { + "title": "Amazon MSK Serverless", + "urls": [ + "/${VERSION}/stream-a-changefeed-to-amazon-msk-serverless.html" + ] + } + ] + }, + { + "title": "Connect to a Changefeed Kafka Sink with OAuth Using Okta", + "urls": [ + "/${VERSION}/connect-to-a-changefeed-kafka-sink-with-oauth-using-okta.html" + ] + }, + { + "title": "Stream a Changefeed from CockroachDB Cloud to Snowflake", + "urls": [ + "/cockroachcloud/stream-changefeed-to-snowflake-aws.html" + ] + }, + { + "title": "Stream a Changefeed to a Confluent Cloud Kafka Cluster", + "urls": [ + "/${VERSION}/stream-a-changefeed-to-a-confluent-cloud-kafka-cluster.html" + ] + } + ] + }, + { + "title": "Technical Overview", + "items": [ + { + "title": "How Does an Enterprise Changefeed Work?", + "urls": [ + "/${VERSION}/how-does-an-enterprise-changefeed-work.html" + ] + } + ] + }, + { + "title": "Advanced Changefeed Configuration", + "urls": [ + "/${VERSION}/advanced-changefeed-configuration.html" + ] + } + ] + } + \ No newline at end of file diff --git a/src/current/_includes/v25.3/sidebar-data/troubleshooting.json b/src/current/_includes/v25.3/sidebar-data/troubleshooting.json new file mode 100644 index 00000000000..5ea8eaa2897 --- /dev/null +++ b/src/current/_includes/v25.3/sidebar-data/troubleshooting.json @@ -0,0 +1,111 @@ +{ + "title": "Troubleshooting", + "is_top_level": true, + "items": [ + { + "title": "Overview", + "urls": [ + "/${VERSION}/troubleshooting-overview.html" + ] + }, + { + "title": "Common Errors and Solutions", + "urls": [ + "/${VERSION}/common-errors.html" + ] + }, + { + "title": "Troubleshoot Cloud Setup", + "urls": [ + "/cockroachcloud/troubleshooting-page.html" + ] + }, + { + "title": "Troubleshoot Self-Hosted Setup", + "urls": [ + "/${VERSION}/cluster-setup-troubleshooting.html" + ] + }, + { + "title": "Troubleshoot SQL Statements", + "urls": [ + "/${VERSION}/query-behavior-troubleshooting.html" + ] + }, + { + "title": "Transaction Retry Error Reference", + "urls": [ + "/${VERSION}/transaction-retry-error-reference.html" + ] + }, + { + "title": "Transaction Retry Error Example", + "urls": [ + "/${VERSION}/transaction-retry-error-example.html" + ] + }, + { + "title": "Differences in Metrics between Third-Party Monitoring Integrations and DB Console", + "urls": [ + "/${VERSION}/differences-in-metrics-between-third-party-monitoring-integrations-and-db-console.html" + ] + }, + { + "title": "Understand Hotspots", + "urls": [ + "/${VERSION}/understand-hotspots.html" + ] + }, + { + "title": "Replication Reports", + "urls": [ + "/${VERSION}/query-replication-reports.html" + ] + }, + { + "title": "Troubleshoot Replication Zones", + "urls": [ + "/${VERSION}/troubleshoot-replication-zones.html" + ] + }, + { + "title": "Benchmarking", + "items": [ + { + "title": "Overview", + "urls": [ + "/${VERSION}/performance.html" + ] + }, + { + "title": "Benchmarking with TPC-C", + "urls": [ + "/${VERSION}/performance-benchmarking-with-tpcc-local.html", + "/${VERSION}/performance-benchmarking-with-tpcc-local-multiregion.html", + 
"/${VERSION}/performance-benchmarking-with-tpcc-small.html", + "/${VERSION}/performance-benchmarking-with-tpcc-medium.html", + "/${VERSION}/performance-benchmarking-with-tpcc-large.html" + ] + } + ] + }, + { + "title": "Support Resources", + "urls": [ + "/${VERSION}/support-resources.html" + ] + }, + { + "title": "File an Issue", + "urls": [ + "/${VERSION}/file-an-issue.html" + ] + }, + { + "title": "Automatic CPU Profiler", + "urls": [ + "/${VERSION}/automatic-cpu-profiler.html" + ] + } + ] + } diff --git a/src/current/_includes/v25.3/spatial/ogr2ogr-supported-version.md b/src/current/_includes/v25.3/spatial/ogr2ogr-supported-version.md new file mode 100644 index 00000000000..ad444257227 --- /dev/null +++ b/src/current/_includes/v25.3/spatial/ogr2ogr-supported-version.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_info}} +An `ogr2ogr` version of 3.1.0 or higher is required to generate data that can be imported into CockroachDB. +{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/spatial/zmcoords.md b/src/current/_includes/v25.3/spatial/zmcoords.md new file mode 100644 index 00000000000..2eb86de3a89 --- /dev/null +++ b/src/current/_includes/v25.3/spatial/zmcoords.md @@ -0,0 +1,27 @@ + You can also store a `{{page.title}}` with the following additional dimensions: + +- A third dimension coordinate `Z` (`{{page.title}}Z`). +- A measure coordinate `M` (`{{page.title}}M`). +- Both a third dimension and a measure coordinate (`{{page.title}}ZM`). + +The `Z` and `M` dimensions can be accessed or modified using a number of [built-in functions]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions), including: + +- `ST_Z` +- `ST_M` +- `ST_Affine` +- `ST_Zmflag` +- `ST_MakePoint` +- `ST_MakePointM` +- `ST_Force3D` +- `ST_Force3DZ` +- `ST_Force3DM` +- `ST_Force4D` +- `ST_Snap` +- `ST_SnapToGrid` +- `ST_RotateZ` +- `ST_AddMeasure` + +Note that CockroachDB's [spatial indexing]({% link {{ page.version.version }}/spatial-indexes.md %}) is still based on the 2D coordinate system. This means that: + +- The Z/M dimension is not index accelerated when using spatial predicates. +- Some spatial functions ignore the Z/M dimension, with transformations discarding the Z/M value. diff --git a/src/current/_includes/v25.3/sql/add-size-limits-to-indexed-columns.md b/src/current/_includes/v25.3/sql/add-size-limits-to-indexed-columns.md new file mode 100644 index 00000000000..36907c10915 --- /dev/null +++ b/src/current/_includes/v25.3/sql/add-size-limits-to-indexed-columns.md @@ -0,0 +1,17 @@ +We **strongly recommend** adding size limits to all [indexed columns]({% link {{ page.version.version }}/indexes.md %}), which includes columns in [primary keys]({% link {{ page.version.version }}/primary-key.md %}). + +Values exceeding 1 MiB can lead to [storage layer write amplification]({% link {{ page.version.version }}/architecture/storage-layer.md %}#write-amplification) and cause significant performance degradation or even [crashes due to OOMs (out of memory errors)]({% link {{ page.version.version }}/cluster-setup-troubleshooting.md %}#out-of-memory-oom-crash). + +To add a size limit using [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}): + +{% include_cached copy-clipboard.html %} +~~~ sql +CREATE TABLE name (first STRING(100), last STRING(100)); +~~~ + +To add a size limit using [`ALTER TABLE ... 
ALTER COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#alter-column): + +{% include_cached copy-clipboard.html %} +~~~ sql +ALTER TABLE name ALTER first TYPE STRING(99); +~~~ diff --git a/src/current/_includes/v25.3/sql/begin-transaction-as-of-system-time-example.md b/src/current/_includes/v25.3/sql/begin-transaction-as-of-system-time-example.md new file mode 100644 index 00000000000..7f2c11dac77 --- /dev/null +++ b/src/current/_includes/v25.3/sql/begin-transaction-as-of-system-time-example.md @@ -0,0 +1,19 @@ +{% include_cached copy-clipboard.html %} +~~~ sql +> BEGIN AS OF SYSTEM TIME '2019-04-09 18:02:52.0+00:00'; +~~~ + +{% include_cached copy-clipboard.html %} +~~~ sql +> SELECT * FROM orders; +~~~ + +{% include_cached copy-clipboard.html %} +~~~ sql +> SELECT * FROM products; +~~~ + +{% include_cached copy-clipboard.html %} +~~~ sql +> COMMIT; +~~~ diff --git a/src/current/_includes/v25.3/sql/connection-parameters.md b/src/current/_includes/v25.3/sql/connection-parameters.md new file mode 100644 index 00000000000..b61bb3caf16 --- /dev/null +++ b/src/current/_includes/v25.3/sql/connection-parameters.md @@ -0,0 +1,9 @@ +Flag | Description +-----|------------ + `--url` | A [connection URL]({% link {{ page.version.version }}/connection-parameters.md %}#connect-using-a-url) to use instead of the other arguments. To convert a connection URL to the syntax that works with your client driver, run [`cockroach convert-url`]({% link {{ page.version.version }}/connection-parameters.md %}#convert-a-url-for-different-drivers).

**Env Variable:** `COCKROACH_URL`
**Default:** no URL +`--host` | The server host and port number to connect to. This can be the address of any node in the cluster.

**Env Variable:** `COCKROACH_HOST`
**Default:** `localhost:26257` +`--port`

`-p` | The server port to connect to. Note: The port number can also be specified via `--host`.

**Env Variable:** `COCKROACH_PORT`
**Default:** `26257` +`--user`

`-u` | The [SQL user]({% link {{ page.version.version }}/create-user.md %}) that will own the client session.

**Env Variable:** `COCKROACH_USER`
**Default:** `root` +`--insecure` | Use an insecure connection.

**Env Variable:** `COCKROACH_INSECURE`
**Default:** `false` +`--cert-principal-map` | A comma-separated list of `<cert-principal>:<db-principal>` mappings. This allows mapping the principal in a cert to a DB principal such as `node` or `root` or any SQL user. This is intended for use in situations where the certificate management system places restrictions on the `Subject.CommonName` or `SubjectAlternateName` fields in the certificate (e.g., disallowing a `CommonName` like `node` or `root`). If multiple mappings are provided for the same `<cert-principal>`, the last one specified in the list takes precedence. A principal not specified in the map is passed through as-is via the identity function. A cert is allowed to authenticate a DB principal if the DB principal name is contained in the mapped `CommonName` or DNS-type `SubjectAlternateName` fields. +`--certs-dir` | The path to the [certificate directory]({% link {{ page.version.version }}/cockroach-cert.md %}) containing the CA and client certificates and client key.

**Env Variable:** `COCKROACH_CERTS_DIR`
**Default:** `${HOME}/.cockroach-certs/` \ No newline at end of file diff --git a/src/current/_includes/v25.3/sql/covering-index.md b/src/current/_includes/v25.3/sql/covering-index.md new file mode 100644 index 00000000000..4ce5b00cf12 --- /dev/null +++ b/src/current/_includes/v25.3/sql/covering-index.md @@ -0,0 +1 @@ +An index that stores all the columns needed by a query is also known as a _covering index_ for that query. When a query has a covering index, CockroachDB can use that index directly instead of doing an "index join" with the primary index, which is likely to be slower. diff --git a/src/current/_includes/v25.3/sql/crdb-internal-is-not-supported-for-production-use.md b/src/current/_includes/v25.3/sql/crdb-internal-is-not-supported-for-production-use.md new file mode 100644 index 00000000000..475a8804b04 --- /dev/null +++ b/src/current/_includes/v25.3/sql/crdb-internal-is-not-supported-for-production-use.md @@ -0,0 +1 @@ +Many of the tables in the `crdb_internal` system catalog are **not supported for external use in production**. This output is provided **as a debugging aid only**. The output of particular `crdb_internal` facilities may change from patch release to patch release without advance warning. For more information, see [the `crdb_internal` documentation]({% link {{ page.version.version }}/crdb-internal.md %}). diff --git a/src/current/_includes/v25.3/sql/crdb-internal-partitions-example.md b/src/current/_includes/v25.3/sql/crdb-internal-partitions-example.md new file mode 100644 index 00000000000..680b0adf261 --- /dev/null +++ b/src/current/_includes/v25.3/sql/crdb-internal-partitions-example.md @@ -0,0 +1,43 @@ +## Querying partitions programmatically + +The `crdb_internal.partitions` internal table contains information about the partitions in your database. In testing, scripting, and other programmatic environments, we recommend querying this table for partition information instead of using the `SHOW PARTITIONS` statement. For example, to get all `us_west` partitions in your database, you can run the following query: + +{% include_cached copy-clipboard.html %} +~~~ sql +> SELECT * FROM crdb_internal.partitions WHERE name='us_west'; +~~~ + +~~~ + table_id | index_id | parent_name | name | columns | column_names | list_value | range_value | zone_id | subzone_id ++----------+----------+-------------+---------+---------+--------------+-------------------------------------------------+-------------+---------+------------+ + 53 | 1 | NULL | us_west | 1 | city | ('seattle'), ('san francisco'), ('los angeles') | NULL | 0 | 0 + 54 | 1 | NULL | us_west | 1 | city | ('seattle'), ('san francisco'), ('los angeles') | NULL | 54 | 1 + 54 | 2 | NULL | us_west | 1 | city | ('seattle'), ('san francisco'), ('los angeles') | NULL | 54 | 2 + 55 | 1 | NULL | us_west | 1 | city | ('seattle'), ('san francisco'), ('los angeles') | NULL | 55 | 1 + 55 | 2 | NULL | us_west | 1 | city | ('seattle'), ('san francisco'), ('los angeles') | NULL | 55 | 2 + 55 | 3 | NULL | us_west | 1 | vehicle_city | ('seattle'), ('san francisco'), ('los angeles') | NULL | 55 | 3 + 56 | 1 | NULL | us_west | 1 | city | ('seattle'), ('san francisco'), ('los angeles') | NULL | 56 | 1 + 58 | 1 | NULL | us_west | 1 | city | ('seattle'), ('san francisco'), ('los angeles') | NULL | 58 | 1 +(8 rows) +~~~ + +Other internal tables, like `crdb_internal.tables`, include information that could be useful in conjunction with `crdb_internal.partitions`.
+ +For example, if you want the output for your partitions to include the name of the table and database, you can perform a join of the two tables: + +{% include_cached copy-clipboard.html %} +~~~ sql +> SELECT + partitions.name AS partition_name, column_names, list_value, tables.name AS table_name, database_name + FROM crdb_internal.partitions JOIN crdb_internal.tables ON partitions.table_id=tables.table_id + WHERE tables.name='users'; +~~~ + +~~~ + partition_name | column_names | list_value | table_name | database_name ++----------------+--------------+-------------------------------------------------+------------+---------------+ + us_west | city | ('seattle'), ('san francisco'), ('los angeles') | users | movr + us_east | city | ('new york'), ('boston'), ('washington dc') | users | movr + europe_west | city | ('amsterdam'), ('paris'), ('rome') | users | movr +(3 rows) +~~~ diff --git a/src/current/_includes/v25.3/sql/crdb-internal-partitions.md b/src/current/_includes/v25.3/sql/crdb-internal-partitions.md new file mode 100644 index 00000000000..11faab704cd --- /dev/null +++ b/src/current/_includes/v25.3/sql/crdb-internal-partitions.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_success}} +In testing, scripting, and other programmatic environments, we recommend querying the `crdb_internal.partitions` internal table for partition information instead of using the `SHOW PARTITIONS` statement. For more information, see [Querying partitions programmatically]({% link {{ page.version.version }}/show-partitions.md %}#querying-partitions-programmatically). +{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/sql/cursors-vs-keyset-pagination.md b/src/current/_includes/v25.3/sql/cursors-vs-keyset-pagination.md new file mode 100644 index 00000000000..ba5391b5ace --- /dev/null +++ b/src/current/_includes/v25.3/sql/cursors-vs-keyset-pagination.md @@ -0,0 +1,3 @@ +_Cursors_ are stateful objects that use more database resources than keyset pagination, since each cursor holds open a transaction. However, they are easier to use, and make it easier to get consistent results without having to write complex queries from your application logic. They do not require that the results be returned in a particular order (that is, you don't have to include an `ORDER BY` clause), which makes them more flexible. + +_Keyset pagination_ queries are usually much faster than cursors since they order by indexed columns. However, in order to get that performance they require that you return results in some defined order that can be calculated by your application's queries. Because that ordering involves calculating the start/end point of pages of results based on an indexed key, they require more care to write correctly. diff --git a/src/current/_includes/v25.3/sql/db-terms.md b/src/current/_includes/v25.3/sql/db-terms.md new file mode 100644 index 00000000000..5776ed951e0 --- /dev/null +++ b/src/current/_includes/v25.3/sql/db-terms.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_info}} +To avoid confusion with the general term "[database](https://en.wikipedia.org/wiki/Database)", throughout this guide we refer to the logical object as a *database*, to CockroachDB by name, and to a deployment of CockroachDB as a [*cluster*]({% link {{ page.version.version }}/architecture/glossary.md %}#cockroachdb-architecture-terms). 
+{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/sql/dev-schema-change-limits.md b/src/current/_includes/v25.3/sql/dev-schema-change-limits.md new file mode 100644 index 00000000000..e6f10db0bc9 --- /dev/null +++ b/src/current/_includes/v25.3/sql/dev-schema-change-limits.md @@ -0,0 +1,3 @@ +Review the [limitations of online schema changes]({% link {{ page.version.version }}/online-schema-changes.md %}#known-limitations). CockroachDB [doesn't guarantee the atomicity of schema changes within transactions with multiple statements]({% link {{ page.version.version }}/online-schema-changes.md %}#schema-changes-within-transactions). + + Cockroach Labs recommends that you perform schema changes outside explicit transactions. When a database [schema management tool]({% link {{ page.version.version }}/third-party-database-tools.md %}#schema-migration-tools) manages transactions on your behalf, include one schema change operation per transaction. diff --git a/src/current/_includes/v25.3/sql/dev-schema-changes.md b/src/current/_includes/v25.3/sql/dev-schema-changes.md new file mode 100644 index 00000000000..9e42fd08614 --- /dev/null +++ b/src/current/_includes/v25.3/sql/dev-schema-changes.md @@ -0,0 +1 @@ +Use a [database schema migration tool]({% link {{ page.version.version }}/third-party-database-tools.md %}#schema-migration-tools) or the [CockroachDB SQL client]({% link {{ page.version.version }}/cockroach-sql.md %}) instead of a [client library]({% link {{ page.version.version }}/third-party-database-tools.md %}#drivers) to execute [database schema changes](online-schema-changes.html). diff --git a/src/current/_includes/v25.3/sql/disallow-full-table-scans.md b/src/current/_includes/v25.3/sql/disallow-full-table-scans.md new file mode 100644 index 00000000000..0e83d177878 --- /dev/null +++ b/src/current/_includes/v25.3/sql/disallow-full-table-scans.md @@ -0,0 +1,12 @@ +- At the cluster level, set `disallow_full_table_scans` for some or [all users and roles]({% link {{ page.version.version }}/alter-role.md %}#set-default-session-variable-values-for-all-users). For example: + + {% include_cached copy-clipboard.html %} + ~~~ sql + ALTER ROLE ALL SET disallow_full_table_scans = true; + ~~~ + +- At the application level, add `disallow_full_table_scans` to the connection string using the [`options` parameter]({% link {{page.version.version}}/connection-parameters.md %}#additional-connection-parameters). + +If you disable full scans, you can set the [`large_full_scan_rows` session variable]({% link {{ page.version.version }}/set-vars.md %}#large-full-scan-rows) to specify the maximum table size allowed for a full scan. If no alternative plan is possible, the optimizer will return an error. + +If you disable full scans, and you provide an [index hint]({% link {{ page.version.version }}/indexes.md %}#selection), the optimizer will try to avoid a full scan while also respecting the index hint. If this is not possible, the optimizer will return an error. If you do not provide an index hint and it is not possible to avoid a full scan, the optimizer will return an error, the full scan will be logged, and the `sql.guardrails.full_scan_rejected.count` [metric]({% link {{ page.version.version }}/ui-overview-dashboard.md %}) will be updated. 
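For example, a minimal sketch that combines these settings, assuming you want to reject large full scans for all users while still allowing scans of small tables (the 1,000-row threshold here is illustrative, not part of the include above):

~~~ sql
-- Reject full table scans by default for every user and role.
ALTER ROLE ALL SET disallow_full_table_scans = true;
-- Still permit full scans over tables the optimizer estimates at 1,000 rows or fewer.
ALTER ROLE ALL SET large_full_scan_rows = 1000;
~~~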
diff --git a/src/current/_includes/v25.3/sql/drop-role-considerations.md b/src/current/_includes/v25.3/sql/drop-role-considerations.md new file mode 100644 index 00000000000..585a062934b --- /dev/null +++ b/src/current/_includes/v25.3/sql/drop-role-considerations.md @@ -0,0 +1,4 @@ +- The `admin` user/role cannot be dropped, and `root` must always be a member of `admin`. +- A user/role cannot be dropped if it has privileges. Use [`REVOKE`]({% link {{ page.version.version }}/revoke.md %}) to remove privileges. +- Users/roles that [own objects]({% link {{ page.version.version }}/security-reference/authorization.md %}#object-ownership) (such as databases, tables, schemas, and types) cannot be dropped until the [ownership is transferred to another user/role]({% link {{ page.version.version }}/alter-database.md %}#change-a-databases-owner). +- If a user/role is logged in while a [different session]({% link {{ page.version.version }}/show-sessions.md %}) drops that user, CockroachDB checks that the user exists before allowing it to inherit privileges from [the `public` role]({% link {{ page.version.version }}/security-reference/authorization.md %}). In addition, any active [web]({% link {{ page.version.version }}/ui-overview.md %}#authentication) [sessions]({% link {{ page.version.version }}/cockroach-auth-session.md %}) are revoked when a user is dropped. diff --git a/src/current/_includes/v25.3/sql/enable-super-region-primary-region-changes.md b/src/current/_includes/v25.3/sql/enable-super-region-primary-region-changes.md new file mode 100644 index 00000000000..94920f7d481 --- /dev/null +++ b/src/current/_includes/v25.3/sql/enable-super-region-primary-region-changes.md @@ -0,0 +1,23 @@ +By default, you may not change the [primary region]({% link {{ page.version.version }}/alter-database.md %}#set-primary-region) of a [multi-region database]({% link {{ page.version.version }}/multiregion-overview.md %}) when that region is part of a super region. This is a safety setting designed to prevent you from accidentally moving the data for a [regional table]({% link {{ page.version.version }}/regional-tables.md %}) that is meant to be stored in the super region out of that super region, which could break your data domiciling setup. 
+ +If you are sure about what you are doing, you can allow modifying the primary region by setting the `alter_primary_region_super_region_override` [session setting]({% link {{ page.version.version }}/set-vars.md %}) to `'on'`: + +{% include_cached copy-clipboard.html %} +~~~ sql +SET alter_primary_region_super_region_override = 'on'; +~~~ + +~~~ +SET +~~~ + +You can also accomplish this by setting the `sql.defaults.alter_primary_region_super_region_override.enable` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) to `true`: + +{% include_cached copy-clipboard.html %} +~~~ sql +SET CLUSTER SETTING sql.defaults.alter_primary_region_super_region_override.enable = true; +~~~ + +~~~ +SET CLUSTER SETTING +~~~ diff --git a/src/current/_includes/v25.3/sql/enable-super-regions.md b/src/current/_includes/v25.3/sql/enable-super-regions.md new file mode 100644 index 00000000000..0dd7ac26077 --- /dev/null +++ b/src/current/_includes/v25.3/sql/enable-super-regions.md @@ -0,0 +1,21 @@ +To enable super regions, set the `enable_super_regions` [session setting]({% link {{ page.version.version }}/set-vars.md %}) to `'on'`: + +{% include_cached copy-clipboard.html %} +~~~ sql +SET enable_super_regions = 'on'; +~~~ + +~~~ +SET +~~~ + +You can also set the `sql.defaults.super_regions.enabled` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) to `true`: + +{% include_cached copy-clipboard.html %} +~~~ sql +SET CLUSTER SETTING sql.defaults.super_regions.enabled = true; +~~~ + +~~~ +SET CLUSTER SETTING +~~~ diff --git a/src/current/_includes/v25.3/sql/export-csv-tsv.md b/src/current/_includes/v25.3/sql/export-csv-tsv.md new file mode 100644 index 00000000000..ea1e69968a0 --- /dev/null +++ b/src/current/_includes/v25.3/sql/export-csv-tsv.md @@ -0,0 +1,11 @@ +[`IMPORT INTO`](import-into.html) requires that you export one file per table with the following attributes: + +- Files must be in valid [CSV](https://tools.ietf.org/html/rfc4180) (comma-separated values) or [TSV](https://www.iana.org/assignments/media-types/text/tab-separated-values) (tab-separated values) format. +- The delimiter must be a single character. Use the [`delimiter` option](import-into.html#import-options) to set a character other than a comma (such as a tab, for TSV format). +- Files must be UTF-8 encoded. +- If one of the following characters appears in a field, the field must be enclosed by double quotes: + - Delimiter (`,` by default). + - Double quote (`"`). Because the field will be enclosed by double quotes, escape a double quote inside a field by preceding it with another double quote. For example: `"aaa","b""bb","ccc"`. + - Newline (`\n`). + - Carriage return (`\r`). +- If a column is of type [`BYTES`](bytes.html), it can either be a valid UTF-8 string or a [hex-encoded byte literal](sql-constants.html#hexadecimal-encoded-byte-array-literals) beginning with `\x`. For example, a field whose value should be the bytes `1`, `2` would be written as `\x0102`. 
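As a sketch of how a file exported under these rules might then be loaded, assuming a hypothetical `users` table and a placeholder storage URL (the bucket path and `AUTH` parameter are not from the include above):

~~~ sql
-- Load a tab-separated file; the delimiter option overrides the default comma.
IMPORT INTO users (id, name)
    CSV DATA ('s3://bucket/users.tsv?AUTH=implicit')
    WITH delimiter = e'\t';
~~~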
\ No newline at end of file diff --git a/src/current/_includes/v25.3/sql/function-special-forms.md b/src/current/_includes/v25.3/sql/function-special-forms.md new file mode 100644 index 00000000000..b9ac987444a --- /dev/null +++ b/src/current/_includes/v25.3/sql/function-special-forms.md @@ -0,0 +1,29 @@ +| Special form | Equivalent to | +|-----------------------------------------------------------|---------------------------------------------| +| `AT TIME ZONE` | `timezone()` | +| `CURRENT_CATALOG` | `current_catalog()` | +| `COLLATION FOR` | `pg_collation_for()` | +| `CURRENT_DATE` | `current_date()` | +| `CURRENT_ROLE` | `current_user()` | +| `CURRENT_SCHEMA` | `current_schema()` | +| `CURRENT_TIMESTAMP` | `current_timestamp()` | +| `CURRENT_TIME` | `current_time()` | +| `CURRENT_USER` | `current_user()` | +| `EXTRACT(<part> FROM <value>)` | `extract("<part>", <value>)` | +| `EXTRACT_DURATION(<part> FROM <value>)` | `extract_duration("<part>", <value>)` | +| `OVERLAY(<text> PLACING <replacement> FROM <start> FOR <num_chars>)` | `overlay(<text>, <replacement>, <start>, <num_chars>)` | +| `OVERLAY(<text> PLACING <replacement> FROM <start>)` | `overlay(<text>, <replacement>, <start>)` | +| `POSITION(<search> IN <text>)` | `strpos(<text>, <search>)` | +| `SESSION_USER` | `current_user()` | +| `SUBSTRING(<text> FOR <num_chars> FROM <start>)` | `substring(<text>, <start>, <num_chars>)` | +| `SUBSTRING(<text> FOR <num_chars>)` | `substring(<text>, 1, <num_chars>)` | +| `SUBSTRING(<text> FROM <start> FOR <num_chars>)` | `substring(<text>, <start>, <num_chars>)` | +| `SUBSTRING(<text> FROM <start>)` | `substring(<text>, <start>)` | +| `TRIM(<chars> FROM <text>)` | `btrim(<text>, <chars>)` | +| `TRIM(<text>, <chars>)` | `btrim(<text>, <chars>)` | +| `TRIM(FROM <text>)` | `btrim(<text>)` | +| `TRIM(LEADING <chars> FROM <text>)` | `ltrim(<text>, <chars>)` | +| `TRIM(LEADING FROM <text>)` | `ltrim(<text>)` | +| `TRIM(TRAILING <chars> FROM <text>)` | `rtrim(<text>, <chars>)` | +| `TRIM(TRAILING FROM <text>)` | `rtrim(<text>)` | +| `USER` | `current_user()` | diff --git a/src/current/_includes/v25.3/sql/global-table-description.md b/src/current/_includes/v25.3/sql/global-table-description.md new file mode 100644 index 00000000000..515f1dbd4c4 --- /dev/null +++ b/src/current/_includes/v25.3/sql/global-table-description.md @@ -0,0 +1,7 @@ +A _global_ table is optimized for low-latency reads from every region in the database. This means that any region can effectively act as the home region of the table. The tradeoff is that writes will incur higher latencies from any given region, since writes have to be replicated across every region to make the global low-latency reads possible. Use global tables when your application has a "read-mostly" table of reference data that is rarely updated, and needs to be available to all regions. + +For an example of a table that can benefit from the _global_ table locality setting in a multi-region deployment, see the `promo_codes` table from the [MovR application]({% link {{ page.version.version }}/movr.md %}). + +For instructions showing how to set a table's locality to `GLOBAL`, see [`ALTER TABLE ... SET LOCALITY`]({% link {{ page.version.version }}/alter-table.md %}#global). + +For more information about global tables, including troubleshooting information, see [Global Tables]({% link {{ page.version.version }}/global-tables.md %}). diff --git a/src/current/_includes/v25.3/sql/import-into-default-value.md b/src/current/_includes/v25.3/sql/import-into-default-value.md new file mode 100644 index 00000000000..e7ef86fa18b --- /dev/null +++ b/src/current/_includes/v25.3/sql/import-into-default-value.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_danger}} +Column values cannot be generated by [`DEFAULT`]({% link {{ page.version.version }}/default-value.md %}) when importing; an import must include a value for every column specified in the `IMPORT INTO` statement.
To use `DEFAULT` values, your file must contain values for the column upon import, or you can [add the column]({% link {{ page.version.version }}/alter-table.md %}#add-column) or [alter the column]({% link {{ page.version.version }}/alter-table.md %}#set-or-change-a-default-value) after the table has been imported. +{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/sql/import-into-regional-by-row-table.md b/src/current/_includes/v25.3/sql/import-into-regional-by-row-table.md new file mode 100644 index 00000000000..ffb93fc8046 --- /dev/null +++ b/src/current/_includes/v25.3/sql/import-into-regional-by-row-table.md @@ -0,0 +1 @@ +`IMPORT INTO` cannot directly import data to [`REGIONAL BY ROW`]({% link {{ page.version.version }}/alter-table.md %}#regional-by-row) tables that are part of [multi-region databases]({% link {{ page.version.version }}/multiregion-overview.md %}). For more information, including a workaround for this limitation, see [Known Limitations]({% link {{ page.version.version }}/known-limitations.md %}#import-into-a-regional-by-row-table). diff --git a/src/current/_includes/v25.3/sql/indexes-regional-by-row.md b/src/current/_includes/v25.3/sql/indexes-regional-by-row.md new file mode 100644 index 00000000000..e02a9abcafb --- /dev/null +++ b/src/current/_includes/v25.3/sql/indexes-regional-by-row.md @@ -0,0 +1,3 @@ + In [multi-region deployments]({% link {{ page.version.version }}/multiregion-overview.md %}), most users should use [`REGIONAL BY ROW` tables]({% link {{ page.version.version }}/table-localities.md %}#regional-by-row-tables) instead of explicit index [partitioning]({% link {{ page.version.version }}/partitioning.md %}). When you add an index to a `REGIONAL BY ROW` table, it is automatically partitioned on the [`crdb_region` column](alter-table.html#crdb_region). Explicit index partitioning is not required. + + While CockroachDB processes an [`ADD REGION`]({% link {{ page.version.version }}/alter-database.md %}#add-region) or [`DROP REGION`]({% link {{ page.version.version }}/alter-database.md %}#drop-region) statement on a particular database, creating or modifying an index will throw an error. Similarly, all [`ADD REGION`]({% link {{ page.version.version }}/alter-database.md %}#add-region) and [`DROP REGION`]({% link {{ page.version.version }}/alter-database.md %}#drop-region) statements will be blocked while an index is being modified on a `REGIONAL BY ROW` table within the same database. diff --git a/src/current/_includes/v25.3/sql/insert-vs-upsert.md b/src/current/_includes/v25.3/sql/insert-vs-upsert.md new file mode 100644 index 00000000000..f22d20ea511 --- /dev/null +++ b/src/current/_includes/v25.3/sql/insert-vs-upsert.md @@ -0,0 +1,3 @@ +When inserting or updating columns on a table that does not have [secondary indexes]({% link {{ page.version.version }}/indexes.md %}), Cockroach Labs recommends using an `UPSERT` statement instead of `INSERT ON CONFLICT DO UPDATE`. Whereas `INSERT ON CONFLICT` always performs a read, the `UPSERT` statement writes without reading, making it faster. This may be useful if you are using a simple SQL table of two columns to [simulate direct KV access]({% link {{ page.version.version }}/sql-faqs.md %}#can-i-use-cockroachdb-as-a-key-value-store). + +If the table has a secondary index, there is no performance difference between `UPSERT` and `INSERT ON CONFLICT`. However, `INSERT` without an `ON CONFLICT` clause may not scan the table for existing values. This can provide a performance improvement over `UPSERT`.
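To make the contrast concrete, here is a minimal sketch against a hypothetical two-column `kv` table (the table and column names are placeholders):

~~~ sql
-- Blind write: on a table with no secondary indexes, this avoids the read.
UPSERT INTO kv (k, v) VALUES (1, 'b');

-- Equivalent end state, but always reads the existing row first.
INSERT INTO kv (k, v) VALUES (1, 'b')
    ON CONFLICT (k) DO UPDATE SET v = excluded.v;
~~~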
\ No newline at end of file diff --git a/src/current/_includes/v25.3/sql/inverted-joins.md b/src/current/_includes/v25.3/sql/inverted-joins.md new file mode 100644 index 00000000000..a66d0a651fe --- /dev/null +++ b/src/current/_includes/v25.3/sql/inverted-joins.md @@ -0,0 +1,97 @@ +To run these examples, initialize a demo cluster with the MovR workload. + +{% include {{ page.version.version }}/demo_movr.md %} + +Create a GIN index on the `vehicles` table's `ext` column. + +{% include_cached copy-clipboard.html %} +~~~ sql +CREATE INVERTED INDEX idx_vehicle_details ON vehicles(ext); +~~~ + +Check the statement plan for a `SELECT` statement that uses an inner inverted join. + +{% include_cached copy-clipboard.html %} +~~~ sql +EXPLAIN SELECT * FROM vehicles@vehicles_pkey AS v2 INNER INVERTED JOIN vehicles@idx_vehicle_details AS v1 ON v1.ext @> v2.ext; +~~~ + +~~~ + info +--------------------------------------------- + distribution: full + vectorized: true + + • lookup join + │ table: vehicles@vehicles_pkey + │ equality: (city, id) = (city,id) + │ equality cols are key + │ pred: ext @> ext + │ + └── • inverted join + │ table: vehicles@idx_vehicle_details + │ + └── • scan + estimated row count: 3,750 (100% of the table; stats collected 1 hour ago) + table: vehicles@vehicles_pkey + spans: FULL SCAN +(16 rows) +~~~ + +You can omit the `INNER INVERTED JOIN` statement by putting `v1.ext` on the left side of a `@>` join condition in a `WHERE` clause and using an [index hint]({% link {{ page.version.version }}/table-expressions.md %}#force-index-selection) for the GIN index. + +{% include_cached copy-clipboard.html %} +~~~ sql +EXPLAIN SELECT * FROM vehicles@idx_vehicle_details AS v1, vehicles AS v2 WHERE v1.ext @> v2.ext; +~~~ + +~~~ + info +-------------------------------------------------------------------------------------------- + distribution: full + vectorized: true + + • lookup join + │ table: vehicles@vehicles_pkey + │ equality: (city, id) = (city,id) + │ equality cols are key + │ pred: ext @> ext + │ + └── • inverted join + │ table: vehicles@idx_vehicle_details + │ + └── • scan + estimated row count: 3,750 (100% of the table; stats collected 1 hour ago) + table: vehicles@vehicles_pkey + spans: FULL SCAN +(16 rows) +~~~ + +Use the `LEFT INVERTED JOIN` hint to perform a left inverted join. + +{% include_cached copy-clipboard.html %} +~~~ sql +EXPLAIN SELECT * FROM vehicles AS v2 LEFT INVERTED JOIN vehicles AS v1 ON v1.ext @> v2.ext; +~~~ + +~~~ + info +------------------------------------------------------------------------------------------ + distribution: full + vectorized: true + + • lookup join (left outer) + │ table: vehicles@vehicles_pkey + │ equality: (city, id) = (city,id) + │ equality cols are key + │ pred: ext @> ext + │ + └── • inverted join (left outer) + │ table: vehicles@idx_vehicle_details + │ + └── • scan + estimated row count: 3,750 (100% of the table; stats collected 1 hour ago) + table: vehicles@vehicles_pkey + spans: FULL SCAN +(16 rows) +~~~ diff --git a/src/current/_includes/v25.3/sql/isolation-levels.md b/src/current/_includes/v25.3/sql/isolation-levels.md new file mode 100644 index 00000000000..9d773891176 --- /dev/null +++ b/src/current/_includes/v25.3/sql/isolation-levels.md @@ -0,0 +1,7 @@ +Isolation is an element of [ACID transactions](https://en.wikipedia.org/wiki/ACID) that determines how concurrency is controlled, and ultimately guarantees consistency. 
CockroachDB offers two transaction isolation levels: [`SERIALIZABLE`]({% link {{ page.version.version }}/demo-serializable.md %}) and [`READ COMMITTED`]({% link {{ page.version.version }}/read-committed.md %}). + +By default, CockroachDB executes all transactions at the strongest ANSI transaction isolation level: `SERIALIZABLE`, which permits no concurrency anomalies. To place all transactions in a serializable ordering, `SERIALIZABLE` isolation may require [transaction restarts]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}) and [client-side retry handling]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}#client-side-retry-handling). For a demonstration of how `SERIALIZABLE` prevents anomalies such as write skew, refer to [Serializable Transactions]({% link {{ page.version.version }}/demo-serializable.md %}). + +`READ COMMITTED` permits some concurrency anomalies in exchange for minimizing transaction aborts and removing the need for client-side retries. Depending on your workload requirements, this may be desirable. For more information, refer to [Read Committed Transactions]({% link {{ page.version.version }}/read-committed.md %}). + +{% include {{ page.version.version }}/sql/mixed-isolation-levels.md %} \ No newline at end of file diff --git a/src/current/_includes/v25.3/sql/limit-row-size.md b/src/current/_includes/v25.3/sql/limit-row-size.md new file mode 100644 index 00000000000..ae9e95a1391 --- /dev/null +++ b/src/current/_includes/v25.3/sql/limit-row-size.md @@ -0,0 +1,22 @@ +## Limit the size of rows + +To help you avoid failures arising from misbehaving applications that bloat the size of rows, you can specify the behavior when a row or individual [column family]({% link {{ page.version.version }}/column-families.md %}) larger than a specified size is written to the database. Use the [cluster settings]({% link {{ page.version.version }}/cluster-settings.md %}) `sql.guardrails.max_row_size_log` to discover large rows and `sql.guardrails.max_row_size_err` to reject large rows. + +When you write a row that exceeds `sql.guardrails.max_row_size_log`: + +- `INSERT`, `UPSERT`, `UPDATE`, `CREATE TABLE AS`, `CREATE INDEX`, `ALTER TABLE`, `ALTER INDEX`, `IMPORT`, or `RESTORE` statements will log a `LargeRow` to the [`SQL_PERF`]({% link {{ page.version.version }}/logging.md %}#sql_perf) channel. +- `SELECT`, `DELETE`, `TRUNCATE`, and `DROP` are not affected. + +When you write a row that exceeds `sql.guardrails.max_row_size_err`: + +- `INSERT`, `UPSERT`, and `UPDATE` statements will fail with a code `54000 (program_limit_exceeded)` error. + +- `CREATE TABLE AS`, `CREATE INDEX`, `ALTER TABLE`, `ALTER INDEX`, `IMPORT`, and `RESTORE` statements will log a `LargeRowInternal` event to the [`SQL_INTERNAL_PERF`]({% link {{ page.version.version }}/logging.md %}#sql_internal_perf) channel. + +- `SELECT`, `DELETE`, `TRUNCATE`, and `DROP` are not affected. + +You **cannot** update existing rows that violate the limit unless the update shrinks the size of the +row below the limit. You **can** select, delete, alter, back up, and restore such rows. We +recommend using the accompanying setting `sql.guardrails.max_row_size_log` in conjunction with +`SELECT pg_column_size()` queries to detect and fix any existing large rows before lowering +`sql.guardrails.max_row_size_err`. 
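A short sketch of that workflow, assuming illustrative thresholds and a hypothetical `users` table with a `profile` column (`pg_column_size()` returns the byte size of a value):

~~~ sql
-- Log rows larger than 1 MiB; reject rows larger than 8 MiB.
SET CLUSTER SETTING sql.guardrails.max_row_size_log = '1MiB';
SET CLUSTER SETTING sql.guardrails.max_row_size_err = '8MiB';

-- Find existing rows that already exceed the log threshold (1 MiB = 1048576 bytes).
SELECT id, pg_column_size(profile) AS profile_bytes
  FROM users
 WHERE pg_column_size(profile) > 1048576;
~~~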
diff --git a/src/current/_includes/v25.3/sql/locality-optimized-search.md b/src/current/_includes/v25.3/sql/locality-optimized-search.md new file mode 100644 index 00000000000..23cac1bc9d9 --- /dev/null +++ b/src/current/_includes/v25.3/sql/locality-optimized-search.md @@ -0,0 +1 @@ +Note that the [SQL engine]({% link {{ page.version.version }}/architecture/sql-layer.md %}) will avoid sending requests to nodes in other regions when it can instead read a value from a unique column that is stored locally. This capability is known as [_locality optimized search_]({% link {{ page.version.version }}/cost-based-optimizer.md %}#locality-optimized-search-in-multi-region-clusters). diff --git a/src/current/_includes/v25.3/sql/macos-terminal-configuration.md b/src/current/_includes/v25.3/sql/macos-terminal-configuration.md new file mode 100644 index 00000000000..5b636259ce1 --- /dev/null +++ b/src/current/_includes/v25.3/sql/macos-terminal-configuration.md @@ -0,0 +1,14 @@ +In **Apple Terminal**: + +1. Navigate to "Preferences", then "Profiles", then "Keyboard". +1. Enable the checkbox "Use Option as Meta Key". + +Apple Terminal Alt key configuration + +In **iTerm2**: + +1. Navigate to "Preferences", then "Profiles", then "Keys". +1. Select the radio button "Esc+" for the behavior of the Left Option Key. + +iTerm2 Alt key configuration + diff --git a/src/current/_includes/v25.3/sql/mixed-isolation-levels.md b/src/current/_includes/v25.3/sql/mixed-isolation-levels.md new file mode 100644 index 00000000000..440e29f24bd --- /dev/null +++ b/src/current/_includes/v25.3/sql/mixed-isolation-levels.md @@ -0,0 +1,12 @@ +{% if page.name == "transactions.md" %}### Mixed isolation levels{% else if page.name == "transaction-layer.md" %}#### Mixed isolation levels{% endif %} + +Regardless of the isolation levels of other transactions, transactions behave according to their respective isolation levels: Statements in `SERIALIZABLE` transactions see data that committed before the transaction began, whereas statements in `READ COMMITTED` transactions see data that committed before each **statement** began. Therefore: + +- If a `READ COMMITTED` transaction `R` commits before a `SERIALIZABLE` transaction `S`, every statement in `S` will observe all writes from `R`. Otherwise, `S` will not observe any writes from `R`. +- If a `SERIALIZABLE` transaction `S` commits before a `READ COMMITTED` transaction `R`, every **subsequent** statement in `R` will observe all writes from `S`. Otherwise, `R` will not observe any writes from `S`. + +However, there is one difference in how `SERIALIZABLE` writes affect non-locking reads: While writes in a `SERIALIZABLE` transaction can block reads in concurrent `SERIALIZABLE` transactions, they will **not** block reads in concurrent `READ COMMITTED` transactions. Writes in a `READ COMMITTED` transaction will never block reads in concurrent transactions, regardless of their isolation levels. Therefore: + +- If a `READ COMMITTED` transaction `R` writes but does not commit before a `SERIALIZABLE` transaction `S`, no statement in `S` will observe or be blocked by any uncommitted writes from `R`. +- If a `SERIALIZABLE` transaction `S` writes but does not commit before a `READ COMMITTED` transaction `R`, no statement in `R` will observe or be blocked by any uncommitted writes from `S`. 
+- If a `SERIALIZABLE` transaction `S1` writes but does not commit before a `SERIALIZABLE` transaction `S2`, the first statement in `S2` that would observe an uncommitted write from `S1` will be blocked until `S1` commits or aborts. \ No newline at end of file diff --git a/src/current/_includes/v25.3/sql/movr-start-nodes.md b/src/current/_includes/v25.3/sql/movr-start-nodes.md new file mode 100644 index 00000000000..3af9ecfbf2b --- /dev/null +++ b/src/current/_includes/v25.3/sql/movr-start-nodes.md @@ -0,0 +1,6 @@ +Run [`cockroach demo`]({% link {{ page.version.version }}/cockroach-demo.md %}) with the [`--nodes`]({% link {{ page.version.version }}/cockroach-demo.md %}#flags) and [`--demo-locality`]({% link {{ page.version.version }}/cockroach-demo.md %}#flags) flags. This command opens an interactive SQL shell to a temporary, multi-node in-memory cluster with the `movr` database preloaded and set as the [current database]({% link {{ page.version.version }}/sql-name-resolution.md %}#current-database). + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach demo --nodes=3 --demo-locality=region=us-east1:region=us-central1:region=us-west1 + ~~~ diff --git a/src/current/_includes/v25.3/sql/movr-start.md b/src/current/_includes/v25.3/sql/movr-start.md new file mode 100644 index 00000000000..2c1bb50abf2 --- /dev/null +++ b/src/current/_includes/v25.3/sql/movr-start.md @@ -0,0 +1,62 @@ +- Run [`cockroach demo`]({% link {{ page.version.version }}/cockroach-demo.md %}) to start a temporary, in-memory cluster with the `movr` dataset preloaded: + + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach demo + ~~~ + +- Load the `movr` dataset into a persistent local cluster and open an interactive SQL shell: + 1. Start a [secure]({% link {{ page.version.version }}/secure-a-cluster.md %}) or [insecure]({% link {{ page.version.version }}/start-a-local-cluster.md %}) local cluster. + 1. Use [`cockroach workload`]({% link {{ page.version.version }}/cockroach-workload.md %}) to load the `movr` dataset: +
+ <div class="filters clearfix"> + <button class="filter-button page-level" data-scope="secure">Secure</button> + <button class="filter-button page-level" data-scope="insecure">Insecure</button>
+ </div> +
+ <section class="filter-content" markdown="1" data-scope="secure"> + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach workload init movr 'postgresql://root@localhost:26257?sslcert=certs%2Fclient.root.crt&sslkey=certs%2Fclient.root.key&sslmode=verify-full&sslrootcert=certs%2Fca.crt' + ~~~ + +
+ </section> +
+ <section class="filter-content" markdown="1" data-scope="insecure"> + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach workload init movr 'postgresql://root@localhost:26257?sslmode=disable' + ~~~ + + </section>
+ 1. Use [`cockroach sql`]({% link {{ page.version.version }}/cockroach-sql.md %}) to open an interactive SQL shell and set `movr` as the [current database]({% link {{ page.version.version }}/sql-name-resolution.md %}#current-database): +
+ <section class="filter-content" markdown="1" data-scope="secure"> + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --certs-dir=certs --host=localhost:26257 + ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ sql + > USE movr; + ~~~ + +
+ </section> +
+ <section class="filter-content" markdown="1" data-scope="insecure"> + {% include_cached copy-clipboard.html %} + ~~~ shell + $ cockroach sql --insecure --host=localhost:26257 + ~~~ + + {% include_cached copy-clipboard.html %} + ~~~ sql + > USE movr; + ~~~ + + </section>
diff --git a/src/current/_includes/v25.3/sql/movr-statements-geo-partitioned-replicas.md b/src/current/_includes/v25.3/sql/movr-statements-geo-partitioned-replicas.md new file mode 100644 index 00000000000..a6a72589436 --- /dev/null +++ b/src/current/_includes/v25.3/sql/movr-statements-geo-partitioned-replicas.md @@ -0,0 +1,10 @@ +#### Setup + +The following examples use MovR, a fictional vehicle-sharing application, to demonstrate CockroachDB SQL statements. For more information about the MovR example application and dataset, see [MovR: A Global Vehicle-sharing App]({% link {{ page.version.version }}/movr.md %}). + +To follow along, run [`cockroach demo`]({% link {{ page.version.version }}/cockroach-demo.md %}) with the `--geo-partitioned-replicas` flag. This command opens an interactive SQL shell to a temporary, 9-node in-memory cluster with the `movr` database. + +{% include_cached copy-clipboard.html %} +~~~ shell +$ cockroach demo --geo-partitioned-replicas +~~~ diff --git a/src/current/_includes/v25.3/sql/movr-statements-nodes.md b/src/current/_includes/v25.3/sql/movr-statements-nodes.md new file mode 100644 index 00000000000..603cf823a13 --- /dev/null +++ b/src/current/_includes/v25.3/sql/movr-statements-nodes.md @@ -0,0 +1,10 @@ +### Setup + +The following examples use MovR, a fictional vehicle-sharing application, to demonstrate CockroachDB SQL statements. For more information about the MovR example application and dataset, see [MovR: A Global Vehicle-sharing App]({% link {{ page.version.version }}/movr.md %}). + +To follow along, run [`cockroach demo`]({% link {{ page.version.version }}/cockroach-demo.md %}) with the [`--nodes`]({% link {{ page.version.version }}/cockroach-demo.md %}#flags) and [`--demo-locality`]({% link {{ page.version.version }}/cockroach-demo.md %}#flags) flags. This command opens an interactive SQL shell to a temporary, multi-node in-memory cluster with the `movr` database preloaded and set as the [current database]({% link {{ page.version.version }}/sql-name-resolution.md %}#current-database). + +{% include_cached copy-clipboard.html %} +~~~ shell +$ cockroach demo --nodes=6 --demo-locality=region=us-east,zone=us-east-a:region=us-east,zone=us-east-b:region=us-central,zone=us-central-a:region=us-central,zone=us-central-b:region=us-west,zone=us-west-a:region=us-west,zone=us-west-b +~~~ diff --git a/src/current/_includes/v25.3/sql/movr-statements-partitioning.md b/src/current/_includes/v25.3/sql/movr-statements-partitioning.md new file mode 100644 index 00000000000..d0e2468269a --- /dev/null +++ b/src/current/_includes/v25.3/sql/movr-statements-partitioning.md @@ -0,0 +1,10 @@ +The following examples use MovR, a fictional vehicle-sharing application, to demonstrate CockroachDB SQL statements. For more information about the MovR example application and dataset, see [MovR: A Global Vehicle-sharing App]({% link {{ page.version.version }}/movr.md %}). + +To follow along with the examples below, open a new terminal and run [`cockroach demo`]({% link {{ page.version.version }}/cockroach-demo.md %}) with the [`--nodes`]({% link {{ page.version.version }}/cockroach-demo.md %}#flags) and [`--demo-locality`]({% link {{ page.version.version }}/cockroach-demo.md %}#flags) flags. This command opens an interactive SQL shell to a temporary, multi-node in-memory cluster with the `movr` database preloaded and set as the [current database]({% link {{ page.version.version }}/sql-name-resolution.md %}#current-database). 
+ +{% include_cached copy-clipboard.html %} +~~~ shell +$ cockroach demo \ +--nodes=9 \ +--demo-locality=region=us-east1:region=us-east1:region=us-east1:region=us-central1:region=us-central1:region=us-central1:region=us-west1:region=us-west1:region=us-west1 +~~~ diff --git a/src/current/_includes/v25.3/sql/movr-statements.md b/src/current/_includes/v25.3/sql/movr-statements.md new file mode 100644 index 00000000000..457e2b3a38a --- /dev/null +++ b/src/current/_includes/v25.3/sql/movr-statements.md @@ -0,0 +1,8 @@ +#### Setup + +To follow along, run [`cockroach demo`]({% link {{ page.version.version }}/cockroach-demo.md %}) to start a temporary, in-memory cluster with the [`movr`]({% link {{ page.version.version }}/movr.md %}) sample dataset preloaded: + +{% include_cached copy-clipboard.html %} +~~~ shell +$ cockroach demo +~~~ \ No newline at end of file diff --git a/src/current/_includes/v25.3/sql/multiregion-example-setup.md b/src/current/_includes/v25.3/sql/multiregion-example-setup.md new file mode 100644 index 00000000000..9b99539ed4c --- /dev/null +++ b/src/current/_includes/v25.3/sql/multiregion-example-setup.md @@ -0,0 +1,26 @@ +#### Setup + +Only a [cluster region]({% link {{ page.version.version }}/multiregion-overview.md %}#cluster-regions) specified [at node startup]({% link {{ page.version.version }}/cockroach-start.md %}#locality) can be used as a [database region]({% link {{ page.version.version }}/multiregion-overview.md %}#database-regions). + +To follow along with the examples in this section, start a [demo cluster]({% link {{ page.version.version }}/cockroach-demo.md %}) with the [`--global` flag]({% link {{ page.version.version }}/cockroach-demo.md %}#general) to simulate a multi-region cluster: + +{% include_cached copy-clipboard.html %} +~~~ shell +$ cockroach demo --global --nodes 9 +~~~ + +To see the regions available to the databases in the cluster, use a [`SHOW REGIONS FROM CLUSTER`]({% link {{ page.version.version }}/show-regions.md %}#view-the-regions-in-a-cluster) statement: + +{% include_cached copy-clipboard.html %} +~~~ sql +SHOW REGIONS FROM CLUSTER; +~~~ + +~~~ + region | zones +---------------+---------- + europe-west1 | {b,c,d} + us-east1 | {b,c,d} + us-west1 | {a,b,c} +(3 rows) +~~~ \ No newline at end of file diff --git a/src/current/_includes/v25.3/sql/multiregion-movr-add-regions.md b/src/current/_includes/v25.3/sql/multiregion-movr-add-regions.md new file mode 100644 index 00000000000..f5cf62f6dd6 --- /dev/null +++ b/src/current/_includes/v25.3/sql/multiregion-movr-add-regions.md @@ -0,0 +1,8 @@ +Execute the following statements. They will tell CockroachDB about the database's regions. This information is necessary so that CockroachDB can later move data around to optimize access to particular data from particular regions. For more information about how this works at a high level, see [Database Regions]({% link {{ page.version.version }}/multiregion-overview.md %}#database-regions). 
+ +{% include_cached copy-clipboard.html %} +~~~ sql +ALTER DATABASE movr PRIMARY REGION "us-east1"; +ALTER DATABASE movr ADD REGION "europe-west1"; +ALTER DATABASE movr ADD REGION "us-west1"; +~~~ diff --git a/src/current/_includes/v25.3/sql/multiregion-movr-global.md b/src/current/_includes/v25.3/sql/multiregion-movr-global.md new file mode 100644 index 00000000000..e1571108f3a --- /dev/null +++ b/src/current/_includes/v25.3/sql/multiregion-movr-global.md @@ -0,0 +1,17 @@ +Because the data in `promo_codes` is not updated frequently (that is, it is "read-mostly") and needs to be available from any region, the right table locality is [`GLOBAL`]({% link {{ page.version.version }}/table-localities.md %}#global-tables). + +{% include_cached copy-clipboard.html %} +~~~ sql +ALTER TABLE promo_codes SET LOCALITY GLOBAL; +~~~ + +Next, alter the `user_promo_codes` table to have a foreign key into the global `promo_codes` table. This enables fast reads of the `promo_codes.code` column from any region in the cluster. + +{% include_cached copy-clipboard.html %} +~~~ sql +ALTER TABLE user_promo_codes + ADD CONSTRAINT user_promo_codes_code_fk + FOREIGN KEY (code) + REFERENCES promo_codes (code) + ON UPDATE CASCADE; +~~~ diff --git a/src/current/_includes/v25.3/sql/multiregion-movr-regional-by-row.md b/src/current/_includes/v25.3/sql/multiregion-movr-regional-by-row.md new file mode 100644 index 00000000000..a2c1cfdb5b9 --- /dev/null +++ b/src/current/_includes/v25.3/sql/multiregion-movr-regional-by-row.md @@ -0,0 +1,103 @@ +All of the tables except `promo_codes` contain rows that are partitioned by region and updated very frequently. For these tables, the right table locality for optimizing access to their data is [`REGIONAL BY ROW`]({% link {{ page.version.version }}/table-localities.md %}#regional-by-row-tables). + +Apply this table locality to the remaining tables. These statements use a `CASE` statement to put data for a given city in the right region and can take around 1 minute to complete for each table.
+ +- `rides` + + {% include_cached copy-clipboard.html %} + ~~~ sql + ALTER TABLE rides ADD COLUMN region crdb_internal_region AS ( + CASE WHEN city = 'amsterdam' THEN 'europe-west1' + WHEN city = 'paris' THEN 'europe-west1' + WHEN city = 'rome' THEN 'europe-west1' + WHEN city = 'new york' THEN 'us-east1' + WHEN city = 'boston' THEN 'us-east1' + WHEN city = 'washington dc' THEN 'us-east1' + WHEN city = 'san francisco' THEN 'us-west1' + WHEN city = 'seattle' THEN 'us-west1' + WHEN city = 'los angeles' THEN 'us-west1' + END + ) STORED; + ALTER TABLE rides ALTER COLUMN REGION SET NOT NULL; + ALTER TABLE rides SET LOCALITY REGIONAL BY ROW AS "region"; + ~~~ + +- `user_promo_codes` + + {% include_cached copy-clipboard.html %} + ~~~ sql + ALTER TABLE user_promo_codes ADD COLUMN region crdb_internal_region AS ( + CASE WHEN city = 'amsterdam' THEN 'europe-west1' + WHEN city = 'paris' THEN 'europe-west1' + WHEN city = 'rome' THEN 'europe-west1' + WHEN city = 'new york' THEN 'us-east1' + WHEN city = 'boston' THEN 'us-east1' + WHEN city = 'washington dc' THEN 'us-east1' + WHEN city = 'san francisco' THEN 'us-west1' + WHEN city = 'seattle' THEN 'us-west1' + WHEN city = 'los angeles' THEN 'us-west1' + END + ) STORED; + ALTER TABLE user_promo_codes ALTER COLUMN REGION SET NOT NULL; + ALTER TABLE user_promo_codes SET LOCALITY REGIONAL BY ROW AS "region"; + ~~~ + +- `users` + + {% include_cached copy-clipboard.html %} + ~~~ sql + ALTER TABLE users ADD COLUMN region crdb_internal_region AS ( + CASE WHEN city = 'amsterdam' THEN 'europe-west1' + WHEN city = 'paris' THEN 'europe-west1' + WHEN city = 'rome' THEN 'europe-west1' + WHEN city = 'new york' THEN 'us-east1' + WHEN city = 'boston' THEN 'us-east1' + WHEN city = 'washington dc' THEN 'us-east1' + WHEN city = 'san francisco' THEN 'us-west1' + WHEN city = 'seattle' THEN 'us-west1' + WHEN city = 'los angeles' THEN 'us-west1' + END + ) STORED; + ALTER TABLE users ALTER COLUMN REGION SET NOT NULL; + ALTER TABLE users SET LOCALITY REGIONAL BY ROW AS "region"; + ~~~ + +- `vehicle_location_histories` + + {% include_cached copy-clipboard.html %} + ~~~ sql + ALTER TABLE vehicle_location_histories ADD COLUMN region crdb_internal_region AS ( + CASE WHEN city = 'amsterdam' THEN 'europe-west1' + WHEN city = 'paris' THEN 'europe-west1' + WHEN city = 'rome' THEN 'europe-west1' + WHEN city = 'new york' THEN 'us-east1' + WHEN city = 'boston' THEN 'us-east1' + WHEN city = 'washington dc' THEN 'us-east1' + WHEN city = 'san francisco' THEN 'us-west1' + WHEN city = 'seattle' THEN 'us-west1' + WHEN city = 'los angeles' THEN 'us-west1' + END + ) STORED; + ALTER TABLE vehicle_location_histories ALTER COLUMN REGION SET NOT NULL; + ALTER TABLE vehicle_location_histories SET LOCALITY REGIONAL BY ROW AS "region"; + ~~~ + +- `vehicles` + + {% include_cached copy-clipboard.html %} + ~~~ sql + ALTER TABLE vehicles ADD COLUMN region crdb_internal_region AS ( + CASE WHEN city = 'amsterdam' THEN 'europe-west1' + WHEN city = 'paris' THEN 'europe-west1' + WHEN city = 'rome' THEN 'europe-west1' + WHEN city = 'new york' THEN 'us-east1' + WHEN city = 'boston' THEN 'us-east1' + WHEN city = 'washington dc' THEN 'us-east1' + WHEN city = 'san francisco' THEN 'us-west1' + WHEN city = 'seattle' THEN 'us-west1' + WHEN city = 'los angeles' THEN 'us-west1' + END + ) STORED; + ALTER TABLE vehicles ALTER COLUMN REGION SET NOT NULL; + ALTER TABLE vehicles SET LOCALITY REGIONAL BY ROW AS "region"; + ~~~ diff --git a/src/current/_includes/v25.3/sql/no-full-scan.md 
b/src/current/_includes/v25.3/sql/no-full-scan.md new file mode 100644 index 00000000000..304c2ab9697 --- /dev/null +++ b/src/current/_includes/v25.3/sql/no-full-scan.md @@ -0,0 +1,15 @@ +- To prevent the optimizer from planning a full scan for a specific table, specify the `NO_FULL_SCAN` index hint. For example: + + {% include_cached copy-clipboard.html %} + ~~~ sql + SELECT * FROM table_name@{NO_FULL_SCAN}; + ~~~ + +- To prevent a full scan of a [partial index]({% link {{ page.version.version }}/partial-indexes.md %}) for a specific table, you must specify `NO_FULL_SCAN` in combination with the index name using [`FORCE_INDEX`]({% link {{ page.version.version }}/table-expressions.md %}#force-index-selection). For example: + + {% include_cached copy-clipboard.html %} + ~~~ sql + SELECT * FROM table_name@{FORCE_INDEX=index_name,NO_FULL_SCAN} WHERE b > 0; + ~~~ + + This forces a constrained scan of the partial index. If a constrained scan of the partial index is not possible, an error is returned. \ No newline at end of file diff --git a/src/current/_includes/v25.3/sql/partially-visible-indexes.md b/src/current/_includes/v25.3/sql/partially-visible-indexes.md new file mode 100644 index 00000000000..83f251c8374 --- /dev/null +++ b/src/current/_includes/v25.3/sql/partially-visible-indexes.md @@ -0,0 +1 @@ +For the purposes of [index recommendations]({% link {{ page.version.version }}/explain.md %}#success-responses), partially visible indexes are treated as [not visible]({% link {{ page.version.version }}/alter-index.md %}#not-visible). If a partially visible index can be used to improve a query plan, the {% if page.name != "cost-based-optimizer.md" %}[optimizer]({% link {{ page.version.version }}/cost-based-optimizer.md %}){% else %}optimizer{% endif %} will recommend making it fully visible. For an example, refer to [Set an index as partially visible]({% link {{ page.version.version }}/alter-index.md %}#set-an-index-as-partially-visible). \ No newline at end of file diff --git a/src/current/_includes/v25.3/sql/physical-plan-url.md b/src/current/_includes/v25.3/sql/physical-plan-url.md new file mode 100644 index 00000000000..7ad2957a996 --- /dev/null +++ b/src/current/_includes/v25.3/sql/physical-plan-url.md @@ -0,0 +1 @@ +The generated physical statement plan is encoded into a byte string after the [fragment identifier (`#`)](https://wikipedia.org/wiki/Fragment_identifier) in the generated URL. The fragment is not sent to the web server; instead, the browser waits for the web server to return a `decode.html` resource, and then JavaScript on the web page decodes the fragment into a physical statement plan diagram. The statement plan is, therefore, not logged by a server external to the CockroachDB cluster and not exposed to the public internet. diff --git a/src/current/_includes/v25.3/sql/preloaded-databases.md b/src/current/_includes/v25.3/sql/preloaded-databases.md new file mode 100644 index 00000000000..f3418633c98 --- /dev/null +++ b/src/current/_includes/v25.3/sql/preloaded-databases.md @@ -0,0 +1,12 @@ +New clusters and existing clusters [upgraded]({% link {{ page.version.version }}/upgrade-cockroach-version.md %}) to {{ page.version.version }} or later will include auto-generated databases, with the following purposes: + +- The empty `defaultdb` database is used if a client does not specify a database in the [connection parameters]({% link {{ page.version.version }}/connection-parameters.md %}). 
+- The `movr` database contains data about users, vehicles, and rides for the vehicle-sharing app [MovR]({% link {{ page.version.version }}/movr.md %}) (only when the cluster is started using the [`demo` command]({% link {{ page.version.version }}/cockroach-demo.md %})). +- The empty `postgres` database is provided for compatibility with PostgreSQL client applications that require it. +- The `system` database contains CockroachDB metadata and is read-only. + +All databases except for the `system` database can be [deleted]({% link {{ page.version.version }}/drop-database.md %}) if they are not needed. + +{{site.data.alerts.callout_danger}} +Do not query the `system` database directly. Instead, use objects within the [system catalogs]({% link {{ page.version.version }}/system-catalogs.md %}). +{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/sql/privileges.md b/src/current/_includes/v25.3/sql/privileges.md new file mode 100644 index 00000000000..6d1d02cd516 --- /dev/null +++ b/src/current/_includes/v25.3/sql/privileges.md @@ -0,0 +1,37 @@ +Privilege | Levels | Description +----------|--------|------------ +`ALL` | System, Database, Schema, Table, Sequence, Type | For the object to which `ALL` is applied, grants all privileges at the system, database, schema, table, sequence, or type level. +`BACKUP` | System, Database, Table | Grants the ability to create [backups]({% link {{ page.version.version }}/backup-and-restore-overview.md %}) at the system, database, or table level. +`BYPASSRLS` | Table | Grants the ability to bypass [row-level security (RLS)]({% link {{ page.version.version }}/row-level-security.md %}) policies on a table. This privilege controls the access from an RLS perspective only; the user also needs sufficient [`GRANT`]({% link {{ page.version.version }}/grant.md %}) privileges to read or write to the table. +`CANCELQUERY` | System | Grants the ability to cancel queries. +`CHANGEFEED` | Table | Grants the ability to create [changefeeds]({% link {{ page.version.version }}/change-data-capture-overview.md %}) on a table. +`CONNECT` | Database | Grants the ability to view a database's metadata, which consists of objects in a database's `information_schema` and `pg_catalog` system catalogs. This allows the role to view the database's tables, schemas, and user-defined types, and to list the database when running `SHOW DATABASES`. The `CONNECT` privilege is also required to run backups of the database. +`CONTROLJOB` | System | Grants the ability to [pause]({% link {{ page.version.version }}/pause-job.md %}), [resume]({% link {{ page.version.version }}/resume-job.md %}), and [cancel]({% link {{ page.version.version }}/cancel-job.md %}) jobs. Non-admin roles cannot control jobs created by admin roles. +`CREATE` | Database, Schema, Table, Sequence | Grants the ability to create objects at the database, schema, table, or sequence level. When applied at the database level, grants the ability to configure [multi-region zone configs]({% link {{ page.version.version }}/zone-config-extensions.md %}). In CockroachDB v23.2 and later, the [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) `sql.auth.public_schema_create_privilege.enabled` controls whether users receive `CREATE` privileges on the public schema. The setting applies at the time that the [public schema is created]({% link {{ page.version.version }}/create-schema.md %}), which happens whenever [a database is created]({% link {{ page.version.version }}/create-database.md %}).
The setting is `true` by default, but can be set to `false` for increased compatibility with [PostgreSQL version 15](https://www.postgresql.org/about/news/postgresql-15-released-2526/) as described in [this commit](https://git.postgresql.org/gitweb/?p=postgresql.git;a=commitdiff;h=b073c3ccd06e4cb845e121387a43faa8c68a7b62). +`CREATEDB` | System | Grants the ability to [create]({% link {{ page.version.version }}/create-database.md %}) or [rename]({% link {{ page.version.version }}/alter-database.md %}#rename-to) a database. +`CREATELOGIN` | System | Grants the ability to manage authentication using the `WITH PASSWORD`, `VALID UNTIL`, and `LOGIN`/`NOLOGIN` role options. +`CREATEROLE` | System | Grants the ability to [create]({% link {{ page.version.version }}/create-role.md %}), modify, or [delete]({% link {{ page.version.version }}/drop-role.md %}) non-admin roles. +`DELETE` | Table, Sequence | Grants the ability to delete rows at the table or sequence level. +`DROP` | Database, Table, Sequence | Grants the ability to drop objects at the database, table, or sequence level. +`EXECUTE` | Function | Grants the ability to execute [functions]({% link {{ page.version.version }}/functions-and-operators.md %}). +`EXTERNALCONNECTION` | System | Grants the ability to connect to external systems such as object stores, key management systems, Kafka feeds, or external file systems. Often used in conjunction with the `BACKUP`, `RESTORE`, and `CHANGEFEED` privileges. +`EXTERNALIOIMPLICITACCESS` | System | Grants the ability to interact with external resources that require implicit access. +`INSERT` | Table, Sequence | Grants the ability to insert rows at the table or sequence level. +`MODIFYCLUSTERSETTING` | System | Grants the ability to modify [cluster settings]({% link {{ page.version.version }}/cluster-settings.md %}). +`MODIFYSQLCLUSTERSETTING` | System | Grants the ability to modify SQL [cluster settings]({% link {{ page.version.version }}/cluster-settings.md %}) (cluster settings prefixed with `sql.`). +`NOSQLLOGIN` | System | Prevents roles from connecting to the SQL interface of a cluster. +**Deprecated** `REPLICATION` | System | As of v25.2, `REPLICATION` is **deprecated**. Instead, use the `REPLICATIONSOURCE` and `REPLICATIONDEST` privileges at the table level. Grants the ability to create a [logical data replication]({% link {{ page.version.version }}/logical-data-replication-overview.md %}) or [physical cluster replication]({% link {{ page.version.version }}/physical-cluster-replication-overview.md %}) stream. +`REPLICATIONDEST` | Table | Grants the ability to run logical data replication into an existing table on the destination cluster. For more details, refer to the [Set Up Logical Data Replication]({% link {{ page.version.version }}/set-up-logical-data-replication.md %}) tutorial. +`REPLICATIONSOURCE` | Table | Grants the ability to run logical data replication from a table on the source cluster. For more details, refer to the [Set Up Logical Data Replication]({% link {{ page.version.version }}/set-up-logical-data-replication.md %}) tutorial. +`RESTORE` | System, Database | Grants the ability to restore [backups]({% link {{ page.version.version }}/backup-and-restore-overview.md %}) at the system or database level. Refer to `RESTORE` [Required privileges]({% link {{ page.version.version }}/restore.md %}#required-privileges) for more details.
+`SELECT` | Table, Sequence | Grants the ability to run [selection queries]({% link {{ page.version.version }}/query-data.md %}) at the table or sequence level. +`UPDATE` | Table, Sequence | Grants the ability to run [update statements]({% link {{ page.version.version }}/update-data.md %}) at the table or sequence level. +`USAGE` | Schema, Sequence, Type | Grants the ability to use [schemas]({% link {{ page.version.version }}/schema-design-overview.md %}), [sequences]({% link {{ page.version.version }}/create-sequence.md %}), or [user-defined types]({% link {{ page.version.version }}/create-type.md %}). +`VIEWACTIVITY` | System | Grants the ability to view other users' activity statistics of a cluster. +`VIEWACTIVITYREDACTED` | System | Grants the ability to view other users' activity statistics, but prevents the role from accessing the statement diagnostics bundle in the DB Console, and viewing some columns in introspection queries that contain data about the cluster. +`VIEWCLUSTERMETADATA` | System | Grants the ability to view range information, data distribution, store information, and Raft information. +`VIEWCLUSTERSETTING` | System | Grants the ability to view [cluster settings]({% link {{ page.version.version }}/cluster-settings.md %}) and their values. +`VIEWDEBUG` | System | Grants the ability to view the [Advanced Debug Page]({% link {{ page.version.version }}/ui-debug-pages.md %}) of the DB Console and work with the debugging and profiling endpoints. +`VIEWJOB` | System | Grants the ability to view [jobs]({% link {{ page.version.version }}/show-jobs.md %}) on the cluster. +`VIEWSYSTEMTABLE` | System | Grants read-only access (`SELECT`) on all tables in the `system` database, without granting the ability to modify the cluster. This privilege was introduced in v23.1.11. +`ZONECONFIG` | Database, Table, Sequence | Grants the ability to configure [replication zones]({% link {{ page.version.version }}/configure-replication-zones.md %}) at the database, table, and sequence level. diff --git a/src/current/_includes/v25.3/sql/querying-partitions.md b/src/current/_includes/v25.3/sql/querying-partitions.md new file mode 100644 index 00000000000..4491428eada --- /dev/null +++ b/src/current/_includes/v25.3/sql/querying-partitions.md @@ -0,0 +1,163 @@ +## Query partitions + +Similar to [indexes]({% link {{ page.version.version }}/indexes.md %}), partitions can improve query performance by limiting the number of rows that a query must scan. In the case of [geo-partitioned data]({% link {{ page.version.version }}/regional-tables.md %}), partitioning can limit a query scan to data in a specific region. + +### Filter on an indexed column + +If you filter the query of a partitioned table on a [column in the index directly following the partition prefix]({% link {{ page.version.version }}/indexes.md %}), the [cost-based optimizer]({% link {{ page.version.version }}/cost-based-optimizer.md %}) creates a query plan that scans each partition in parallel, rather than performing a costly sequential scan of the entire table. + +For example, suppose that the tables in the [`movr`]({% link {{ page.version.version }}/movr.md %}) database are geo-partitioned by region, and you want to query the `users` table for information about a specific user.
+ +Here is the `CREATE TABLE` statement for the `users` table: + +{% include_cached copy-clipboard.html %} +~~~ sql +> SHOW CREATE TABLE users; +~~~ + +~~~ + table_name | create_statement ++------------+-------------------------------------------------------------------------------------+ + users | CREATE TABLE users ( + | id UUID NOT NULL, + | city VARCHAR NOT NULL, + | name VARCHAR NULL, + | address VARCHAR NULL, + | credit_card VARCHAR NULL, + | CONSTRAINT "primary" PRIMARY KEY (city ASC, id ASC), + | FAMILY "primary" (id, city, name, address, credit_card) + | ) PARTITION BY LIST (city) ( + | PARTITION us_west VALUES IN (('seattle'), ('san francisco'), ('los angeles')), + | PARTITION us_east VALUES IN (('new york'), ('boston'), ('washington dc')), + | PARTITION europe_west VALUES IN (('amsterdam'), ('paris'), ('rome')) + | ); + | ALTER PARTITION europe_west OF INDEX movr.public.users@primary CONFIGURE ZONE USING + | constraints = '[+region=europe-west1]'; + | ALTER PARTITION us_east OF INDEX movr.public.users@primary CONFIGURE ZONE USING + | constraints = '[+region=us-east1]'; + | ALTER PARTITION us_west OF INDEX movr.public.users@primary CONFIGURE ZONE USING + | constraints = '[+region=us-west1]' +(1 row) +~~~ + +If you know the user's id, you can filter on the `id` column: + +{% include_cached copy-clipboard.html %} +~~~ sql +> SELECT * FROM users WHERE id='00000000-0000-4000-8000-000000000000'; +~~~ + +~~~ + id | city | name | address | credit_card ++--------------------------------------+----------+---------------+----------------------+-------------+ + 00000000-0000-4000-8000-000000000000 | new york | Robert Murphy | 99176 Anderson Mills | 8885705228 +(1 row) +~~~ + +An [`EXPLAIN`]({% link {{ page.version.version }}/explain.md %}) statement shows more detail about the cost-based optimizer's plan: + +{% include_cached copy-clipboard.html %} +~~~ sql +> EXPLAIN SELECT * FROM users WHERE id='00000000-0000-4000-8000-000000000000'; +~~~ + +~~~ + tree | field | description 
++------+-------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | distributed | true + | vectorized | false + scan | | + | table | users@primary + | spans | -/"amsterdam" /"amsterdam"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"amsterdam"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"/# /"amsterdam\x00"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"boston" /"boston"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"boston"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"/# /"boston\x00"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"los angeles" /"los angeles"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"los angeles"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"/# /"los angeles\x00"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"new york" /"new york"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"new york"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"/# /"new york\x00"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"paris" /"paris"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"paris"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"/# 
/"paris\x00"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"rome" /"rome"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"rome"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"/# /"rome\x00"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"san francisco" /"san francisco"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"san francisco"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"/# /"san francisco\x00"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"seattle" /"seattle"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"seattle"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"/# /"seattle\x00"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"washington dc" /"washington dc"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"washington dc"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"/# /"washington dc\x00"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"- + | filter | id = '00000000-0000-4000-8000-000000000000' +(6 rows) +~~~ + +Because the `id` column is in the primary index, directly after the partition prefix (`city`), the optimal query is constrained by the partitioned values. This means the query scans each partition in parallel for the unique `id` value. + +If you know the set of all possible partitioned values, adding a check constraint to the table's create statement can also improve performance. For example: + +{% include_cached copy-clipboard.html %} +~~~ sql +> ALTER TABLE users ADD CONSTRAINT check_city CHECK (city IN ('amsterdam','boston','los angeles','new york','paris','rome','san francisco','seattle','washington dc')); +~~~ + +{% include_cached copy-clipboard.html %} +~~~ sql +> EXPLAIN SELECT * FROM users WHERE id='00000000-0000-4000-8000-000000000000'; +~~~ + +~~~ + tree | field | description ++------+-------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | distributed | false + | vectorized | false + scan | | + | table | users@primary + | spans | 
/"amsterdam"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"amsterdam"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"/# /"boston"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"boston"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"/# /"los angeles"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"los angeles"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"/# /"new york"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"new york"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"/# /"paris"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"paris"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"/# /"rome"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"rome"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"/# /"san francisco"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"san francisco"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"/# /"seattle"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"seattle"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"/# /"washington dc"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"washington dc"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"/# + | parallel | +(6 rows) +~~~ + + +To see the performance improvement over a query that performs a full table scan, compare these queries to a query with a filter on a column that is not in the index. + +### Filter on a non-indexed column + +Suppose that you want to query the `users` table for information about a specific user, but you only know the user's name. + +{% include_cached copy-clipboard.html %} +~~~ sql +> SELECT * FROM users WHERE name='Robert Murphy'; +~~~ + +~~~ + id | city | name | address | credit_card ++--------------------------------------+----------+---------------+----------------------+-------------+ + 00000000-0000-4000-8000-000000000000 | new york | Robert Murphy | 99176 Anderson Mills | 8885705228 +(1 row) +~~~ + +{% include_cached copy-clipboard.html %} +~~~ sql +> EXPLAIN SELECT * FROM users WHERE name='Robert Murphy'; +~~~ + +~~~ + tree | field | description ++------+-------------+------------------------+ + | distributed | true + | vectorized | false + scan | | + | table | users@primary + | spans | ALL + | filter | name = 'Robert Murphy' +(6 rows) +~~~ + +The query returns the same result, but because `name` is not an indexed column, the query performs a full table scan that spans across all partition values. + +### Filter on a partitioned column + +If you know which partition contains the data that you are querying, using a filter (e.g., a [`WHERE` clause]({% link {{ page.version.version }}/select-clause.md %}#filter-rows)) on the column that is used for the partition can further improve performance by limiting the scan to the specific partition(s) that contain the data that you are querying. + +Now suppose that you know the user's name and location. 
You can query the table with a filter on the user's name and city: + +{% include_cached copy-clipboard.html %} +~~~ sql +> EXPLAIN SELECT * FROM users WHERE name='Robert Murphy' AND city='new york'; +~~~ + +~~~ + tree | field | description ++------+-------------+-----------------------------------+ + | distributed | true + | vectorized | false + scan | | + | table | users@primary + | spans | /"new york"-/"new york"/PrefixEnd + | filter | name = 'Robert Murphy' +(6 rows) +~~~ + +The query returns the same results as before, but at a much lower cost, as the query scan now spans just the `new york` partition value. diff --git a/src/current/_includes/v25.3/sql/range-splits.md b/src/current/_includes/v25.3/sql/range-splits.md new file mode 100644 index 00000000000..be16d064f5d --- /dev/null +++ b/src/current/_includes/v25.3/sql/range-splits.md @@ -0,0 +1,7 @@ +CockroachDB breaks data into ranges. By default, CockroachDB attempts to keep ranges below [the default range size]({% link {{ page.version.version }}/configure-replication-zones.md %}#range-max-bytes). To do this, the system will automatically [split a range]({% link {{ page.version.version }}/architecture/distribution-layer.md %}#range-splits) if it grows larger than this limit. For most use cases, this automatic range splitting is sufficient, and you should never need to worry about when or where the system decides to split ranges. + +However, there are reasons why you may want to perform manual splits on the ranges that store tables or indexes: + +- When a table only consists of a single range, all writes and reads to the table will be served by that range's [leaseholder]({% link {{ page.version.version }}/architecture/replication-layer.md %}#leases). If a table only holds a small amount of data but is serving a large amount of traffic, load distribution can become unbalanced and a [hotspot]({% link {{ page.version.version }}/understand-hotspots.md %}) can occur. Splitting the table's ranges manually can allow the load on the table to be more evenly distributed across multiple nodes. For tables consisting of more than a few ranges, load will naturally be distributed across multiple nodes and this will not be a concern. + +- When a table is created, it will only consist of a single range. If you know that a new table will immediately receive significant write traffic, you may want to preemptively split the table based on the expected distribution of writes before applying the load. This can help avoid reduced workload performance that results when automatic splits are unable to keep up with write traffic and a [hotspot]({% link {{ page.version.version }}/understand-hotspots.md %}) occurs. diff --git a/src/current/_includes/v25.3/sql/regional-by-row-table-description.md b/src/current/_includes/v25.3/sql/regional-by-row-table-description.md new file mode 100644 index 00000000000..6483294d044 --- /dev/null +++ b/src/current/_includes/v25.3/sql/regional-by-row-table-description.md @@ -0,0 +1,19 @@ +In a _regional by row_ table, individual rows are optimized for access from different home regions. Each row's home region is specified in a hidden [`crdb_region` column]({% link {{ page.version.version }}/alter-table.md %}#crdb_region), and is by default the region of the [gateway node]({% link {{ page.version.version }}/architecture/life-of-a-distributed-transaction.md %}#gateway) from which the row is inserted.
The `REGIONAL BY ROW` setting automatically divides a table and all of [its indexes]({% link {{ page.version.version }}/table-localities.md %}#indexes-on-regional-by-row-tables) into [partitions]({% link {{ page.version.version }}/partitioning.md %}) that use `crdb_region` as the prefix. + +Use regional by row tables when your application requires low-latency reads and writes at a row level where individual rows are primarily accessed from a single region. For an example of a table in a multi-region cluster that can benefit from the `REGIONAL BY ROW` setting, see the `users` table from the [MovR application]({% link {{ page.version.version }}/movr.md %}), which could store users' data in specific regions for better performance. + +To take advantage of regional by row tables: + +- Use unique key lookups or queries with [`LIMIT`]({% link {{ page.version.version }}/limit-offset.md %}) clauses to enable [locality optimized searches]({% link {{ page.version.version }}/cost-based-optimizer.md %}#locality-optimized-search-in-multi-region-clusters) that prioritize rows in the gateway node's region. If there is a possibility that the results of the query all live in local rows, CockroachDB will first search for rows in the gateway node's region. The search only continues in remote regions if rows in the local region did not satisfy the query. + +- Use [foreign keys]({% link {{ page.version.version }}/foreign-key.md %}#rules-for-creating-foreign-keys) that reference the [`crdb_region` column]({% link {{ page.version.version }}/alter-table.md %}#crdb_region) in [`REGIONAL BY ROW`]({% link {{ page.version.version }}/table-localities.md %}#regional-by-row-tables) tables, unless [auto-rehoming is enabled]({% link {{ page.version.version }}/alter-table.md %}#turn-on-auto-rehoming-for-regional-by-row-tables) for those tables. + +- [Turn on auto-rehoming for regional by row tables]({% link {{ page.version.version }}/alter-table.md %}#turn-on-auto-rehoming-for-regional-by-row-tables). A row's home region will be automatically set to the gateway region of any [`UPDATE`]({% link {{ page.version.version }}/update.md %}) or [`UPSERT`]({% link {{ page.version.version }}/upsert.md %}) statements that write to those rows. + +For instructions showing how to set a table's locality to `REGIONAL BY ROW` and configure the home regions of its rows, see [`ALTER TABLE ... SET LOCALITY`]({% link {{ page.version.version }}/alter-table.md %}#crdb_region). + +For more information on regional by row tables, see the [Cockroach Labs blog post](https://www.cockroachlabs.com/blog/regional-by-row/). + +{{site.data.alerts.callout_danger}} +{% include {{page.version.version}}/known-limitations/secondary-regions-with-regional-by-row-tables.md %} +{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/sql/regional-table-description.md b/src/current/_includes/v25.3/sql/regional-table-description.md new file mode 100644 index 00000000000..e4d60da08db --- /dev/null +++ b/src/current/_includes/v25.3/sql/regional-table-description.md @@ -0,0 +1,5 @@ +In a _regional_ table, access to the table will be fast in the table's home region and slower in other regions. In other words, CockroachDB optimizes access to data in a regional table from a single region. By default, a regional table's home region is the [database's primary region]({% link {{ page.version.version }}/multiregion-overview.md %}#database-regions), but that can be changed to use any region in the database. 
Regional tables work well when your application requires low-latency reads and writes for an entire table from a single region. + +For instructions showing how to set a table's locality to `REGIONAL BY TABLE` and configure its home region, see [`ALTER TABLE ... SET LOCALITY`]({% link {{ page.version.version }}/alter-table.md %}#regional-by-table). + +By default, all tables in a multi-region database are _regional_ tables that use the database's primary region. Unless you know your application needs different performance characteristics than regional tables provide, there is no need to change this setting. diff --git a/src/current/_includes/v25.3/sql/rename-index.md b/src/current/_includes/v25.3/sql/rename-index.md new file mode 100644 index 00000000000..b92cec05255 --- /dev/null +++ b/src/current/_includes/v25.3/sql/rename-index.md @@ -0,0 +1,49 @@ +### Rename an index + +{% include_cached copy-clipboard.html %} +~~~ sql +> CREATE INDEX on users(name); +~~~ + +{% include_cached copy-clipboard.html %} +~~~ sql +> SHOW INDEXES FROM users; +~~~ + +~~~ + table_name | index_name | non_unique | seq_in_index | column_name | direction | storing | implicit | visible +-------------+------------+------------+--------------+-------------+-----------+---------+----------+---------- + users | name_idx | t | 1 | name | DESC | f | f | t + users | name_idx | t | 2 | city | ASC | f | t | t + users | name_idx | t | 3 | id | ASC | f | t | t + users | users_pkey | f | 1 | city | ASC | f | f | t + users | users_pkey | f | 2 | id | ASC | f | f | t + users | users_pkey | f | 3 | name | N/A | t | f | t + users | users_pkey | f | 4 | address | N/A | t | f | t + users | users_pkey | f | 5 | credit_card | N/A | t | f | t +(8 rows) +~~~ + +{% include_cached copy-clipboard.html %} +~~~ sql +> ALTER INDEX users@name_idx RENAME TO users_name_idx; +~~~ + +{% include_cached copy-clipboard.html %} +~~~ sql +> SHOW INDEXES FROM users; +~~~ + +~~~ + table_name | index_name | non_unique | seq_in_index | column_name | direction | storing | implicit | visible +-------------+----------------+------------+--------------+-------------+-----------+---------+----------+---------- + users | users_name_idx | t | 1 | name | DESC | f | f | t + users | users_name_idx | t | 2 | city | ASC | f | t | t + users | users_name_idx | t | 3 | id | ASC | f | t | t + users | users_pkey | f | 1 | city | ASC | f | f | t + users | users_pkey | f | 2 | id | ASC | f | f | t + users | users_pkey | f | 3 | name | N/A | t | f | t + users | users_pkey | f | 4 | address | N/A | t | f | t + users | users_pkey | f | 5 | credit_card | N/A | t | f | t +(8 rows) +~~~ diff --git a/src/current/_includes/v25.3/sql/replication-zone-patterns-to-multiregion-sql-mapping.md b/src/current/_includes/v25.3/sql/replication-zone-patterns-to-multiregion-sql-mapping.md new file mode 100644 index 00000000000..6fdea66fc89 --- /dev/null +++ b/src/current/_includes/v25.3/sql/replication-zone-patterns-to-multiregion-sql-mapping.md @@ -0,0 +1,5 @@ +| Replication Zone Pattern | Multi-Region SQL | +|--------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [Duplicate indexes]({% link v20.2/topology-duplicate-indexes.md %}) | [`GLOBAL` tables]({% link {{ page.version.version }}/global-tables.md %}) | +| [Geo-partitioned replicas]({% link v20.2/topology-geo-partitioned-replicas.md %}) | [`REGIONAL BY ROW` tables]({% link {{ 
page.version.version }}/regional-tables.md %}#regional-by-row-tables) with [`ZONE` survival goals](multiregion-survival-goals.html#survive-zone-failures) | +| [Geo-partitioned leaseholders]({% link v20.2/topology-geo-partitioned-leaseholders.md %}) | [`REGIONAL BY ROW` tables]({% link {{ page.version.version }}/regional-tables.md %}#regional-by-row-tables) with [`REGION` survival goals](multiregion-survival-goals.html#survive-region-failures) | diff --git a/src/current/_includes/v25.3/sql/retry-savepoints.md b/src/current/_includes/v25.3/sql/retry-savepoints.md new file mode 100644 index 00000000000..bc3454195c0 --- /dev/null +++ b/src/current/_includes/v25.3/sql/retry-savepoints.md @@ -0,0 +1 @@ +A savepoint defined with the name `cockroach_restart` is a "retry savepoint" and is used to implement [advanced client-side transaction retries]({% link {{ page.version.version }}/advanced-client-side-transaction-retries.md %}). For more information, see [Retry savepoints]({% link {{ page.version.version }}/advanced-client-side-transaction-retries.md %}#retry-savepoints). diff --git a/src/current/_includes/v25.3/sql/role-options.md b/src/current/_includes/v25.3/sql/role-options.md new file mode 100644 index 00000000000..9a84741dd1e --- /dev/null +++ b/src/current/_includes/v25.3/sql/role-options.md @@ -0,0 +1,17 @@ +Role option | Description +------------|------------- +`BYPASSRLS`/`NOBYPASSRLS` | Allow or disallow a role to bypass [row-level security (RLS)]({% link {{ page.version.version }}/row-level-security.md %}) policies on a table. This option controls the access from an RLS perspective only; the user also needs sufficient [`GRANT`]({% link {{ page.version.version }}/grant.md %}) privileges to read or write to the table. +`CANCELQUERY`/`NOCANCELQUERY` | **Deprecated in v22.2: Use the `CANCELQUERY` [system privilege]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges).** Allow or disallow a role to cancel [queries]({% link {{ page.version.version }}/cancel-query.md %}) and [sessions]({% link {{ page.version.version }}/cancel-session.md %}) of other roles. Without this role option, roles can only cancel their own queries and sessions. Even with the `CANCELQUERY` role option, non-`admin` roles cannot cancel `admin` queries or sessions. This option should usually be combined with `VIEWACTIVITY` so that the role can view other roles' query and session information.
By default, the role option is set to `NOCANCELQUERY` for all non-`admin` roles. +`CONTROLCHANGEFEED`/`NOCONTROLCHANGEFEED` | **Deprecated in v23.1: Use the `CHANGEFEED` [privilege]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges).** Allow or disallow a role to run [`CREATE CHANGEFEED`]({% link {{ page.version.version }}/create-changefeed.md %}) on tables they have `SELECT` privileges on.
By default, the role option is set to `NOCONTROLCHANGEFEED` for all non-`admin` roles. +`CONTROLJOB`/`NOCONTROLJOB` | Allow or disallow a role to [pause]({% link {{ page.version.version }}/pause-job.md %}), [resume]({% link {{ page.version.version }}/resume-job.md %}), and [cancel]({% link {{ page.version.version }}/cancel-job.md %}) jobs. Non-`admin` roles cannot control jobs created by `admin` roles.
By default, the role option is set to `NOCONTROLJOB` for all non-`admin` roles. +`CREATEDB`/`NOCREATEDB` | Allow or disallow a role to [create]({% link {{ page.version.version }}/create-database.md %}) or [rename]({% link {{ page.version.version }}/alter-database.md %}#rename-to) a database. The role is assigned as the owner of the database.
By default, the role option is set to `NOCREATEDB` for all non-`admin` roles. +`CREATELOGIN`/`NOCREATELOGIN` | Allow or disallow a role to manage authentication using the `WITH PASSWORD`, `VALID UNTIL`, and `LOGIN/NOLOGIN` role options.
By default, the role option is set to `NOCREATELOGIN` for all non-`admin` roles. +`CREATEROLE`/`NOCREATEROLE` | Allow or disallow the new role to [create]({% link {{ page.version.version }}/create-role.md %}), alter, and [drop]({% link {{ page.version.version }}/drop-role.md %}) other non-`admin` roles.
By default, the role option is set to `NOCREATEROLE` for all non-`admin` roles. +`LOGIN`/`NOLOGIN` | Allow or disallow a role to log in with one of the [client authentication methods]({% link {{ page.version.version }}/authentication.md %}#client-authentication). Setting the role option to `NOLOGIN` prevents the role from logging in using any authentication method. +`MODIFYCLUSTERSETTING`/`NOMODIFYCLUSTERSETTING` | Allow or disallow a role to modify the [cluster settings]({% link {{ page.version.version }}/cluster-settings.md %}) with the `sql.defaults` prefix.
By default, the role option is set to `NOMODIFYCLUSTERSETTING` for all non-`admin` roles. +`PASSWORD password`/`PASSWORD NULL` | The credential the role uses to [authenticate their access to a secure cluster]({% link {{ page.version.version }}/authentication.md %}#client-authentication). A password should be entered as a [string literal]({% link {{ page.version.version }}/sql-constants.md %}#string-literals). For compatibility with PostgreSQL, a password can also be entered as an identifier.
To prevent a role from using [password authentication]({% link {{ page.version.version }}/authentication.md %}#client-authentication) and to mandate [certificate-based client authentication]({% link {{ page.version.version }}/authentication.md %}#client-authentication), [set the password as `NULL`]({% link {{ page.version.version }}/create-role.md %}#prevent-a-role-from-using-password-authentication). +`SQLLOGIN`/`NOSQLLOGIN` | **Deprecated in v22.2: Use the `NOSQLLOGIN` [system privilege]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges).** Allow or disallow a role to log in using the SQL CLI with one of the [client authentication methods]({% link {{ page.version.version }}/authentication.md %}#client-authentication). Setting the role option to `NOSQLLOGIN` prevents the role from logging in using the SQL CLI with any authentication method while retaining the ability to log in to the DB Console. It is possible to have both `NOSQLLOGIN` and `LOGIN` set for a role; in that case, `NOSQLLOGIN` takes precedence.
Without any role options, all login behavior is permitted. +`VALID UNTIL` | The date and time (in the [`timestamp`]({% link {{ page.version.version }}/timestamp.md %}) format) after which the [password](#parameters) is not valid. +`VIEWACTIVITY`/`NOVIEWACTIVITY` | **Deprecated in v22.2: Use the `VIEWACTIVITY` [system privilege]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges).** Allow or disallow a role to see other roles' [queries]({% link {{ page.version.version }}/show-statements.md %}) and [sessions]({% link {{ page.version.version }}/show-sessions.md %}) using `SHOW STATEMENTS`, `SHOW SESSIONS`, and the [**Statements**](ui-statements-page.html) and [**Transactions**](ui-transactions-page.html) pages in the DB Console. `VIEWACTIVITY` also permits visibility of node hostnames and IP addresses in the DB Console. With `NOVIEWACTIVITY`, the `SHOW` commands show only the role's own data, and DB Console pages redact node hostnames and IP addresses.
By default, the role option is set to `NOVIEWACTIVITY` for all non-`admin` roles. +`VIEWCLUSTERSETTING` / `NOVIEWCLUSTERSETTING` | **Deprecated in v22.2: Use the `VIEWCLUSTERSETTING` [system privilege]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges).** Allow or disallow a role to view the [cluster settings]({% link {{ page.version.version }}/cluster-settings.md %}) with `SHOW CLUSTER SETTING` or to access the [**Cluster Settings**]({% link {{ page.version.version }}/ui-debug-pages.md %}) page in the DB Console.
By default, the role option is set to `NOVIEWCLUSTERSETTING` for all non-`admin` roles. +`VIEWACTIVITYREDACTED`/`NOVIEWACTIVITYREDACTED` | **Deprecated in v22.2: Use the `VIEWACTIVITYREDACTED` [system privilege]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges).** Allow or disallow a role to see other roles' queries and sessions using `SHOW STATEMENTS`, `SHOW SESSIONS`, and the Statements and Transactions pages in the DB Console. With `VIEWACTIVITYREDACTED`, a user cannot access the statement diagnostics bundle (which can contain PII) in the DB Console, and cannot list queries containing [constants]({% link {{ page.version.version }}/sql-constants.md %}) for other users when using the `listSessions` endpoint through the [Cluster API]({% link {{ page.version.version }}/cluster-api.md %}). It is possible to have both `VIEWACTIVITY` and `VIEWACTIVITYREDACTED`; in that case, `VIEWACTIVITYREDACTED` takes precedence. If the user has `VIEWACTIVITY` but not `VIEWACTIVITYREDACTED`, they can see DB Console pages and access the statement diagnostics bundle.
By default, the role option is set to `NOVIEWACTIVITYREDACTED` for all non-`admin` roles. diff --git a/src/current/_includes/v25.3/sql/role-subject-option.md b/src/current/_includes/v25.3/sql/role-subject-option.md new file mode 100644 index 00000000000..c8a71e9af1b --- /dev/null +++ b/src/current/_includes/v25.3/sql/role-subject-option.md @@ -0,0 +1 @@ +You can associate an [X.509](https://en.wikipedia.org/wiki/X.509) certificate's Subject with a [role]({% link {{ page.version.version }}/security-reference/authorization.md %}#roles) as shown below. Note that the Subject fields in the certificate must exactly match what you pass in via the SQL statement: the order of attributes passed in via the SQL statement must match the order of attributes in the certificate. diff --git a/src/current/_includes/v25.3/sql/row-level-security-enabled.md b/src/current/_includes/v25.3/sql/row-level-security-enabled.md new file mode 100644 index 00000000000..42ff1beea3d --- /dev/null +++ b/src/current/_includes/v25.3/sql/row-level-security-enabled.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_info}} +RLS applies to a table **only when explicitly enabled** using `ALTER TABLE ... ENABLE ROW LEVEL SECURITY`. Roles exempt from RLS policies include [admins]({% link {{ page.version.version }}/security-reference/authorization.md %}#roles), [table owners]({% link {{ page.version.version }}/security-reference/authorization.md %}#object-ownership) (unless the table is set to [`FORCE ROW LEVEL SECURITY`](#force-row-level-security)), and [roles with `BYPASSRLS`]({% link {{ page.version.version }}/alter-role.md %}#allow-a-role-to-bypass-row-level-security-rls). +{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/sql/row-level-ttl-prefer-ttl-expiration-expressions.md b/src/current/_includes/v25.3/sql/row-level-ttl-prefer-ttl-expiration-expressions.md new file mode 100644 index 00000000000..a75ed05e6c6 --- /dev/null +++ b/src/current/_includes/v25.3/sql/row-level-ttl-prefer-ttl-expiration-expressions.md @@ -0,0 +1,5 @@ +Most users should use `ttl_expiration_expression` instead of `ttl_expire_after` for the following reasons: + +- If you add `ttl_expire_after` to an existing table, it **will cause a full table rewrite, which can affect performance**. Specifically, it will result in a [schema change]({% link {{ page.version.version }}/online-schema-changes.md %}) that (1) creates a new [hidden column]({% link {{page.version.version}}/show-create.md%}#show-the-create-table-statement-for-a-table-with-a-hidden-column) `crdb_internal_expiration` for all rows, and (2) backfills the value of that new column to `now()` + `ttl_expire_after`. +- You cannot use `ttl_expire_after` with an existing [`TIMESTAMPTZ`]({% link {{ page.version.version }}/timestamp.md %}) column. +- If you use `ttl_expiration_expression`, you can use an existing [`TIMESTAMPTZ`]({% link {{ page.version.version }}/timestamp.md %}) column, such as `updated_at`. diff --git a/src/current/_includes/v25.3/sql/row-level-ttl.md b/src/current/_includes/v25.3/sql/row-level-ttl.md new file mode 100644 index 00000000000..d10ea9b8e87 --- /dev/null +++ b/src/current/_includes/v25.3/sql/row-level-ttl.md @@ -0,0 +1 @@ +CockroachDB has support for Time to Live ("TTL") expiration on table rows, also known as _Row-Level TTL_. Row-Level TTL is a mechanism whereby rows from a table are considered "expired" and can be automatically deleted once those rows have been stored longer than a specified expiration time.
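+ +For example, the following is a minimal sketch of a TTL definition that uses `ttl_expiration_expression` with an existing `TIMESTAMPTZ` column (the `events` table and its columns here are illustrative, not part of a built-in dataset): + +{% include_cached copy-clipboard.html %} +~~~ sql +CREATE TABLE events ( +  id UUID PRIMARY KEY DEFAULT gen_random_uuid(), +  description STRING, +  inserted_at TIMESTAMPTZ NOT NULL DEFAULT now() +) WITH (ttl_expiration_expression = '(inserted_at + INTERVAL ''30 days'')'); +~~~ + +Because the expression references an existing column, rows become eligible for deletion once `inserted_at + INTERVAL '30 days'` has passed, and no hidden column or table rewrite is needed.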
diff --git a/src/current/_includes/v25.3/sql/savepoint-ddl-rollbacks.md b/src/current/_includes/v25.3/sql/savepoint-ddl-rollbacks.md new file mode 100644 index 00000000000..57da82ae775 --- /dev/null +++ b/src/current/_includes/v25.3/sql/savepoint-ddl-rollbacks.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_danger}} +Rollbacks to savepoints over [DDL](https://en.wikipedia.org/wiki/Data_definition_language) statements are only supported if you're rolling back to a savepoint created at the beginning of the transaction. +{{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/sql/savepoints-and-high-priority-transactions.md b/src/current/_includes/v25.3/sql/savepoints-and-high-priority-transactions.md new file mode 100644 index 00000000000..c6de489e641 --- /dev/null +++ b/src/current/_includes/v25.3/sql/savepoints-and-high-priority-transactions.md @@ -0,0 +1 @@ +[`ROLLBACK TO SAVEPOINT`]({% link {{ page.version.version }}/rollback-transaction.md %}#rollback-a-nested-transaction) (for either regular savepoints or "restart savepoints" defined with `cockroach_restart`) causes a "feature not supported" error after a DDL statement in a [`HIGH PRIORITY` transaction]({% link {{ page.version.version }}/transactions.md %}#transaction-priorities), in order to avoid a transaction deadlock. For more information, see GitHub issue [#46414](https://www.github.com/cockroachdb/cockroach/issues/46414). diff --git a/src/current/_includes/v25.3/sql/savepoints-and-row-locks.md b/src/current/_includes/v25.3/sql/savepoints-and-row-locks.md new file mode 100644 index 00000000000..39568092558 --- /dev/null +++ b/src/current/_includes/v25.3/sql/savepoints-and-row-locks.md @@ -0,0 +1,12 @@ +CockroachDB supports exclusive row locks. + +- In PostgreSQL, row locks are released/cancelled upon [`ROLLBACK TO SAVEPOINT`][rts]. +- In CockroachDB, row locks are preserved upon [`ROLLBACK TO SAVEPOINT`][rts]. + +This is an architectural difference that may or may not be lifted in a later CockroachDB version. + +The code of client applications that rely on row locks must be reviewed and possibly modified to account for this difference. In particular, if an application is relying on [`ROLLBACK TO SAVEPOINT`][rts] to release row locks and allow a concurrent transaction touching the same rows to proceed, this behavior will not work with CockroachDB. + +{% comment %} Reference Links {% endcomment %} + +[rts]: rollback-transaction.html diff --git a/src/current/_includes/v25.3/sql/schema-changes.md b/src/current/_includes/v25.3/sql/schema-changes.md new file mode 100644 index 00000000000..c61e9c9a046 --- /dev/null +++ b/src/current/_includes/v25.3/sql/schema-changes.md @@ -0,0 +1 @@ +- Schema changes through [`ALTER TABLE`]({% link {{ page.version.version }}/alter-table.md %}), [`DROP DATABASE`]({% link {{ page.version.version }}/drop-database.md %}), [`DROP TABLE`]({% link {{ page.version.version }}/drop-table.md %}), and [`TRUNCATE`](truncate.html) \ No newline at end of file diff --git a/src/current/_includes/v25.3/sql/schema-terms.md b/src/current/_includes/v25.3/sql/schema-terms.md new file mode 100644 index 00000000000..d066d5d979b --- /dev/null +++ b/src/current/_includes/v25.3/sql/schema-terms.md @@ -0,0 +1,3 @@ +{{site.data.alerts.callout_info}} +To avoid confusion with the general term "[schema](https://wiktionary.org/wiki/schema)", in this guide we refer to the logical object as a *user-defined schema*, and to the relationship structure of logical objects in a cluster as a *database schema*. 
diff --git a/src/current/_includes/v25.3/sql/select-for-update-example-partial.md b/src/current/_includes/v25.3/sql/select-for-update-example-partial.md
new file mode 100644
index 00000000000..62a2bf9c066
--- /dev/null
+++ b/src/current/_includes/v25.3/sql/select-for-update-example-partial.md
@@ -0,0 +1,50 @@
+This example assumes you are running a [local unsecured cluster]({% link {{ page.version.version }}/start-a-local-cluster.md %}).
+
+First, connect to the running cluster (call this Terminal 1):
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+cockroach sql --insecure
+~~~
+
+Next, create a table and insert some rows:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+CREATE TABLE kv (k INT PRIMARY KEY, v INT);
+INSERT INTO kv (k, v) VALUES (1, 5), (2, 10), (3, 15);
+~~~
+
+Next, start a [transaction]({% link {{ page.version.version }}/transactions.md %}) and lock the row we want to operate on:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+BEGIN;
+SELECT * FROM kv WHERE k = 1 FOR UPDATE;
+~~~
+
+Press **Enter** twice in the [SQL client]({% link {{ page.version.version }}/cockroach-sql.md %}) to send the statements to be evaluated. This will result in the following output:
+
+~~~
+ k | v
++---+----+
+ 1 | 5
+(1 row)
+~~~
+
+Now open another terminal and connect to the database from a second client (call this Terminal 2):
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+cockroach sql --insecure
+~~~
+
+From Terminal 2, start a transaction and try to lock for update the same row that the transaction we opened in Terminal 1 has already locked:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+BEGIN;
+SELECT * FROM kv WHERE k = 1 FOR UPDATE;
+~~~
+
+Press **Enter** twice to send the statements to be evaluated. Because Terminal 1 has already locked this row, the `SELECT FOR UPDATE` statement from Terminal 2 will appear to "wait".
diff --git a/src/current/_includes/v25.3/sql/select-for-update-overview.md b/src/current/_includes/v25.3/sql/select-for-update-overview.md
new file mode 100644
index 00000000000..812b9c0eb17
--- /dev/null
+++ b/src/current/_includes/v25.3/sql/select-for-update-overview.md
@@ -0,0 +1,22 @@
+{% if page.name != "select-for-update.md" %}`SELECT ... FOR UPDATE` exclusively locks the rows returned by a [selection query][selection], such that other transactions trying to access those rows must wait for the transaction that locked the rows to commit or roll back.{% endif %}
+
+`SELECT ... FOR UPDATE` can be used to:
+
+- Strengthen the isolation of a [`READ COMMITTED`]({% link {{ page.version.version }}/read-committed.md %}) transaction. If you need to read and later update a row within a transaction, use `SELECT ... FOR UPDATE` to acquire an exclusive lock on the row. This guarantees data integrity between the transaction's read and write operations. For details, see [Locking reads]({% link {{ page.version.version }}/read-committed.md %}#locking-reads).
+
+- Order [`SERIALIZABLE`]({% link {{ page.version.version }}/demo-serializable.md %}) transactions by controlling concurrent access to one or more rows of a table. These other transactions are placed into a queue based on when they tried to read the values of the locked rows.
+
+    Because this queueing happens during the read operation, it prevents the [thrashing](https://wikipedia.org/wiki/Thrashing_(computer_science)) that would otherwise occur when multiple concurrently executing transactions attempt to `SELECT` the same data and then `UPDATE` the results of that selection. By preventing thrashing, `SELECT ... FOR UPDATE` also prevents [transaction retries][retries] that would otherwise occur due to [contention]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#transaction-contention).
+
+    As a result, using `SELECT ... FOR UPDATE` leads to increased throughput and decreased tail latency for contended operations.
+
+Note that using `SELECT ... FOR UPDATE` does not completely eliminate the chance of [serialization errors]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}). These errors can also arise due to [time uncertainty]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#transaction-conflicts). To eliminate the need for application-level retry logic, in addition to `SELECT FOR UPDATE`, your application also needs to use a [driver that implements automatic retry handling]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}#client-side-retry-handling).
+
+{{site.data.alerts.callout_info}}
+By default, CockroachDB uses the `SELECT ... FOR UPDATE` locking mechanism during the initial row scan performed in [`UPDATE`]({% link {{ page.version.version }}/update.md %}) and [`UPSERT`]({% link {{ page.version.version }}/upsert.md %}) statement execution. To turn off implicit `SELECT ... FOR UPDATE` locking for `UPDATE` and `UPSERT` statements, set the `enable_implicit_select_for_update` [session variable]({% link {{ page.version.version }}/set-vars.md %}) to `false`.
+{{site.data.alerts.end}}
+
+{% comment %} Reference Links {% endcomment %}
+
+[retries]: transactions.html#transaction-retries
+[selection]: selection-queries.html
diff --git a/src/current/_includes/v25.3/sql/select-lock-strengths.md b/src/current/_includes/v25.3/sql/select-lock-strengths.md
new file mode 100644
index 00000000000..fc0b2cd590e
--- /dev/null
+++ b/src/current/_includes/v25.3/sql/select-lock-strengths.md
@@ -0,0 +1,5 @@
+- `SELECT FOR UPDATE` obtains an *exclusive lock* on each qualifying row, blocking concurrent writes and locking reads on the row. Only one transaction can hold an exclusive lock on a row at a time, and only the transaction holding the exclusive lock can write to the row. {% if page.name == "read-committed.md" %}For an example, see [Reserve rows for updates using exclusive locks](#reserve-rows-for-updates-using-exclusive-locks).{% endif %}
+
+- `SELECT FOR SHARE` obtains a *shared lock* on each qualifying row, blocking concurrent writes and **exclusive** locking reads on the row. Multiple transactions can hold a shared lock on a row at the same time. When multiple transactions hold a shared lock on a row, none can write to the row. A shared lock grants transactions mutual read-only access to a row, and ensures that they read the latest version of the row. {% if page.name == "read-committed.md" %}For an example, see [Reserve values using shared locks](#reserve-row-values-using-shared-locks).{% endif %}
+
+When a `SELECT FOR UPDATE` or `SELECT FOR SHARE` read is issued on a row, only the latest version of the row is returned to the client. Under {% if page.name == "read-committed.md" %}`READ COMMITTED`{% else %}[`READ COMMITTED`]({% link {{ page.version.version }}/read-committed.md %}){% endif %} isolation, neither statement will block concurrent, non-locking reads.
\ No newline at end of file
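+
+For example, a minimal sketch reusing the `kv` table from the example above (an illustrative assumption; any table works): two sessions can each run the following transaction concurrently and both shared locks are granted, but a concurrent `SELECT ... FOR UPDATE` or write against row `k = 1` must wait until the shared locks are released.
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+BEGIN;
+-- Acquire a shared lock: other transactions may also read-lock this row,
+-- but none can write to it while any shared lock is held.
+SELECT * FROM kv WHERE k = 1 FOR SHARE;
+COMMIT;
+~~~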
diff --git a/src/current/_includes/v25.3/sql/serializable-tutorial.md b/src/current/_includes/v25.3/sql/serializable-tutorial.md
new file mode 100644
index 00000000000..2a03b091b58
--- /dev/null
+++ b/src/current/_includes/v25.3/sql/serializable-tutorial.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_info}}
+This tutorial assumes you are running under [`SERIALIZABLE`]({% link {{ page.version.version }}/demo-serializable.md %}) isolation, which requires client-side retry handling for [serialization errors]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}).
+{{site.data.alerts.end}}
\ No newline at end of file
diff --git a/src/current/_includes/v25.3/sql/server-side-connection-limit.md b/src/current/_includes/v25.3/sql/server-side-connection-limit.md
new file mode 100644
index 00000000000..62300b45619
--- /dev/null
+++ b/src/current/_includes/v25.3/sql/server-side-connection-limit.md
@@ -0,0 +1 @@
+To control the maximum number of connections from non-superusers (that is, users other than [`root`]({% link {{ page.version.version }}/security-reference/authorization.md %}#root-user) and other members of the [`admin` role]({% link {{ page.version.version }}/security-reference/authorization.md %}#admin-role)) that a [gateway node]({% link {{ page.version.version }}/architecture/sql-layer.md %}#gateway-node) can have open at one time, use the `server.max_connections_per_gateway` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}). If a new non-superuser connection would exceed this limit, the error message `"sorry, too many clients already"` is returned, along with error code `53300`.
diff --git a/src/current/_includes/v25.3/sql/set-transaction-as-of-system-time-example.md b/src/current/_includes/v25.3/sql/set-transaction-as-of-system-time-example.md
new file mode 100644
index 00000000000..8e758f1c303
--- /dev/null
+++ b/src/current/_includes/v25.3/sql/set-transaction-as-of-system-time-example.md
@@ -0,0 +1,24 @@
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> BEGIN;
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SET TRANSACTION AS OF SYSTEM TIME '2019-04-09 18:02:52.0+00:00';
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SELECT * FROM orders;
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SELECT * FROM products;
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> COMMIT;
+~~~
diff --git a/src/current/_includes/v25.3/sql/shell-commands.md b/src/current/_includes/v25.3/sql/shell-commands.md
new file mode 100644
index 00000000000..7586108f3d1
--- /dev/null
+++ b/src/current/_includes/v25.3/sql/shell-commands.md
@@ -0,0 +1,54 @@
+The following commands can be used within the interactive SQL shell:
+
+Command | Usage
+--------|------------
+`\?`,`help` | View this help within the shell.
+`\q`,`quit`,`exit`,`ctrl-d` | Exit the shell. When no text follows the prompt, `ctrl-c` exits the shell as well; otherwise, `ctrl-c` clears the line.
+`\!` | Run an external command and print its results to `stdout`. [See an example]({% link {{ page.version.version }}/cockroach-sql.md %}#run-external-commands-from-the-sql-shell).
+`\|` | Run the output of an external command as SQL statements. [See an example]({% link {{ page.version.version }}/cockroach-sql.md %}#run-external-commands-from-the-sql-shell).
+`\set