Skip to content

Commit

Permalink
other recipes
Browse files Browse the repository at this point in the history
Signed-off-by: Juraci Paixão Kröhling <[email protected]>
  • Loading branch information
jpkrohling committed May 16, 2024
1 parent bc7e287 commit 201cf85
Show file tree
Hide file tree
Showing 23 changed files with 946 additions and 277 deletions.
29 changes: 11 additions & 18 deletions recipes/grafana/grafana-cloud.yaml
Original file line number Diff line number Diff line change
@@ -1,13 +1,8 @@
extensions:
basicauth/traces:
client_auth:
username: "${TRACES_USER_ID}"
password: "${TOKEN}"

basicauth/logs:
client_auth:
username: "${LOGS_USER_ID}"
password: "${TOKEN}"
username: "239237"
password: "..."

processors:
batch:
Expand All @@ -18,23 +13,21 @@ receivers:
grpc:

exporters:
otlp/gct:
endpoint: tempo-us-central1.grafana.net:443
debug:
verbosity: detailed
otlphttp:
endpoint: https://otlp-gateway-prod-us-central-0.grafana.net/otlp
auth:
authenticator: basicauth/traces
loki/gcl:
endpoint: "${LOGS_URL}"
auth:
authenticator: basicauth/logs

service:
extensions: [ basicauth/traces, basicauth/logs ]
extensions: [ basicauth/traces ]
pipelines:
traces:
receivers: [ otlp ]
processors: [ ]
exporters: [ otlp/gct ]
logs:
exporters: [ otlphttp ]
metrics:
receivers: [ otlp ]
processors: [ ]
exporters: [ loki/gcl ]
processors: [ ]
exporters: [ debug, otlphttp ]
19 changes: 19 additions & 0 deletions recipes/kubernetes/client-side-load-balancing/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
This example shows how to get regular load balancing done with plain Kubernetes services. In this demo, we'll have one client instance and initially 3 servers. Then, we increment the number of servers to 10 and watch the metrics. After a couple of minutes, all instances should have a similar number of spans being received.

```console
k3d cluster create
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.4/cert-manager.yaml
kubectl wait --for=condition=Available deployments/cert-manager -n cert-manager

kubectl apply -f https://github.com/open-telemetry/opentelemetry-operator/releases/latest/download/opentelemetry-operator.yaml
kubectl wait --for=condition=Available deployments/opentelemetry-operator-controller-manager -n opentelemetry-operator-system

kubectl create -f https://github.com/prometheus-operator/prometheus-operator/releases/download/v0.73.2/bundle.yaml
kubectl wait --for=condition=Available deployments/prometheus-operator -n default

kubectl apply -f resources.yaml
kubectl wait --for=condition=Available deployments/client-collector -n observability

kubectl port-forward -n observability service/client-collector 4317:4317
kubectl port-forward -n observability service/prometheus-operated 9090:9090
```
120 changes: 120 additions & 0 deletions recipes/kubernetes/client-side-load-balancing/resources.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,120 @@
---
# Namespace holding every resource in this recipe.
apiVersion: v1
kind: Namespace
metadata:
  name: observability
---
# ServiceAccount used by the Prometheus instance defined below.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: prometheus
  namespace: observability
---
# Minimal RBAC so Prometheus can discover its scrape targets
# (services/endpoints/pods) within the observability namespace.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: prometheus
  namespace: observability
rules:
  - apiGroups: [""]
    resources:
      - services
      - endpoints
      - pods
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources:
      - configmaps
    verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: prometheus
  namespace: observability
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: prometheus
subjects:
  - kind: ServiceAccount
    name: prometheus
    namespace: observability
---
# Client collector: receives OTLP traffic and spreads it across the
# server replicas. The dns:/// scheme plus the headless service gives
# the gRPC client one address per backend pod, and round_robin
# balances across them.
apiVersion: opentelemetry.io/v1alpha1
kind: OpenTelemetryCollector
metadata:
  name: client
  namespace: observability
spec:
  # NOTE(review): custom image referencing a work-in-progress build
  # (issue 4274) — confirm whether a released collector image can be
  # used instead.
  image: jpkroehling/otelcol-with-4274:latest
  config: |
    receivers:
      otlp:
        protocols:
          grpc:
    processors:
    exporters:
      otlp:
        tls:
          insecure: true
        endpoint: dns:///server-collector-headless.observability:4317
        balancer_name: round_robin
    service:
      pipelines:
        traces:
          receivers: [otlp]
          processors: []
          exporters: [otlp]
---
# Server collectors: 3 replicas initially; the demo scales this
# number up/down to observe the load distribution.
apiVersion: opentelemetry.io/v1alpha1
kind: OpenTelemetryCollector
metadata:
  name: server
  namespace: observability
spec:
  replicas: 3
  config: |
    receivers:
      otlp:
        protocols:
          grpc:
    processors:
    exporters:
      # NOTE(review): the 'logging' exporter is deprecated in recent
      # collector releases in favor of 'debug' — confirm against the
      # collector version in use.
      logging:
    service:
      pipelines:
        traces:
          receivers: [otlp]
          processors: []
          exporters: [logging]
---
# Scrapes the server collectors' own telemetry (monitoring port),
# so we can watch how many spans each replica receives.
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: server-monitor
  namespace: observability
  labels:
    scrape: "yes"
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: server-collector-monitoring
  endpoints:
    - port: monitoring
---
# Prometheus instance that picks up any ServiceMonitor labeled
# scrape: "yes" (quoted — an unquoted yes would parse as a boolean).
apiVersion: monitoring.coreos.com/v1
kind: Prometheus
metadata:
  name: prometheus
  namespace: observability
spec:
  serviceAccountName: prometheus
  serviceMonitorSelector:
    matchLabels:
      scrape: "yes"
38 changes: 38 additions & 0 deletions recipes/load-balancing-exporter/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
This example shows how to distribute spans across a pool of backend collectors using the OpenTelemetry Collector's load-balancing exporter. In this demo, we'll have one load-balancer instance and initially 3 backends. Then, we change the number of backends (10, then 5, then 0, then 5 again) and watch the metrics. After a couple of minutes at each step, all active backends should have a similar number of spans being received.

```console
k3d cluster create
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.4/cert-manager.yaml
kubectl wait --for=condition=Available deployments/cert-manager -n cert-manager

kubectl apply -f https://github.com/open-telemetry/opentelemetry-operator/releases/latest/download/opentelemetry-operator.yaml
kubectl wait --for=condition=Available deployments/opentelemetry-operator-controller-manager -n opentelemetry-operator-system

kubectl create -f https://github.com/prometheus-operator/prometheus-operator/releases/download/v0.73.2/bundle.yaml
kubectl wait --for=condition=Available deployments/prometheus-operator -n default

kubectl apply -f cr-load-balancer.yaml
kubectl wait --for=condition=Available deployments/loadbalancer-collector -n observability
kubectl wait --for=condition=Available deployments/backends-collector -n observability

kubectl port-forward -n observability service/prometheus-operated 9090:9090
kubectl port-forward -n observability service/loadbalancer-collector 4317:4317
kubectl port-forward -n observability deployments/grafana 3000:3000

# Add a new Prometheus datasource on Grafana, http://prometheus-operated:9090

telemetrygen traces --otlp-insecure --rate 1000 --duration 30m > telemetrygen.log 2>&1 &
sleep 5m

kubectl patch -n observability otelcol backends -p '{"spec":{"replicas":10}}' --type=merge
sleep 5m

kubectl patch -n observability otelcol backends -p '{"spec":{"replicas":5}}' --type=merge
sleep 5m

kubectl patch -n observability otelcol backends -p '{"spec":{"replicas":0}}' --type=merge
sleep 5m

kubectl patch -n observability otelcol backends -p '{"spec":{"replicas":5}}' --type=merge
sleep 5m
```
Loading

0 comments on commit 201cf85

Please sign in to comment.