-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcompose.yaml
More file actions
113 lines (107 loc) · 3.43 KB
/
compose.yaml
File metadata and controls
113 lines (107 loc) · 3.43 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
services:
  observabilidade-api:
    platform: linux/arm64
    container_name: observabilidade-api
    build: # build from the Dockerfile (to use a prebuilt image, replace this with "image:")
      context: ./Observabilidade.Api # path where the Dockerfile is located
      dockerfile: Dockerfile
    # NOTE(review): service-level secrets are mounted at runtime under /run/secrets;
    # if the token is needed during the image build (e.g. restoring private packages),
    # it must instead be declared under build.secrets — confirm against the Dockerfile.
    secrets:
      - github_token
    ports:
      - "8080:8080" # host:container
    environment:
      - ASPNETCORE_ENVIRONMENT=Development
      - DOTNET_USE_POLLING_FILE_WATCHER=true
      - DOTNET_HOST_PATH=/usr/share/dotnet
      - OTEL_EXPORTER_OTLP_ENDPOINT=http://otel-collector:4317
    depends_on:
      otel-collector:
        condition: service_healthy
    restart: unless-stopped # container restarts automatically if it crashes
    networks:
      - observability-net
otel-collector:
platform: linux/arm64
container_name: otel-collector
# image: otel/opentelemetry-collector-contrib:latest
build:
context: ./otel-collector
dockerfile: Dockerfile
command: ["--config", "/etc/otel-collector-config.yaml"]
env_file:
- .env
environment:
- TZ=America/Sao_Paulo
volumes: #compartilha o arquivo do host : com o do container
- ./otel-collector/config.yaml:/etc/otel-collector-config.yaml
ports:
- 1888:1888 # pprof extension
- 8888:8888 # Prometheus metrics exposed by the Collector
- 8889:8889 # Prometheus exporter metrics
- 13133:13133 # health_check extension
- 4317:4317 # OTLP gRPC receiver
- 4318:4318 # OTLP http receiver
- 55679:55679 # zpages extension
healthcheck:
test: ["CMD-SHELL", "curl --max-time 5 -s -f http://localhost:13133 || exit 1"]
interval: 15s
timeout: 10s
retries: 5
depends_on:
elasticsearch:
condition: service_healthy
restart: unless-stopped #container reinicia automaticamente se cair
networks:
- observability-net
elasticsearch:
platform: linux/arm64
container_name: elasticsearch
image: docker.elastic.co/elasticsearch/elasticsearch:9.1.2
ports:
- "9200:9200"
environment:
- discovery.type=single-node # Para ambiente de desenvolvimento
- ES_JAVA_OPTS=-Xms1g -Xmx1g # Heap configurável
# - ELASTIC_PASSWORD=${ES_PASSWORD}
- xpack.security.enabled=false
- TZ=America/Sao_Paulo
env_file:
- .env
volumes:
- ./elasticsearch/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
- es-data:/usr/share/elasticsearch/data
healthcheck:
test: ["CMD-SHELL", "curl --max-time 5 -s -f http://localhost:9200/_cluster/health | grep -q '\"status\"'"]
interval: 15s
timeout: 10s
retries: 5
restart: unless-stopped #container reinicia automaticamente se cair
networks:
- observability-net
kibana:
platform: linux/arm64
container_name: kibana
image: docker.elastic.co/kibana/kibana:9.1.2
env_file:
- .env
environment:
- TZ=America/Sao_Paulo
volumes:
- ./kibana/kibana.yml:/usr/share/kibana/config/kibana.yml:ro
ports:
- 5601:5601
depends_on:
elasticsearch:
condition: service_healthy
restart: unless-stopped #container reinicia automaticamente se cair
networks:
- observability-net
# Named volume so Elasticsearch data survives container recreation
volumes:
  es-data:
secrets:
  github_token:
    file: ./.github_token # token read from a local file kept out of VCS
# Internal network definition for inter-service communication
networks:
  observability-net:
    driver: bridge