This repository was archived by the owner on Jul 22, 2025. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path docker-compose.yaml
More file actions
146 lines (137 loc) · 3.54 KB
/
docker-compose.yaml
File metadata and controls
146 lines (137 loc) · 3.54 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
services:
  # Spring Boot application; exposes HTTP on 8080 and a JDWP debug port on 5005.
  processo-app:
    build: .
    container_name: processo-app
    ports:
      - "8080:8080"  # application HTTP
      - "5005:5005"  # remote debug (JDWP)
    # Long-form depends_on: mongodb defines a healthcheck below, so wait for
    # it to be healthy rather than merely started; broker1 has no healthcheck,
    # so service_started is the strongest condition available for it.
    depends_on:
      mongodb:
        condition: service_healthy
      broker1:
        condition: service_started
    environment:
      SPRING_KAFKA_BOOTSTRAP_SERVERS: broker1:9092
      SPRING_PROFILES_ACTIVE: local
      # Quoted: the URI contains '?' and '&' — keep it an explicit string.
      SPRING_DATA_MONGODB_URI: "mongodb://mongodb:27017/SBanco?maxPoolSize=50&waitQueueTimeoutMS=2000"
      JAVA_OPTS: "-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005"
    restart: unless-stopped
    networks:
      - kafkabroker
      - mongodb
mongodb:
image: mongo:6.0
container_name: mongodb
environment:
- MONGO_INITDB_DATABASE=SBanco
volumes:
- mongodb_data:/data/db
ports:
- "27017:27017"
healthcheck:
test: [ "CMD", "mongosh", "--eval", "db.runCommand('ping').ok" ]
interval: 10s
timeout: 5s
retries: 5
networks:
- mongodb
zookeeper:
image: confluentinc/cp-zookeeper:6.2.0
container_name: zookeeper
environment:
- ZOOKEEPER_CLIENT_PORT=2181
- ZOOKEEPER_TICK_TIME=2000
- ZOOKEEPER_LOG4J_ROOT_LOGLEVEL=${ZOOKEEPER_LOG4J_ROOT_LOGLEVEL:-INFO}
restart: unless-stopped
networks:
- kafkabroker
volumes:
- data_zookeeper:/data
- datalog_zookeeper:/datalog
deploy:
resources:
limits:
memory: 512M
reservations:
memory: 256M
logging:
driver: "json-file"
options:
max-file: "2"
max-size: "10m"
broker1:
image: confluentinc/cp-kafka:6.2.0
container_name: kafka
hostname: broker1
networks:
- kafkabroker
depends_on:
- zookeeper
environment:
- KAFKA_BROKER_ID=1
- KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
- KAFKA_LISTENERS=CLIENT://broker1:9092,REPLICATION://broker1:9192
- KAFKA_ADVERTISED_LISTENERS=CLIENT://broker1:9092,REPLICATION://broker1:9192
- KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=CLIENT:PLAINTEXT,REPLICATION:PLAINTEXT
- KAFKA_INTER_BROKER_LISTENER_NAME=REPLICATION
- KAFKA_AUTO_CREATE_TOPICS_ENABLE=true
- KAFKA_ADVERTISED_HOST_NAME=broker1
- KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1
- KAFKA_MESSAGE_MAX_BYTES=31457280
- KAFKA_MAX_REQUEST_SIZE=31457280
- KAFKA_PRODUCER_MAX_REQUEST_SIZE=31457280
- CONNECT_PRODUCER_MAX_REQUEST_SIZE=31457280
- KAFKA_SOCKET_REQUEST_MAX_BYTES=31457280
- KAFKA_SOCKET_SEND_BUFFER_BYTES=31457280
- KAFKA_LOG4J_ROOT_LOGLEVEL=${KAFKA_LOG4J_ROOT_LOGLEVEL:-INFO}
volumes:
- data_broker1:/var/lib/kafka
restart: unless-stopped
deploy:
resources:
limits:
memory: 1024M
reservations:
memory: 256M
logging:
driver: "json-file"
options:
max-file: "2"
max-size: "10m"
  # Kafdrop web UI for browsing Kafka topics and consumer groups.
  kafdrop:
    image: obsidiandynamics/kafdrop:3.27.0
    container_name: kafdrop
    ports:
      - "9000:9000"
    networks:
      - kafkabroker
    restart: unless-stopped
    environment:
      KAFKA_BROKERCONNECT: broker1:9092  # port fixed to match the broker's CLIENT listener
      # The UI is served under /kafdrop/ rather than at the root path.
      SERVER_SERVLET_CONTEXTPATH: /kafdrop/
    depends_on:
      - broker1
    deploy:
      resources:
        limits:
          memory: 1024M
        reservations:
          memory: 256M
    logging:
      driver: "json-file"
      options:
        max-file: "2"
        max-size: "10m"
networks:
  # NOTE(review): my_network is not referenced by any service in this file —
  # confirm it is used elsewhere (e.g. an override file) or remove it.
  my_network:
    driver: bridge
  mongodb:
    driver: bridge
  kafkabroker:
    driver: bridge
volumes:
  # Named volumes for durable service data. `local` is the default driver;
  # it is declared explicitly on every entry for consistency (mongodb_data
  # previously omitted it while its siblings declared it).
  mongodb_data:
    driver: local
  data_zookeeper:
    driver: local
  datalog_zookeeper:
    driver: local
  data_broker1:
    driver: local