From ddf43502b45dc43701526c3fbc5267312d749a03 Mon Sep 17 00:00:00 2001 From: Igor Laborie Date: Wed, 31 Aug 2022 08:05:05 +0200 Subject: [PATCH] Minimal docker compose support --- testcontainers/Cargo.toml | 6 +- testcontainers/src/compose/builder.rs | 80 ++++ testcontainers/src/compose/mod.rs | 348 ++++++++++++++++++ testcontainers/src/lib.rs | 5 + testcontainers/tests/compose.rs | 225 +++++++++++ .../docker-compose.yml | 142 +++++++ .../cp-all-in-one-kraft/docker-compose.yml | 167 +++++++++ .../compose/cp-all-in-one-kraft/update_run.sh | 10 + .../tests/compose/elastic/docker-compose.yml | 230 ++++++++++++ .../tests/compose/profiles/docker-compose.yml | 21 ++ .../tests/compose/simple/docker-compose.yml | 21 ++ 11 files changed, 1253 insertions(+), 2 deletions(-) create mode 100644 testcontainers/src/compose/builder.rs create mode 100644 testcontainers/src/compose/mod.rs create mode 100644 testcontainers/tests/compose.rs create mode 100644 testcontainers/tests/compose/cp-all-in-one-community/docker-compose.yml create mode 100644 testcontainers/tests/compose/cp-all-in-one-kraft/docker-compose.yml create mode 100755 testcontainers/tests/compose/cp-all-in-one-kraft/update_run.sh create mode 100644 testcontainers/tests/compose/elastic/docker-compose.yml create mode 100644 testcontainers/tests/compose/profiles/docker-compose.yml create mode 100644 testcontainers/tests/compose/simple/docker-compose.yml diff --git a/testcontainers/Cargo.toml b/testcontainers/Cargo.toml index 61e9f972..2abea0ec 100644 --- a/testcontainers/Cargo.toml +++ b/testcontainers/Cargo.toml @@ -4,7 +4,7 @@ version = "0.14.0" authors = [ "CoBloX developers " ] categories = [ "development-tools::testing" ] edition = "2021" -keywords = [ "docker", "testcontainers" ] +keywords = [ "docker", "testcontainers", "docker-compose" ] license = "MIT OR Apache-2.0" repository = "https://github.com/testcontainers/testcontainers-rs" rust-version = "1.60.0" @@ -19,6 +19,7 @@ futures = "0.3" hex = "0.4" hmac = "0.12" log = "0.4" +once_cell = { version = "1.7", optional = true } rand = "0.8" serde = { version = "1", features = [ "derive" ] } serde_json = "1" @@ -27,9 +28,10 @@ signal-hook = { version = "0.3", optional = true } tokio = { version = "1", features = [ "macros" ], optional = true } [features] -default = [ ] +default = [ "compose" ] watchdog = [ "signal-hook", "conquer-once" ] experimental = [ "async-trait", "bollard", "tokio" ] +compose = [ "once_cell" ] [dev-dependencies] bitcoincore-rpc = "0.16" diff --git a/testcontainers/src/compose/builder.rs b/testcontainers/src/compose/builder.rs new file mode 100644 index 00000000..e675495d --- /dev/null +++ b/testcontainers/src/compose/builder.rs @@ -0,0 +1,80 @@ +use std::{ + collections::HashMap, + path::{Path, PathBuf}, +}; + +use crate::core::WaitFor; + +use super::{DockerCompose, StopMode}; + +/// A builder for [`DockerCompose`] +#[derive(Debug, Clone, Default)] +pub struct DockerComposeBuilder { + path: PathBuf, + env: HashMap, + ready_conditions: Vec<(String, WaitFor)>, + stop_mode: StopMode, + inherit_io: bool, +} + +impl DockerComposeBuilder { + /// Create a docker compose builder. 
+    ///
+    /// The path should point to the docker compose configuration file, usually `docker-compose.yaml`.
+    pub fn new(path: impl AsRef<Path>) -> Self {
+        Self {
+            path: path.as_ref().to_path_buf(),
+            ..Default::default()
+        }
+    }
+
+    /// Add an environment variable
+    pub fn env(mut self, key: impl Into<String>, value: impl Into<String>) -> Self {
+        self.env.insert(key.into(), value.into());
+        self
+    }
+
+    /// Add some profiles
+    pub fn profiles<T>(mut self, profiles: impl IntoIterator<Item = T>) -> Self
+    where
+        T: ToString,
+    {
+        let profiles = profiles
+            .into_iter()
+            .map(|it| it.to_string())
+            .collect::<Vec<_>>()
+            .join(",");
+        self.env.insert("COMPOSE_PROFILES".to_string(), profiles);
+        self
+    }
+
+    /// Add a [`WaitFor`] for a service
+    pub fn wait(mut self, service: impl Into<String>, wait: WaitFor) -> Self {
+        self.ready_conditions.push((service.into(), wait));
+        self
+    }
+
+    /// Set the stop mode; by default it is [`StopMode::Stop`]
+    pub fn stop_with(mut self, stop_mode: StopMode) -> Self {
+        self.stop_mode = stop_mode;
+        self
+    }
+
+    /// Show docker compose stdout and stderr
+    pub fn inherit_io(mut self) -> Self {
+        self.inherit_io = true;
+        self
+    }
+
+    /// Create the [`DockerCompose`]
+    pub fn build(self) -> DockerCompose {
+        DockerCompose {
+            path: self.path,
+            env: self.env,
+            child: None,
+            ready_conditions: self.ready_conditions,
+            stop_mode: self.stop_mode,
+            inherit_io: self.inherit_io,
+        }
+    }
+}
diff --git a/testcontainers/src/compose/mod.rs b/testcontainers/src/compose/mod.rs
new file mode 100644
index 00000000..058eae7d
--- /dev/null
+++ b/testcontainers/src/compose/mod.rs
@@ -0,0 +1,348 @@
+//! Testing with Docker Compose
+//!
+//! Requirements:
+//!
+//! * enable the `compose` feature.
+//! * have Docker Compose v2.x installed (the `docker compose` CLI plugin).
+//!
+//! ```no_run
+//! use testcontainers::{compose::DockerCompose, core::WaitFor};
+//!
+//! let mut dc = DockerCompose::builder("./docker-compose.yml")
+//!     .env("MY_ENV_VAR", "MyValue")
+//!     .wait("mydb", WaitFor::message_on_stdout("database is ready"))
+//!     .wait("webserver", WaitFor::Healthcheck)
+//!     .build();
+//! dc.up();
+//! dc.block_until_ready();
+//!
+//! let port = dc.get_mapped_port("webserver", 80).unwrap();
+//! // testing my web server...
+//! ```
+//!
+//! You can configure the behavior when the `DockerCompose` is dropped
+//! by choosing a [`StopMode`](crate::compose::StopMode) with [`DockerComposeBuilder::stop_with`](crate::compose::DockerComposeBuilder::stop_with).
+//!
+//! For debugging purposes, you can show the `docker compose` output with [`DockerComposeBuilder::inherit_io`](crate::compose::DockerComposeBuilder::inherit_io).
+//!
+//! You can find more examples in the `tests/compose.rs` file.
+//!
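+//! For instance (a sketch; the compose file, service name, and profile name are illustrative),
+//! profiles, the stop behavior, and output forwarding can all be configured on the builder:
+//!
+//! ```no_run
+//! use testcontainers::{
+//!     compose::{DockerCompose, StopMode},
+//!     core::WaitFor,
+//! };
+//!
+//! let mut dc = DockerCompose::builder("./docker-compose.yml")
+//!     // enable the `test1` profile (sets the COMPOSE_PROFILES environment variable)
+//!     .profiles(["test1"])
+//!     // remove the containers when `dc` is dropped
+//!     .stop_with(StopMode::StopAndRemove)
+//!     // forward `docker compose` stdout/stderr for debugging
+//!     .inherit_io()
+//!     .wait("web1", WaitFor::message_on_stdout("server is ready"))
+//!     .build();
+//! dc.up();
+//! dc.block_until_ready();
+//! ```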
+
+use std::{
+    collections::HashMap,
+    io::{BufRead, Read},
+    net::SocketAddr,
+    path::{Path, PathBuf},
+    process::{Child, Command, Stdio},
+    sync::Mutex,
+    thread::sleep,
+    time::{Duration, SystemTime},
+};
+
+use bollard_stubs::models::{ContainerStateStatusEnum, HealthStatusEnum};
+use log::{debug, info, trace};
+use once_cell::sync::Lazy;
+use serde::{Deserialize, Serialize};
+
+use crate::core::{
+    logs::{LogStream, WaitError},
+    WaitFor,
+};
+
+mod builder;
+pub use self::builder::*;
+
+/// Define the behavior when the [`DockerCompose`] is dropped.
+/// By default, [`StopMode::Stop`] is used.
+#[derive(Debug, Clone, Copy)]
+pub enum StopMode {
+    /// Stop containers
+    Stop,
+    /// Stop and remove containers
+    StopAndRemove,
+    /// Do nothing; the containers keep running after the drop
+    Detach,
+}
+
+impl Default for StopMode {
+    fn default() -> Self {
+        Self::Stop
+    }
+}
+
+/// Implementation of the Docker Compose client using the CLI
+#[derive(Debug)]
+pub struct DockerCompose {
+    path: PathBuf,
+    env: HashMap<String, String>,
+    child: Option<Child>,
+    ready_conditions: Vec<(String, WaitFor)>,
+    stop_mode: StopMode,
+    inherit_io: bool,
+}
+
+// Guard to avoid running this code concurrently
+static NO_PARALLEL: Lazy<Mutex<()>> = Lazy::new(Mutex::default);
+
+impl DockerCompose {
+    /// Create the builder
+    pub fn builder(path: impl AsRef<Path>) -> DockerComposeBuilder {
+        DockerComposeBuilder::new(path)
+    }
+
+    fn cmd(&self) -> Command {
+        let mut cmd = Command::new("docker");
+        cmd.arg("compose");
+        cmd.arg("--file");
+        cmd.arg(&self.path);
+        cmd.envs(&self.env);
+
+        cmd
+    }
+
+    fn pull(&self) -> Result<(), std::io::Error> {
+        let mut cmd = self.cmd();
+        cmd.args(["pull"]);
+        trace!("Starting command {:?}", cmd);
+        let _ = cmd.spawn()?.wait()?;
+        Ok(())
+    }
+
+    /// Call the `up` command
+    pub fn up(&mut self) {
+        info!("Starting {:?}", self.path);
+        // `up` could fail when launched in parallel with the same docker-compose file,
+        // so we use a lock guard to avoid that situation
+        let _shared = NO_PARALLEL.lock();
+
+        // Pulling with inherited stdout/stderr
+        self.pull().expect("Failed to execute docker compose pull");
+
+        let mut cmd = self.cmd();
+        cmd.args(["up"]);
+        trace!("Starting command {:?}", cmd);
+        if !self.inherit_io {
+            cmd.stdout(Stdio::null());
+            cmd.stderr(Stdio::null());
+        }
+        let child = cmd
+            .spawn()
+            .expect("Failed to execute docker compose up command");
+        self.child = Some(child);
+    }
+
+    /// Wait until each service's [`WaitFor`] condition is ready
+    pub fn block_until_ready(&self) {
+        info!("Waiting ready {:?}", self.path);
+        let start = SystemTime::now();
+        for (service, wait) in self.ready_conditions.iter() {
+            debug!("Waiting for service {} with {:?}", service, wait);
+            match wait {
+                WaitFor::StdOutMessage { message } => {
+                    let child = self.logs(service, Stdio::piped(), Stdio::null());
+                    self.wait_log_stream(service, child.stdout.unwrap(), message)
+                        .expect("Message not found in stdout");
+                }
+                WaitFor::StdErrMessage { message } => {
+                    let child = self.logs(service, Stdio::null(), Stdio::piped());
+                    self.wait_log_stream(service, child.stderr.unwrap(), message)
+                        .expect("Message not found in stderr");
+                }
+                WaitFor::Duration { length } => {
+                    std::thread::sleep(*length);
+                }
+                WaitFor::Healthcheck => {
+                    self.wait_health_check(service);
+                }
+                WaitFor::Nothing => {}
+            }
+        }
+        debug!(
+            "Wait until ready took {}s",
+            start.elapsed().unwrap().as_secs()
+        );
+    }
+
+    fn logs(&self, service: &str, stdout: Stdio, stderr: Stdio) -> Child {
+        let mut cmd = self.cmd();
+        cmd.args(["logs", "--follow", "--no-log-prefix", service]);
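+        // `--follow` keeps the log stream open so a `WaitFor` message can be matched as it
+        // appears, and `--no-log-prefix` strips the service-name prefix from each line so the
+        // raw message text can be matched.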
+        trace!("Logs command {:?}", cmd);
+        cmd.stdout(stdout)
+            .stderr(stderr)
+            .spawn()
+            .expect("Failed to execute docker compose logs command")
+    }
+
+    fn wait_log_stream(
+        &self,
+        service: &str,
+        stdio: impl Read + 'static,
+        message: &str,
+    ) -> Result<(), WaitError> {
+        self.wait_service_running(service);
+
+        let log_stream = LogStream::new(stdio);
+        log_stream.wait_for_message(message)
+    }
+
+    fn wait_health_check(&self, service: &str) {
+        self.wait_service_running(service);
+
+        loop {
+            use HealthStatusEnum::*;
+            let health_status = self.service_status(service).map(|it| it.health);
+
+            match health_status {
+                Some(HEALTHY) => break,
+                None | Some(EMPTY) | Some(NONE) => {
+                    panic!("Healthcheck not configured for container")
+                }
+                Some(UNHEALTHY) => panic!("Healthcheck reports unhealthy"),
+                Some(STARTING) => std::thread::sleep(Duration::from_millis(100)),
+            }
+        }
+    }
+
+    /// Retrieve the status of services
+    pub fn status(&self) -> Vec<ServiceState> {
+        let mut cmd = self.cmd();
+        cmd.args(["ps", "--format", "json"]);
+        trace!("Status command {:?}", cmd);
+        let out = cmd
+            .stdout(Stdio::piped())
+            .stderr(Stdio::null())
+            .output()
+            .expect("Failed to execute docker compose ps command");
+        serde_json::from_slice::<Vec<ServiceState>>(&out.stdout)
+            .expect("Failed to parse docker compose ps command result")
+    }
+
+    /// Retrieve a service status
+    pub fn service_status(&self, service: &str) -> Option<ServiceState> {
+        let mut cmd = self.cmd();
+        cmd.args(["ps", "--format", "json", service]);
+        trace!("Service status command {:?}", cmd);
+        let out = cmd
+            .stdout(Stdio::piped())
+            .stderr(Stdio::null())
+            .output()
+            .expect("Failed to execute docker compose ps command");
+        if out.stdout.is_empty() {
+            return None;
+        }
+        let statuses = serde_json::from_slice::<Vec<ServiceState>>(&out.stdout)
+            .expect("Failed to parse docker compose ps command result");
+        statuses
+            .into_iter()
+            .find(|status| status.service == service)
+    }
+
+    fn wait_service_running(&self, service: &str) {
+        loop {
+            let status = self.service_status(service).map(|it| it.state);
+            trace!("Found service '{service}' status {status:?}");
+            if status == Some(ContainerStateStatusEnum::RUNNING) {
+                break;
+            }
+            sleep(Duration::from_millis(100));
+        }
+    }
+
+    /// Get a service mapped port
+    pub fn get_mapped_port(&self, service: &str, internal_port: u16) -> Option<u16> {
+        let port = format!("{internal_port}");
+        let mut cmd = self.cmd();
+        cmd.args(["port", service, &port]);
+        trace!("Port command {:?}", cmd);
+        let output = cmd
+            .output()
+            .expect("Failed to execute docker compose port command");
+        output.stdout.lines().next().map(|line| {
+            line.expect("Failed to get line")
+                .parse::<SocketAddr>()
+                .expect("Failed to build socket address")
+                .port()
+        })
+    }
+
+    /// Stop containers
+    pub fn stop(&mut self) {
+        info!("Stopping {:?}", self.path);
+        let mut cmd = self.cmd();
+        cmd.arg("stop");
+        trace!("Stop command {:?}", cmd);
+        if !self.inherit_io {
+            cmd.stdout(Stdio::null());
+            cmd.stderr(Stdio::null());
+        }
+        cmd.spawn()
+            .expect("Failed to execute docker compose stop command")
+            .wait()
+            .expect("Failed to stop containers");
+    }
+
+    /// Remove containers
+    pub fn rm(&mut self) {
+        info!("Removing {:?}", self.path);
+        let mut cmd = self.cmd();
+        cmd.args(["rm", "--force"]);
+        if !self.inherit_io {
+            cmd.stdout(Stdio::null());
+            cmd.stderr(Stdio::null());
+        }
+        cmd.spawn()
+            .expect("Failed to execute docker compose rm command")
+            .wait()
+            .expect("Failed to remove containers");
+    }
+}
+
+impl Drop for DockerCompose {
+    fn drop(&mut self) {
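+        // Honour the configured StopMode: stop (and optionally remove) the containers,
+        // or leave everything running when detached.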
+        debug!("Dropping {:?} with {:?}", self.path, self.stop_mode);
+        match self.stop_mode {
+            StopMode::Stop => {
+                self.stop();
+                if let Some(mut child) = self.child.take() {
+                    let exit = child.wait().expect("command wasn't running");
+                    info!("Exited with status {}", exit);
+                }
+            }
+            StopMode::StopAndRemove => {
+                self.stop();
+                if let Some(mut child) = self.child.take() {
+                    let exit = child.wait().expect("command wasn't running");
+                    info!("Exited with status {}", exit);
+                }
+                self.rm();
+            }
+            StopMode::Detach => {}
+        }
+    }
+}
+
+/// Represents the service state read from `docker compose ps`
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "PascalCase")]
+pub struct ServiceState {
+    #[serde(alias = "ID")]
+    id: String,
+    name: String,
+    command: String,
+    project: String,
+    service: String,
+    state: ContainerStateStatusEnum,
+    health: HealthStatusEnum,
+    exit_code: Option<i64>,
+}
+
+#[cfg(test)]
+mod test {
+
+    use super::DockerCompose;
+
+    #[test]
+    fn docker_compose_should_be_send_and_sync() {
+        assert_send_and_sync::<DockerCompose>();
+    }
+
+    fn assert_send_and_sync<T: Send + Sync>() {}
+}
diff --git a/testcontainers/src/lib.rs b/testcontainers/src/lib.rs
index 42e311f1..437d6424 100644
--- a/testcontainers/src/lib.rs
+++ b/testcontainers/src/lib.rs
@@ -48,3 +48,8 @@ pub mod clients;
 pub mod core;
 /// All available Docker images.
 pub mod images;
+
+#[cfg(feature = "compose")]
+/// Using Docker Compose.
+/// Requires enabling the `compose` feature.
+pub mod compose;
diff --git a/testcontainers/tests/compose.rs b/testcontainers/tests/compose.rs
new file mode 100644
index 00000000..471a5138
--- /dev/null
+++ b/testcontainers/tests/compose.rs
@@ -0,0 +1,225 @@
+#![cfg(feature = "compose")]
+
+use futures::StreamExt;
+use log::info;
+use rdkafka::{
+    consumer::{Consumer, StreamConsumer},
+    producer::{FutureProducer, FutureRecord},
+    ClientConfig, Message,
+};
+use std::time::Duration;
+use testcontainers::{
+    compose::{DockerCompose, StopMode},
+    core::WaitFor,
+};
+
+/// Testing a simple docker compose file
+#[test]
+fn compose_simple() {
+    let _ = pretty_env_logger::try_init();
+
+    let mut dc = DockerCompose::builder("./tests/compose/simple/docker-compose.yml")
+        .wait("web1", WaitFor::message_on_stdout("server is ready"))
+        .inherit_io()
+        .build();
+
+    dc.up();
+    dc.block_until_ready();
+
+    let port = dc.get_mapped_port("web1", 80).unwrap_or_default();
+    assert_eq!(port, 8081);
+}
+
+/// Testing with docker compose profiles
+#[test]
+fn compose_simple_with_profile() {
+    let _ = pretty_env_logger::try_init();
+
+    let mut dc = DockerCompose::builder("./tests/compose/profiles/docker-compose.yml")
+        .wait("web1", WaitFor::message_on_stdout("server is ready"))
+        .wait("web2", WaitFor::message_on_stdout("server is ready"))
+        .wait("web3", WaitFor::message_on_stdout("server is ready"))
+        .profiles(["test1", "test2"])
+        .build();
+
+    dc.up();
+    dc.block_until_ready();
+
+    let port = dc.get_mapped_port("web1", 80).unwrap_or_default();
+    assert_eq!(port, 8091);
+
+    let port = dc.get_mapped_port("web2", 80).unwrap_or_default();
+    assert_eq!(port, 8092);
+
+    let port = dc.get_mapped_port("web3", 80).unwrap_or_default();
+    assert_eq!(port, 8093);
+}
+
+/// Testing with Elasticsearch and Kibana
+/// See the Elastic Stack docker-compose documentation
+#[test]
+fn test_elastic() {
+    let _ = pretty_env_logger::try_init();
+
+    let mut dc = DockerCompose::builder("./tests/compose/elastic/docker-compose.yml")
+        .env("ELASTIC_PASSWORD", "p@ssw0rd!")
+        .env("KIBANA_PASSWORD", "p@ssw0rd!")
+        .env("STACK_VERSION", "8.4.1")
+        .env("CLUSTER_NAME", "docker-cluster")
+        .env("LICENSE", "basic")
+        .env("ES_PORT", "9200")
+        .env("KIBANA_PORT", "5601")
+        .env("MEM_LIMIT", "1073741824")
+        .wait("es01", WaitFor::Healthcheck)
+        .wait("kibana", WaitFor::Healthcheck)
+        // you can display docker-compose output for debugging purposes
+        // .inherit_io()
+        .build();
+
+    dc.up();
+    dc.block_until_ready();
+
+    let port = dc.get_mapped_port("es01", 9200).unwrap();
+    assert_eq!(port, 9200);
+
+    let port = dc.get_mapped_port("kibana", 5601).unwrap();
+    assert_eq!(port, 5601);
+}
+
+/// Testing with Kafka `cp-all-in-one-community` and `cp-all-in-one-kraft`
+/// See <https://github.com/confluentinc/cp-all-in-one>
+///
+/// As these configs use explicit container names, we need to clean up the containers,
+/// so we call the `DockerCompose::rm` function to remove the stopped containers
+#[tokio::test]
+async fn compose_kafka_cp_all_in_one_community() {
+    let _ = pretty_env_logger::try_init();
+
+    info!("🧪 Testing cp-all-in-one-community");
+    let mut dc = build_cp_all_in_one_community();
+    dc.up();
+    dc.block_until_ready();
+    test_kafka(&mut dc).await;
+    std::mem::drop(dc); // stop and remove containers
+
+    info!("🧪 Testing cp-all-in-one-kraft");
+    let mut dc = build_cp_all_in_one_kraft();
+    dc.up();
+    dc.block_until_ready();
+    test_kafka(&mut dc).await;
+}
+
+fn build_cp_all_in_one_community() -> DockerCompose {
+    DockerCompose::builder("./tests/compose/cp-all-in-one-community/docker-compose.yml")
+        .wait(
+            "broker",
+            WaitFor::message_on_stdout("Checking need to trigger auto leader balancing"),
+        )
+        .wait(
+            "schema-registry",
+            WaitFor::message_on_stdout("Server started, listening for requests..."),
+        )
+        .wait(
+            "rest-proxy",
+            WaitFor::message_on_stdout("Server started, listening for requests..."),
+        )
+        // IMPORTANT: remove the containers when the `DockerCompose` is dropped
+        .stop_with(StopMode::StopAndRemove)
+        // you can display docker-compose output for debugging purposes
+        // .inherit_io()
+        .build()
+}
+
+fn build_cp_all_in_one_kraft() -> DockerCompose {
+    DockerCompose::builder("./tests/compose/cp-all-in-one-kraft/docker-compose.yml")
+        .wait("broker", WaitFor::message_on_stdout("Kafka Server started"))
+        .wait(
+            "schema-registry",
+            WaitFor::message_on_stdout("Server started, listening for requests..."),
+        )
+        .wait(
+            "rest-proxy",
+            WaitFor::message_on_stdout("Server started, listening for requests..."),
+        )
+        // IMPORTANT: remove the containers when the `DockerCompose` is dropped
+        .stop_with(StopMode::StopAndRemove)
+        // you can display docker-compose output for debugging purposes
+        // .inherit_io()
+        .build()
+}
+
+async fn test_kafka(dc: &mut DockerCompose) {
+    let port = dc.get_mapped_port("broker", 9092);
+    let bootstrap_servers = format!("127.0.0.1:{}", port.unwrap());
+    test_produce_and_consume_messages(&bootstrap_servers).await;
+
+    let port = dc.get_mapped_port("rest-proxy", 8082);
+    let rest_proxy = format!("http://127.0.0.1:{}", port.unwrap());
+    test_list_topics(&rest_proxy).await;
+}
+
+async fn test_produce_and_consume_messages(bootstrap_servers: &str) {
+    let producer = ClientConfig::new()
+        .set("bootstrap.servers", bootstrap_servers)
+        .set("message.timeout.ms", "5000")
+        .create::<FutureProducer>()
+        .expect("Failed to create Kafka FutureProducer");
+
+    let consumer = ClientConfig::new()
+        .set("group.id", "testcontainer-rs")
+        .set("bootstrap.servers", bootstrap_servers)
+        .set("session.timeout.ms", "6000")
+        .set("enable.auto.commit", "false")
+        .set("auto.offset.reset", "earliest")
+        .create::<StreamConsumer>()
+        .expect("Failed to create Kafka StreamConsumer");
+
+    let topic = "test-topic";
+
+    let number_of_messages_to_produce = 5_usize;
+    let expected: Vec<String> =
(0..number_of_messages_to_produce) + .map(|i| format!("Message {}", i)) + .collect(); + + for (i, message) in expected.iter().enumerate() { + producer + .send( + FutureRecord::to(topic) + .payload(message) + .key(&format!("Key {}", i)), + Duration::from_secs(0), + ) + .await + .unwrap(); + } + + consumer + .subscribe(&[topic]) + .expect("Failed to subscribe to a topic"); + + let mut message_stream = consumer.stream(); + for produced in expected { + let borrowed_message = tokio::time::timeout(Duration::from_secs(10), message_stream.next()) + .await + .unwrap() + .unwrap(); + + assert_eq!( + produced, + borrowed_message + .unwrap() + .payload_view::() + .unwrap() + .unwrap() + ); + } +} + +async fn test_list_topics(rest_proxy: &str) { + let client = reqwest::Client::builder().build().unwrap(); + let url = format!("{rest_proxy}/topics"); + let result = client.get(url).send().await.unwrap(); + let content = result.text().await.unwrap(); + let topics = serde_json::from_str::>(&content).unwrap(); + assert!(topics.iter().any(|topic| topic == "test-topic")); +} diff --git a/testcontainers/tests/compose/cp-all-in-one-community/docker-compose.yml b/testcontainers/tests/compose/cp-all-in-one-community/docker-compose.yml new file mode 100644 index 00000000..fd1fe870 --- /dev/null +++ b/testcontainers/tests/compose/cp-all-in-one-community/docker-compose.yml @@ -0,0 +1,142 @@ +--- +# Copied from https://github.com/confluentinc/cp-all-in-one/tree/7.2.1-post/cp-all-in-one-community +version: '2' +services: + zookeeper: + image: confluentinc/cp-zookeeper:7.2.1 + hostname: zookeeper + container_name: zookeeper + ports: + - "2181:2181" + environment: + ZOOKEEPER_CLIENT_PORT: 2181 + ZOOKEEPER_TICK_TIME: 2000 + + broker: + image: confluentinc/cp-kafka:7.2.1 + hostname: broker + container_name: broker + depends_on: + - zookeeper + ports: + - "29092:29092" + - "9092:9092" + - "9101:9101" + environment: + KAFKA_BROKER_ID: 1 + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181' + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT + KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://broker:29092,PLAINTEXT_HOST://localhost:9092 + KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 + KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 + KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 + KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 + KAFKA_JMX_PORT: 9101 + KAFKA_JMX_HOSTNAME: localhost + + schema-registry: + image: confluentinc/cp-schema-registry:7.2.1 + hostname: schema-registry + container_name: schema-registry + depends_on: + - broker + ports: + - "8081:8081" + environment: + SCHEMA_REGISTRY_HOST_NAME: schema-registry + SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: 'broker:29092' + SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:8081 + + connect: + image: cnfldemos/kafka-connect-datagen:0.6.0-7.2.1 + hostname: connect + container_name: connect + depends_on: + - broker + - schema-registry + ports: + - "8083:8083" + environment: + CONNECT_BOOTSTRAP_SERVERS: 'broker:29092' + CONNECT_REST_ADVERTISED_HOST_NAME: connect + CONNECT_GROUP_ID: compose-connect-group + CONNECT_CONFIG_STORAGE_TOPIC: docker-connect-configs + CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1 + CONNECT_OFFSET_FLUSH_INTERVAL_MS: 10000 + CONNECT_OFFSET_STORAGE_TOPIC: docker-connect-offsets + CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1 + CONNECT_STATUS_STORAGE_TOPIC: docker-connect-status + CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1 + CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter + CONNECT_VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter 
+ CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schema-registry:8081 + CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components" + CONNECT_LOG4J_LOGGERS: org.apache.zookeeper=ERROR,org.I0Itec.zkclient=ERROR,org.reflections=ERROR + + ksqldb-server: + image: confluentinc/cp-ksqldb-server:7.2.1 + hostname: ksqldb-server + container_name: ksqldb-server + depends_on: + - broker + - connect + ports: + - "8088:8088" + environment: + KSQL_CONFIG_DIR: "/etc/ksql" + KSQL_BOOTSTRAP_SERVERS: "broker:29092" + KSQL_HOST_NAME: ksqldb-server + KSQL_LISTENERS: "http://0.0.0.0:8088" + KSQL_CACHE_MAX_BYTES_BUFFERING: 0 + KSQL_KSQL_SCHEMA_REGISTRY_URL: "http://schema-registry:8081" + KSQL_KSQL_CONNECT_URL: "http://connect:8083" + KSQL_KSQL_LOGGING_PROCESSING_TOPIC_REPLICATION_FACTOR: 1 + KSQL_KSQL_LOGGING_PROCESSING_TOPIC_AUTO_CREATE: 'true' + KSQL_KSQL_LOGGING_PROCESSING_STREAM_AUTO_CREATE: 'true' + + ksqldb-cli: + image: confluentinc/cp-ksqldb-cli:7.2.1 + container_name: ksqldb-cli + depends_on: + - broker + - connect + - ksqldb-server + entrypoint: /bin/sh + tty: true + + ksql-datagen: + image: confluentinc/ksqldb-examples:7.2.1 + hostname: ksql-datagen + container_name: ksql-datagen + depends_on: + - ksqldb-server + - broker + - schema-registry + - connect + command: "bash -c 'echo Waiting for Kafka to be ready... && \ + cub kafka-ready -b broker:29092 1 40 && \ + echo Waiting for Confluent Schema Registry to be ready... && \ + cub sr-ready schema-registry 8081 40 && \ + echo Waiting a few seconds for topic creation to finish... && \ + sleep 11 && \ + tail -f /dev/null'" + environment: + KSQL_CONFIG_DIR: "/etc/ksql" + STREAMS_BOOTSTRAP_SERVERS: broker:29092 + STREAMS_SCHEMA_REGISTRY_HOST: schema-registry + STREAMS_SCHEMA_REGISTRY_PORT: 8081 + + rest-proxy: + image: confluentinc/cp-kafka-rest:7.2.1 + depends_on: + - broker + - schema-registry + ports: + - 8082:8082 + hostname: rest-proxy + container_name: rest-proxy + environment: + KAFKA_REST_HOST_NAME: rest-proxy + KAFKA_REST_BOOTSTRAP_SERVERS: 'broker:29092' + KAFKA_REST_LISTENERS: "http://0.0.0.0:8082" + KAFKA_REST_SCHEMA_REGISTRY_URL: 'http://schema-registry:8081' \ No newline at end of file diff --git a/testcontainers/tests/compose/cp-all-in-one-kraft/docker-compose.yml b/testcontainers/tests/compose/cp-all-in-one-kraft/docker-compose.yml new file mode 100644 index 00000000..450836fd --- /dev/null +++ b/testcontainers/tests/compose/cp-all-in-one-kraft/docker-compose.yml @@ -0,0 +1,167 @@ +--- +version: '2' +services: + + broker: + image: confluentinc/cp-kafka:7.2.1 + hostname: broker + container_name: broker + ports: + - "9092:9092" + - "9101:9101" + environment: + KAFKA_BROKER_ID: 1 + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT' + KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://broker:29092,PLAINTEXT_HOST://localhost:9092' + KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 + KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 + KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 + KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 + KAFKA_JMX_PORT: 9101 + KAFKA_JMX_HOSTNAME: localhost + KAFKA_PROCESS_ROLES: 'broker,controller' + KAFKA_NODE_ID: 1 + KAFKA_CONTROLLER_QUORUM_VOTERS: '1@broker:29093' + KAFKA_LISTENERS: 'PLAINTEXT://broker:29092,CONTROLLER://broker:29093,PLAINTEXT_HOST://0.0.0.0:9092' + KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT' + KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER' + KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs' + volumes: + - ./update_run.sh:/tmp/update_run.sh + command: "bash -c 
'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'" + + schema-registry: + image: confluentinc/cp-schema-registry:7.2.1 + hostname: schema-registry + container_name: schema-registry + depends_on: + - broker + ports: + - "8081:8081" + environment: + SCHEMA_REGISTRY_HOST_NAME: schema-registry + SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: 'broker:29092' + SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:8081 + + connect: + image: cnfldemos/cp-server-connect-datagen:0.6.0-7.2.1 + hostname: connect + container_name: connect + depends_on: + - broker + - schema-registry + ports: + - "8083:8083" + environment: + CONNECT_BOOTSTRAP_SERVERS: 'broker:29092' + CONNECT_REST_ADVERTISED_HOST_NAME: connect + CONNECT_GROUP_ID: compose-connect-group + CONNECT_CONFIG_STORAGE_TOPIC: docker-connect-configs + CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1 + CONNECT_OFFSET_FLUSH_INTERVAL_MS: 10000 + CONNECT_OFFSET_STORAGE_TOPIC: docker-connect-offsets + CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1 + CONNECT_STATUS_STORAGE_TOPIC: docker-connect-status + CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1 + CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter + CONNECT_VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter + CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schema-registry:8081 + # CLASSPATH required due to CC-2422 + CLASSPATH: /usr/share/java/monitoring-interceptors/monitoring-interceptors-7.2.1.jar + CONNECT_PRODUCER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor" + CONNECT_CONSUMER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor" + CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components" + CONNECT_LOG4J_LOGGERS: org.apache.zookeeper=ERROR,org.I0Itec.zkclient=ERROR,org.reflections=ERROR + + control-center: + image: confluentinc/cp-enterprise-control-center:7.2.1 + hostname: control-center + container_name: control-center + depends_on: + - broker + - schema-registry + - connect + - ksqldb-server + ports: + - "9021:9021" + environment: + CONTROL_CENTER_BOOTSTRAP_SERVERS: 'broker:29092' + CONTROL_CENTER_CONNECT_CONNECT-DEFAULT_CLUSTER: 'connect:8083' + CONTROL_CENTER_KSQL_KSQLDB1_URL: "http://ksqldb-server:8088" + CONTROL_CENTER_KSQL_KSQLDB1_ADVERTISED_URL: "http://localhost:8088" + CONTROL_CENTER_SCHEMA_REGISTRY_URL: "http://schema-registry:8081" + CONTROL_CENTER_REPLICATION_FACTOR: 1 + CONTROL_CENTER_INTERNAL_TOPICS_PARTITIONS: 1 + CONTROL_CENTER_MONITORING_INTERCEPTOR_TOPIC_PARTITIONS: 1 + CONFLUENT_METRICS_TOPIC_REPLICATION: 1 + PORT: 9021 + + ksqldb-server: + image: confluentinc/cp-ksqldb-server:7.2.1 + hostname: ksqldb-server + container_name: ksqldb-server + depends_on: + - broker + - connect + ports: + - "8088:8088" + environment: + KSQL_CONFIG_DIR: "/etc/ksql" + KSQL_BOOTSTRAP_SERVERS: "broker:29092" + KSQL_HOST_NAME: ksqldb-server + KSQL_LISTENERS: "http://0.0.0.0:8088" + KSQL_CACHE_MAX_BYTES_BUFFERING: 0 + KSQL_KSQL_SCHEMA_REGISTRY_URL: "http://schema-registry:8081" + KSQL_PRODUCER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor" + KSQL_CONSUMER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor" + KSQL_KSQL_CONNECT_URL: "http://connect:8083" + KSQL_KSQL_LOGGING_PROCESSING_TOPIC_REPLICATION_FACTOR: 1 + 
KSQL_KSQL_LOGGING_PROCESSING_TOPIC_AUTO_CREATE: 'true' + KSQL_KSQL_LOGGING_PROCESSING_STREAM_AUTO_CREATE: 'true' + + ksqldb-cli: + image: confluentinc/cp-ksqldb-cli:7.2.1 + container_name: ksqldb-cli + depends_on: + - broker + - connect + - ksqldb-server + entrypoint: /bin/sh + tty: true + + ksql-datagen: + image: confluentinc/ksqldb-examples:7.2.1 + hostname: ksql-datagen + container_name: ksql-datagen + depends_on: + - ksqldb-server + - broker + - schema-registry + - connect + command: "bash -c 'echo Waiting for Kafka to be ready... && \ + cub kafka-ready -b broker:29092 1 40 && \ + echo Waiting for Confluent Schema Registry to be ready... && \ + cub sr-ready schema-registry 8081 40 && \ + echo Waiting a few seconds for topic creation to finish... && \ + sleep 11 && \ + tail -f /dev/null'" + environment: + KSQL_CONFIG_DIR: "/etc/ksql" + STREAMS_BOOTSTRAP_SERVERS: broker:29092 + STREAMS_SCHEMA_REGISTRY_HOST: schema-registry + STREAMS_SCHEMA_REGISTRY_PORT: 8081 + + rest-proxy: + image: confluentinc/cp-kafka-rest:7.2.1 + depends_on: + - broker + - schema-registry + ports: + - 8082:8082 + hostname: rest-proxy + container_name: rest-proxy + environment: + KAFKA_REST_HOST_NAME: rest-proxy + KAFKA_REST_BOOTSTRAP_SERVERS: 'broker:29092' + KAFKA_REST_LISTENERS: "http://0.0.0.0:8082" + KAFKA_REST_SCHEMA_REGISTRY_URL: 'http://schema-registry:8081' \ No newline at end of file diff --git a/testcontainers/tests/compose/cp-all-in-one-kraft/update_run.sh b/testcontainers/tests/compose/cp-all-in-one-kraft/update_run.sh new file mode 100755 index 00000000..b23ac0b6 --- /dev/null +++ b/testcontainers/tests/compose/cp-all-in-one-kraft/update_run.sh @@ -0,0 +1,10 @@ +#!/bin/sh + +# Docker workaround: Remove check for KAFKA_ZOOKEEPER_CONNECT parameter +sed -i '/KAFKA_ZOOKEEPER_CONNECT/d' /etc/confluent/docker/configure + +# Docker workaround: Ignore cub zk-ready +sed -i 's/cub zk-ready/echo ignore zk-ready/' /etc/confluent/docker/ensure + +# KRaft required step: Format the storage directory with a new cluster ID +echo "kafka-storage format --ignore-formatted -t $(kafka-storage random-uuid) -c /etc/kafka/kafka.properties" >> /etc/confluent/docker/ensure \ No newline at end of file diff --git a/testcontainers/tests/compose/elastic/docker-compose.yml b/testcontainers/tests/compose/elastic/docker-compose.yml new file mode 100644 index 00000000..7e49d5ea --- /dev/null +++ b/testcontainers/tests/compose/elastic/docker-compose.yml @@ -0,0 +1,230 @@ +version: "2.2" + +services: + setup: + image: docker.elastic.co/elasticsearch/elasticsearch:${STACK_VERSION} + volumes: + - certs:/usr/share/elasticsearch/config/certs + user: "0" + command: > + bash -c ' + if [ x${ELASTIC_PASSWORD} == x ]; then + echo "Set the ELASTIC_PASSWORD environment variable in the .env file"; + exit 1; + elif [ x${KIBANA_PASSWORD} == x ]; then + echo "Set the KIBANA_PASSWORD environment variable in the .env file"; + exit 1; + fi; + if [ ! -f config/certs/ca.zip ]; then + echo "Creating CA"; + bin/elasticsearch-certutil ca --silent --pem -out config/certs/ca.zip; + unzip config/certs/ca.zip -d config/certs; + fi; + if [ ! 
-f config/certs/certs.zip ]; then + echo "Creating certs"; + echo -ne \ + "instances:\n"\ + " - name: es01\n"\ + " dns:\n"\ + " - es01\n"\ + " - localhost\n"\ + " ip:\n"\ + " - 127.0.0.1\n"\ + " - name: es02\n"\ + " dns:\n"\ + " - es02\n"\ + " - localhost\n"\ + " ip:\n"\ + " - 127.0.0.1\n"\ + " - name: es03\n"\ + " dns:\n"\ + " - es03\n"\ + " - localhost\n"\ + " ip:\n"\ + " - 127.0.0.1\n"\ + > config/certs/instances.yml; + bin/elasticsearch-certutil cert --silent --pem -out config/certs/certs.zip --in config/certs/instances.yml --ca-cert config/certs/ca/ca.crt --ca-key config/certs/ca/ca.key; + unzip config/certs/certs.zip -d config/certs; + fi; + echo "Setting file permissions" + chown -R root:root config/certs; + find . -type d -exec chmod 750 \{\} \;; + find . -type f -exec chmod 640 \{\} \;; + echo "Waiting for Elasticsearch availability"; + until curl -s --cacert config/certs/ca/ca.crt https://es01:9200 | grep -q "missing authentication credentials"; do sleep 30; done; + echo "Setting kibana_system password"; + until curl -s -X POST --cacert config/certs/ca/ca.crt -u "elastic:${ELASTIC_PASSWORD}" -H "Content-Type: application/json" https://es01:9200/_security/user/kibana_system/_password -d "{\"password\":\"${KIBANA_PASSWORD}\"}" | grep -q "^{}"; do sleep 10; done; + echo "All done!"; + ' + healthcheck: + test: [ "CMD-SHELL", "[ -f config/certs/es01/es01.crt ]" ] + interval: 1s + timeout: 5s + retries: 120 + + es01: + depends_on: + setup: + condition: service_healthy + image: docker.elastic.co/elasticsearch/elasticsearch:${STACK_VERSION} + volumes: + - certs:/usr/share/elasticsearch/config/certs + - esdata01:/usr/share/elasticsearch/data + ports: + - ${ES_PORT}:9200 + environment: + - node.name=es01 + - cluster.name=${CLUSTER_NAME} + - cluster.initial_master_nodes=es01,es02,es03 + - discovery.seed_hosts=es02,es03 + - ELASTIC_PASSWORD=${ELASTIC_PASSWORD} + - bootstrap.memory_lock=true + - xpack.security.enabled=true + - xpack.security.http.ssl.enabled=true + - xpack.security.http.ssl.key=certs/es01/es01.key + - xpack.security.http.ssl.certificate=certs/es01/es01.crt + - xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt + - xpack.security.http.ssl.verification_mode=certificate + - xpack.security.transport.ssl.enabled=true + - xpack.security.transport.ssl.key=certs/es01/es01.key + - xpack.security.transport.ssl.certificate=certs/es01/es01.crt + - xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt + - xpack.security.transport.ssl.verification_mode=certificate + - xpack.license.self_generated.type=${LICENSE} + mem_limit: ${MEM_LIMIT} + ulimits: + memlock: + soft: -1 + hard: -1 + healthcheck: + test: + [ + "CMD-SHELL", + "curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'" + ] + interval: 10s + timeout: 10s + retries: 120 + + es02: + depends_on: + - es01 + image: docker.elastic.co/elasticsearch/elasticsearch:${STACK_VERSION} + volumes: + - certs:/usr/share/elasticsearch/config/certs + - esdata02:/usr/share/elasticsearch/data + environment: + - node.name=es02 + - cluster.name=${CLUSTER_NAME} + - cluster.initial_master_nodes=es01,es02,es03 + - discovery.seed_hosts=es01,es03 + - bootstrap.memory_lock=true + - xpack.security.enabled=true + - xpack.security.http.ssl.enabled=true + - xpack.security.http.ssl.key=certs/es02/es02.key + - xpack.security.http.ssl.certificate=certs/es02/es02.crt + - xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt + - 
xpack.security.http.ssl.verification_mode=certificate + - xpack.security.transport.ssl.enabled=true + - xpack.security.transport.ssl.key=certs/es02/es02.key + - xpack.security.transport.ssl.certificate=certs/es02/es02.crt + - xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt + - xpack.security.transport.ssl.verification_mode=certificate + - xpack.license.self_generated.type=${LICENSE} + mem_limit: ${MEM_LIMIT} + ulimits: + memlock: + soft: -1 + hard: -1 + healthcheck: + test: + [ + "CMD-SHELL", + "curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'" + ] + interval: 10s + timeout: 10s + retries: 120 + + es03: + depends_on: + - es02 + image: docker.elastic.co/elasticsearch/elasticsearch:${STACK_VERSION} + volumes: + - certs:/usr/share/elasticsearch/config/certs + - esdata03:/usr/share/elasticsearch/data + environment: + - node.name=es03 + - cluster.name=${CLUSTER_NAME} + - cluster.initial_master_nodes=es01,es02,es03 + - discovery.seed_hosts=es01,es02 + - bootstrap.memory_lock=true + - xpack.security.enabled=true + - xpack.security.http.ssl.enabled=true + - xpack.security.http.ssl.key=certs/es03/es03.key + - xpack.security.http.ssl.certificate=certs/es03/es03.crt + - xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt + - xpack.security.http.ssl.verification_mode=certificate + - xpack.security.transport.ssl.enabled=true + - xpack.security.transport.ssl.key=certs/es03/es03.key + - xpack.security.transport.ssl.certificate=certs/es03/es03.crt + - xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt + - xpack.security.transport.ssl.verification_mode=certificate + - xpack.license.self_generated.type=${LICENSE} + mem_limit: ${MEM_LIMIT} + ulimits: + memlock: + soft: -1 + hard: -1 + healthcheck: + test: + [ + "CMD-SHELL", + "curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'" + ] + interval: 10s + timeout: 10s + retries: 120 + + kibana: + depends_on: + es01: + condition: service_healthy + es02: + condition: service_healthy + es03: + condition: service_healthy + image: docker.elastic.co/kibana/kibana:${STACK_VERSION} + volumes: + - certs:/usr/share/kibana/config/certs + - kibanadata:/usr/share/kibana/data + ports: + - ${KIBANA_PORT}:5601 + environment: + - SERVERNAME=kibana + - ELASTICSEARCH_HOSTS=https://es01:9200 + - ELASTICSEARCH_USERNAME=kibana_system + - ELASTICSEARCH_PASSWORD=${KIBANA_PASSWORD} + - ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES=config/certs/ca/ca.crt + mem_limit: ${MEM_LIMIT} + healthcheck: + test: + [ + "CMD-SHELL", + "curl -s -I http://localhost:5601 | grep -q 'HTTP/1.1 302 Found'" + ] + interval: 10s + timeout: 10s + retries: 120 + +volumes: + certs: + driver: local + esdata01: + driver: local + esdata02: + driver: local + esdata03: + driver: local + kibanadata: + driver: local diff --git a/testcontainers/tests/compose/profiles/docker-compose.yml b/testcontainers/tests/compose/profiles/docker-compose.yml new file mode 100644 index 00000000..a2fe00c3 --- /dev/null +++ b/testcontainers/tests/compose/profiles/docker-compose.yml @@ -0,0 +1,21 @@ +--- +version: "2" +services: + web1: + image: simple_web_server + ports: + - "8091:80" + + web2: + profiles: + - test1 + image: simple_web_server + ports: + - "8092:80" + + web3: + profiles: + - test2 + image: simple_web_server + ports: + - "8093:80" diff --git a/testcontainers/tests/compose/simple/docker-compose.yml b/testcontainers/tests/compose/simple/docker-compose.yml new file 
mode 100644 index 00000000..f1c31680 --- /dev/null +++ b/testcontainers/tests/compose/simple/docker-compose.yml @@ -0,0 +1,21 @@ +--- +version: "2" +services: + web1: + image: simple_web_server + ports: + - "8081:80" + + web2: + profiles: + - test1 + image: simple_web_server + ports: + - "8082:80" + + web3: + profiles: + - test2 + image: simple_web_server + ports: + - "8083:80"