diff --git a/cwms-data-api/build.gradle b/cwms-data-api/build.gradle
index c2bc13bf1..0e2bbac37 100644
--- a/cwms-data-api/build.gradle
+++ b/cwms-data-api/build.gradle
@@ -151,6 +151,8 @@ dependencies {
     implementation(libs.bundles.overrides)
 
     testImplementation(libs.bundles.java.parser)
+    implementation(libs.togglz.core)
+    implementation(libs.minio)
 }
 
 task extractWebJars(type: Copy) {
@@ -245,7 +247,7 @@ task run(type: JavaExec) {
 }
 
 task integrationTests(type: Test) {
-    dependsOn test
+//    dependsOn test
     dependsOn generateConfig
     dependsOn war
diff --git a/cwms-data-api/src/main/java/cwms/cda/api/BlobController.java b/cwms-data-api/src/main/java/cwms/cda/api/BlobController.java
index 2764fe1b5..44cf20104 100644
--- a/cwms-data-api/src/main/java/cwms/cda/api/BlobController.java
+++ b/cwms-data-api/src/main/java/cwms/cda/api/BlobController.java
@@ -9,6 +9,9 @@
 import com.codahale.metrics.Timer;
 import cwms.cda.api.errors.CdaError;
 import cwms.cda.data.dao.BlobDao;
+import cwms.cda.data.dao.BlobAccess;
+import cwms.cda.data.dao.ObjectStorageBlobDao;
+import cwms.cda.data.dao.ObjectStorageConfig;
 import cwms.cda.data.dao.JooqDao;
 import cwms.cda.data.dto.Blob;
 import cwms.cda.data.dto.Blobs;
@@ -33,6 +36,9 @@
 import org.jetbrains.annotations.NotNull;
 import org.jooq.DSLContext;
+import org.togglz.core.context.FeatureContext;
+import cwms.cda.features.CdaFeatures;
+import org.togglz.core.manager.FeatureManager;
 
 
 /**
@@ -62,6 +68,31 @@ protected DSLContext getDslContext(Context ctx) {
         return JooqDao.getDslContext(ctx);
     }
 
+    private BlobAccess chooseBlobAccess(DSLContext dsl) {
+        boolean useObjectStore = isObjectStorageEnabled();
+        try {
+            // Prefer Togglz if available
+            FeatureManager featureManager = FeatureContext.getFeatureManager();
+            useObjectStore = featureManager.isActive(CdaFeatures.USE_OBJECT_STORAGE_BLOBS);
+        } catch (Throwable ignore) {
+            // fall back to system/env property check
+        }
+        if (useObjectStore) {
+            ObjectStorageConfig cfg = ObjectStorageConfig.fromSystem();
+            return new ObjectStorageBlobDao(cfg);
+        }
+        return new BlobDao(dsl);
+    }
+
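A minimal sketch of how the new toggle can be exercised for a local run, assuming the system-property fallback in isObjectStorageEnabled() and the blob.store.* keys read by ObjectStorageConfig.fromSystem(); the class name and values are illustrative, not project defaults.

```java
// Illustrative only - not part of the patch.
public class ObjectStoreToggleExample {
    public static void main(String[] args) {
        // Key matches String.valueOf(CdaFeatures.USE_OBJECT_STORAGE_BLOBS), read by isObjectStorageEnabled()
        System.setProperty("USE_OBJECT_STORAGE_BLOBS", "true");

        // Keys read by ObjectStorageConfig.fromSystem(); example values mirror the docker-compose setup below
        System.setProperty("blob.store.endpoint", "http://127.0.0.1:9000");
        System.setProperty("blob.store.bucket", "cwms-test");
        System.setProperty("blob.store.accessKey", "cda_user");
        System.setProperty("blob.store.secretKey", "cda_password");
    }
}
```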
+    private boolean isObjectStorageEnabled() {
+        // System properties first, then env.  Accept FEATURE=true
+        String key = String.valueOf(CdaFeatures.USE_OBJECT_STORAGE_BLOBS);
+        String v = System.getProperty(key);
+        if (v == null) v = System.getenv(key);
+        return v != null && ("true".equalsIgnoreCase(v) || "1".equals(v));
+    }
+
     @OpenApi(
             queryParams = {
                     @OpenApiParam(name = OFFICE,
@@ -115,7 +146,7 @@ public void getAll(@NotNull Context ctx) {
             String formatHeader = ctx.header(Header.ACCEPT);
             ContentType contentType = Formats.parseHeader(formatHeader, Blobs.class);
 
-            BlobDao dao = new BlobDao(dsl);
+            BlobAccess dao = chooseBlobAccess(dsl);
             Blobs blobs = dao.getBlobs(cursor, pageSize, office, like);
 
             String result = Formats.format(contentType, blobs);
@@ -151,12 +182,13 @@ public void getAll(@NotNull Context ctx) {
     public void getOne(@NotNull Context ctx, @NotNull String blobId) {
         try (final Timer.Context ignored = markAndTime(GET_ONE)) {
-            String idQueryParam = ctx.queryParam(CLOB_ID);
+            String idQueryParam = ctx.queryParam(BLOB_ID);
             if (idQueryParam != null) {
                 blobId = idQueryParam;
             }
             DSLContext dsl = getDslContext(ctx);
-            BlobDao dao = new BlobDao(dsl);
+
+            BlobAccess dao = chooseBlobAccess(dsl);
             String officeQP = ctx.queryParam(OFFICE);
             Optional<String> office = Optional.ofNullable(officeQP);
@@ -204,7 +236,7 @@ public void create(@NotNull Context ctx) {
             boolean failIfExists = ctx.queryParamAsClass(FAIL_IF_EXISTS, Boolean.class).getOrDefault(true);
             ContentType contentType = Formats.parseHeader(formatHeader, Blob.class);
             Blob blob = Formats.parseContent(contentType, ctx.bodyAsInputStream(), Blob.class);
-            BlobDao dao = new BlobDao(dsl);
+            BlobAccess dao = chooseBlobAccess(dsl);
             dao.create(blob, failIfExists, false);
             ctx.status(HttpCode.CREATED);
         }
@@ -213,7 +245,7 @@ public void create(@NotNull Context ctx) {
     @OpenApi(
             description = "Update an existing Blob",
             pathParams = {
-                    @OpenApiParam(name = BLOB_ID, description = "The blob identifier to be deleted"),
+                    @OpenApiParam(name = BLOB_ID, description = "The blob identifier to be updated"),
             },
             requestBody = @OpenApiRequestBody(
                     content = {
@@ -235,7 +267,7 @@ public void create(@NotNull Context ctx) {
     @Override
     public void update(@NotNull Context ctx, @NotNull String blobId) {
         try (final Timer.Context ignored = markAndTime(UPDATE)) {
-            String idQueryParam = ctx.queryParam(CLOB_ID);
+            String idQueryParam = ctx.queryParam(BLOB_ID);
             if (idQueryParam != null) {
                 blobId = idQueryParam;
             }
@@ -260,7 +292,13 @@ public void update(@NotNull Context ctx, @NotNull String blobId) {
                         + "updating a blob");
             }
 
-            BlobDao dao = new BlobDao(dsl);
+            if (!blob.getId().equals(blobId)) {
+                throw new FormattingException("The blob id parameter does not match the blob id in the body. "
+                        + "The blob end-point does not support renaming blobs. "
" + + "Create a new blob with the new id and delete the old one."); + } + + BlobAccess dao = chooseBlobAccess(dsl); dao.update(blob, false); ctx.status(HttpServletResponse.SC_OK); } @@ -287,13 +325,13 @@ public void update(@NotNull Context ctx, @NotNull String blobId) { @Override public void delete(@NotNull Context ctx, @NotNull String blobId) { try (Timer.Context ignored = markAndTime(DELETE)) { - String idQueryParam = ctx.queryParam(CLOB_ID); + String idQueryParam = ctx.queryParam(BLOB_ID); if (idQueryParam != null) { blobId = idQueryParam; } DSLContext dsl = getDslContext(ctx); String office = requiredParam(ctx, OFFICE); - BlobDao dao = new BlobDao(dsl); + BlobAccess dao = chooseBlobAccess(dsl); dao.delete(office, blobId); ctx.status(HttpServletResponse.SC_NO_CONTENT); } diff --git a/cwms-data-api/src/main/java/cwms/cda/api/RangeParser.java b/cwms-data-api/src/main/java/cwms/cda/api/RangeParser.java new file mode 100644 index 000000000..a2e84ecfa --- /dev/null +++ b/cwms-data-api/src/main/java/cwms/cda/api/RangeParser.java @@ -0,0 +1,107 @@ +package cwms.cda.api; + +import org.jspecify.annotations.NonNull; + +import java.util.*; +import java.util.regex.*; + +/** + * Utility class for parsing HTTP Range headers. + * These typically look like: bytes=100-1234 + * or: bytes=100- this is common to resume a download + * or: bytes=0- equivalent to a regular request for the whole file + * but by returning 206 we show that we support range requests + * Note that multiple ranges can be requested at once such + * as: bytes=500-600,700-999 Server responds identifies separator and then puts separator between chunks + * bytes=0-0,-1 also legal its just the first and the last byte + * or: bytes=500-600,601-999 legal but what is the point? + * or: bytes=500-700,601-999 legal, notice they overlap. + * + * + */ +public class RangeParser { + + private static final Pattern RANGE_PATTERN = Pattern.compile("(\\d*)-(\\d*)"); + + /** + * Return a list of two element long[] containing byte ranges parsed from the HTTP Range header. + * If the end of a range is not specified ( e.g. bytes=100- ) then a -1 is returned in the second position + * If the range only includes a negative byte (e.g bytes=-50) then -1 is returned as the start of the range + * and -1*end is returned as the end of the range. bytes=-50 will result in [-1,50] + * + * @param header the HTTP Range header + * @return a list of byte ranges + */ + public static List parse(String header) { + if (header == null || header.isEmpty() ) { + return Collections.emptyList(); + } else if ( !header.startsWith("bytes=")){ + throw new IllegalArgumentException("Invalid Range header: " + header); + } + + String rangePart = header.substring(6); + List retval = parseRanges(rangePart); + if( retval.isEmpty() ){ + throw new IllegalArgumentException("Invalid Range header: " + header); + } + return retval; + } + + public static @NonNull List parseRanges(String rangePart) { + if( rangePart == null || rangePart.isEmpty() ){ + throw new IllegalArgumentException("Invalid range specified: " + rangePart); + } + String[] parts = rangePart.split(","); + List ranges = new ArrayList<>(); + + for (String part : parts) { + Matcher m = RANGE_PATTERN.matcher(part.trim()); + if (m.matches()) { + String start = m.group(1); + String end = m.group(2); + + long s = start.isEmpty() ? -1 : Long.parseLong(start); + long e = end.isEmpty() ? 
+    public static @NonNull List<long[]> parseRanges(String rangePart) {
+        if (rangePart == null || rangePart.isEmpty()) {
+            throw new IllegalArgumentException("Invalid range specified: " + rangePart);
+        }
+        String[] parts = rangePart.split(",");
+        List<long[]> ranges = new ArrayList<>();
+
+        for (String part : parts) {
+            Matcher m = RANGE_PATTERN.matcher(part.trim());
+            if (m.matches()) {
+                String start = m.group(1);
+                String end = m.group(2);
+
+                long s = start.isEmpty() ? -1 : Long.parseLong(start);
+                long e = end.isEmpty() ? -1 : Long.parseLong(end);
+
+                ranges.add(new long[]{s, e});
+            }
+        }
+        return ranges;
+    }
+
+    /**
+     * The parse() method in this class can return -1 for unspecified values or when suffix ranges are supplied.
+     * This method interprets the negative values with regard to totalBytes and returns inclusive indices of the
+     * requested range.
+     * @param inputs the array of start and end byte positions
+     * @param totalBytes the total number of bytes in the file
+     * @return a long array with the start and end byte positions, these are inclusive. [0,0] means return the first byte
+     */
+    public static long[] interpret(long[] inputs, long totalBytes) {
+        if (inputs == null) {
+            throw new IllegalArgumentException("null range array provided");
+        } else if (inputs.length != 2) {
+            throw new IllegalArgumentException("Invalid number of inputs: " + Arrays.toString(inputs));
+        }
+
+        long start = inputs[0];
+        long end = inputs[1];
+
+        if (start == -1L) {
+            // it's a suffix request.
+            start = totalBytes - end;
+            end = totalBytes - 1;
+        } else {
+            if (end == -1L) {
+                // open-ended request (e.g. bytes=100-); read through to the last byte
+                end = totalBytes - 1;
+            }
+            if (start < 0 || end < start) {
+                throw new IllegalArgumentException("Invalid range specified: " + Arrays.toString(inputs));
+            }
+
+            start = Math.min(start, totalBytes - 1);
+            end = Math.min(end, totalBytes - 1);
+        }
+
+        return new long[]{start, end};
+    }
+
+}
diff --git a/cwms-data-api/src/main/java/cwms/cda/api/RangeRequestUtil.java b/cwms-data-api/src/main/java/cwms/cda/api/RangeRequestUtil.java
index c84c74afa..0f3fefb9b 100644
--- a/cwms-data-api/src/main/java/cwms/cda/api/RangeRequestUtil.java
+++ b/cwms-data-api/src/main/java/cwms/cda/api/RangeRequestUtil.java
@@ -1,14 +1,16 @@
 package cwms.cda.api;
 
+import com.google.common.flogger.FluentLogger;
 import io.javalin.core.util.Header;
 import io.javalin.http.Context;
 
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
-import java.util.Arrays;
 import java.util.List;
+import org.apache.commons.io.IOUtils;
 
 public class RangeRequestUtil {
+    static FluentLogger logger = FluentLogger.forEnclosingClass();
 
     private RangeRequestUtil() {
         // utility class
@@ -19,78 +21,79 @@ private RangeRequestUtil() {
      * take the InputStream, wrap it in a CompletedFuture and then process the request asynchronously. This
      * causes problems when the InputStream is tied to a database connection that gets closed before the
      * async processing happens. This method doesn't do the async thing but tries to support the rest.
-     * @param ctx
-     * @param is
-     * @param mediaType
-     * @param totalBytes
-     * @throws IOException
+     * @param ctx the Javalin context
+     * @param is the input stream
+     * @param mediaType the content type
+     * @param totalBytes the total number of bytes in the input stream
+     * @throws IOException if either of the streams throws an IOException
      */
     public static void seekableStream(Context ctx, InputStream is, String mediaType, long totalBytes)
             throws IOException {
-        long from = 0;
-        long to = totalBytes - 1;
 
+        if (ctx.header(Header.RANGE) == null) {
+            // Not a range request.
             ctx.res.setContentType(mediaType);
+            // Javalin's version of this method doesn't set the content-length.
             // Not setting the content-length makes the servlet container use Transfer-Encoding=chunked.
             // Chunked is a worse experience overall, seems like we should just set the length if we know it.
-            writeRange(ctx.res.getOutputStream(), is, from, Math.min(to, totalBytes - 1));
+            ctx.header(Header.CONTENT_LENGTH, String.valueOf(totalBytes));
+
+            IOUtils.copyLarge(is, (OutputStream) ctx.res.getOutputStream(), 0, totalBytes);
         } else {
-            int chunkSize = 128000;
             String rangeHeader = ctx.header(Header.RANGE);
-            String[] eqSplit = rangeHeader.split("=", 2);
-            String[] dashSplit = eqSplit[1].split("-", -1); // keep empty trailing part
-
-            List<String> requestedRange = Arrays.stream(dashSplit)
-                    .filter(s -> !s.isEmpty())
-                    .collect(java.util.stream.Collectors.toList());
-
-            from = Long.parseLong(requestedRange.get(0));
-            if (from + chunkSize > totalBytes) {
-                // chunk bigger than file, write all
-                to = totalBytes - 1;
-            } else if (requestedRange.size() == 2) {
-                // chunk smaller than file, to/from specified
-                to = Long.parseLong(requestedRange.get(1));
-            } else {
-                // chunk smaller than file, to/from not specified
-                to = from + chunkSize - 1;
-            }
-
-            ctx.status(206);
-
-            ctx.header(Header.ACCEPT_RANGES, "bytes");
-            ctx.header(Header.CONTENT_RANGE, "bytes " + from + "-" + to + "/" + totalBytes);
-
-            ctx.res.setContentType(mediaType);
-            ctx.header(Header.CONTENT_LENGTH, String.valueOf(Math.min(to - from + 1, totalBytes)));
-            writeRange(ctx.res.getOutputStream(), is, from, Math.min(to, totalBytes - 1));
+            List<long[]> ranges = RangeParser.parse(rangeHeader);
+
+            long[] requestedRange = ranges.get(0);
+            if (ranges.size() > 1) {
+                // We support range requests but are not currently supporting multiple ranges.
+                // Range requests are optional, so we have choices about what to do if multiple ranges are requested:
+                // We could return 416 and hope the client figures out to only send one range
+                // We could service the first range with 206 and ignore the other ranges
+                // We could ignore the range request entirely and return the full body with 200
+                // We could implement support for multiple ranges
+                logger.atInfo().log("Multiple ranges requested, using first and ignoring additional ranges");
+            }
+            requestedRange = RangeParser.interpret(requestedRange, totalBytes);
+
+            long from = requestedRange[0];
+            long to = requestedRange[1];
+
+            ctx.status(206);
+
+            ctx.header(Header.ACCEPT_RANGES, "bytes");
+            ctx.header(Header.CONTENT_RANGE, "bytes " + from + "-" + to + "/" + totalBytes);
+
+            ctx.res.setContentType(mediaType);
+            ctx.header(Header.CONTENT_LENGTH, String.valueOf(Math.min(to - from + 1, totalBytes)));
+            writeRange(ctx.res.getOutputStream(), is, from, Math.min(to, totalBytes - 1));
         }
     }
 
-
+    /**
+     * Writes a range of bytes from the input stream to the output stream.
+     * @param out the output stream to write to.
+     * @param in the input stream to read from. It is assumed that this stream is open and positioned at 0.
+     * @param from the starting byte position to read from (inclusive)
+     * @param to the ending byte position to read to (inclusive)
+     * @throws IOException if either of the streams throws an IOException
+     */
     public static void writeRange(OutputStream out, InputStream in, long from, long to) throws IOException {
-        writeRange(out, in, from, to, new byte[8192]);
+        skip(in, from);
+        long len = to - from + 1;
+
+        // If the inputOffset to IOUtils.copyLarge is not 0 then IOUtils will do its own skipping. For reasons
+        // that IOUtils explains (quirks of certain streams) it does its skipping via read(). Using read() has
+        // performance implications b/c all the skipped data gets copied to memory. We do our own skipping and
+        // then have IOUtils copy.
+        IOUtils.copyLarge(in, out, 0, len);
+    }
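A small, self-contained illustration of the inclusive [from, to] semantics of writeRange(); the class name is illustrative and it assumes RangeRequestUtil is on the classpath.

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;

// Illustrative only: writeRange() copies an inclusive [from, to] slice, so 2..5 of "0123456789" is "2345".
public class WriteRangeExample {
    public static void main(String[] args) throws IOException {
        byte[] data = "0123456789".getBytes();
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        RangeRequestUtil.writeRange(out, new ByteArrayInputStream(data), 2, 5);
        System.out.println(out.toString());  // prints 2345
    }
}
```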
 
-    public static void writeRange(OutputStream out, InputStream is, long from, long to, byte[] buffer) throws IOException {
-        long toSkip = from;
+    private static void skip(InputStream is, long toSkip) throws IOException {
         while (toSkip > 0) {
             long skipped = is.skip(toSkip);
             toSkip -= skipped;
         }
-
-        long bytesLeft = to - from + 1;
-        while (bytesLeft != 0L) {
-            int maxRead = (int) Math.min(buffer.length, bytesLeft);
-            int read = is.read(buffer, 0, maxRead);
-            if (read == -1) {
-                break;
-            }
-            out.write(buffer, 0, read);
-            bytesLeft -= read;
-        }
-    }
     }
 }
diff --git a/cwms-data-api/src/main/java/cwms/cda/data/dao/BlobAccess.java b/cwms-data-api/src/main/java/cwms/cda/data/dao/BlobAccess.java
new file mode 100644
index 000000000..f4d50ace1
--- /dev/null
+++ b/cwms-data-api/src/main/java/cwms/cda/data/dao/BlobAccess.java
@@ -0,0 +1,26 @@
+package cwms.cda.data.dao;
+
+import cwms.cda.data.dto.Blob;
+import cwms.cda.data.dto.Blobs;
+import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
+
+import java.util.Optional;
+
+public interface BlobAccess {
+    @NotNull Blobs getBlobs(@Nullable String cursor, int pageSize, @Nullable String officeId, @Nullable String like);
+
+    Optional<Blob> getByUniqueName(String id, String office);
+
+    void getBlob(String id, String office, BlobDao.BlobConsumer consumer);
+
+    default void getBlob(String id, BlobDao.BlobConsumer consumer) {
+        getBlob(id, null, consumer);
+    }
+
+    void create(Blob blob, boolean failIfExists, boolean ignoreNulls);
+
+    void update(Blob blob, boolean ignoreNulls);
+
+    void delete(String office, String id);
+}
diff --git a/cwms-data-api/src/main/java/cwms/cda/data/dao/BlobDao.java b/cwms-data-api/src/main/java/cwms/cda/data/dao/BlobDao.java
index bdac0a54f..14737f053 100644
--- a/cwms-data-api/src/main/java/cwms/cda/data/dao/BlobDao.java
+++ b/cwms-data-api/src/main/java/cwms/cda/data/dao/BlobDao.java
@@ -34,7 +34,7 @@
 import static org.jooq.impl.DSL.table;
 import static org.jooq.impl.DSL.upper;
 
-public class BlobDao extends JooqDao<Blob> {
+public class BlobDao extends JooqDao<Blob> implements BlobAccess {
 
     public static final String ID = "ID";
     public static final String DESCRIPTION = "DESCRIPTION";
@@ -85,6 +85,7 @@ public Optional<Blob> getByUniqueName(String id, String limitToOffice) {
         return Optional.ofNullable(retVal);
     }
 
+    @Override
     public void getBlob(String id, String office, BlobConsumer consumer) {
         // Not using jOOQ here because we want the java.sql.Blob and not an automatic field binding.  We want
         // blob so that we can pull out a stream to the data and pass that to javalin.
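A hedged sketch of how a caller can pair getBlob() with RangeRequestUtil.seekableStream(); it assumes BlobDao.BlobConsumer.accept(java.sql.Blob, String mediaType) as the callback shape and that the consumer may throw checked exceptions — neither is shown in this patch.

```java
import io.javalin.http.Context;

// Hypothetical wiring, not taken from the patch.
public class BlobStreamingSketch {
    void streamBlob(Context ctx, BlobAccess dao, String blobId, String office) {
        dao.getBlob(blobId, office, (blob, mediaType) -> {
            if (blob == null) {
                ctx.status(404);  // both DAOs hand the consumer nulls when the blob is missing
                return;
            }
            // java.sql.Blob exposes a stream and its length, which is all seekableStream needs
            RangeRequestUtil.seekableStream(ctx, blob.getBinaryStream(), mediaType, blob.length());
        });
    }
}
```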
@@ -179,6 +180,7 @@ public List<Blob> getAll(String officeId, String like) {
      * @param like filter blobs by a case-insensitive regex pattern on their IDs, can be null or empty
      * @return a Blobs object containing the retrieved blobs and pagination information
      */
+    @Override
     public @NotNull Blobs getBlobs(@Nullable String cursor, int pageSize, @Nullable String officeId, @Nullable String like) {
 
         String cursorOffice = null;
@@ -249,6 +251,7 @@ public List<Blob> getAll(String officeId, String like) {
         return builder.build();
     }
 
+    @Override
     public void create(Blob blob, boolean failIfExists, boolean ignoreNulls) {
         String pFailIfExists = formatBool(failIfExists);
         String pIgnoreNulls = formatBool(ignoreNulls);
@@ -265,6 +268,7 @@ public void create(Blob blob, boolean failIfExists, boolean ignoreNulls) {
                 blob.getOfficeId()));
     }
 
+    @Override
     public void update(Blob blob, boolean ignoreNulls) {
         String pFailIfExists = formatBool(false);
         String pIgnoreNulls = formatBool(ignoreNulls);
@@ -288,6 +292,7 @@ public void update(Blob blob, boolean ignoreNulls) {
                 blob.getOfficeId()));
     }
 
+    @Override
     public void delete(String office, String id) {
         if (!blobExists(office, id)) {
             throw new NotFoundException("Unable to find blob with id " + id + " in office " + office);
diff --git a/cwms-data-api/src/main/java/cwms/cda/data/dao/ObjectStorageBlobDao.java b/cwms-data-api/src/main/java/cwms/cda/data/dao/ObjectStorageBlobDao.java
new file mode 100644
index 000000000..9a7e15395
--- /dev/null
+++ b/cwms-data-api/src/main/java/cwms/cda/data/dao/ObjectStorageBlobDao.java
@@ -0,0 +1,416 @@
+package cwms.cda.data.dao;
+
+import com.google.common.flogger.FluentLogger;
+import cwms.cda.api.errors.AlreadyExists;
+import cwms.cda.api.errors.FieldLengthExceededException;
+import cwms.cda.api.errors.NotFoundException;
+import cwms.cda.data.dto.Blob;
+import cwms.cda.data.dto.Blobs;
+import cwms.cda.data.dto.CwmsDTOPaginated;
+import io.minio.*;
+import io.minio.errors.*;
+import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
+
+import javax.sql.rowset.serial.SerialBlob;
+import java.io.ByteArrayOutputStream;
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+
+import java.security.InvalidKeyException;
+import java.security.NoSuchAlgorithmException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Locale;
+import java.util.Optional;
+import java.util.regex.Pattern;
+
+import io.minio.messages.Item;
+
+/**
+ * Object storage-backed implementation using the MinIO Java client.  Keys look like OFFICE/ID_UPPER.
+ */
+public class ObjectStorageBlobDao implements BlobAccess {
+    FluentLogger logger = FluentLogger.forEnclosingClass();
+
+    public static final int ID_LENGTH_LIMIT = 256; // This is to match the pl/sql limit
+    private static final int MAX_KEY_LENGTH = 1024;
+    private final ObjectStorageConfig config;
+    private final MinioClient client;
+
+    public ObjectStorageBlobDao(ObjectStorageConfig config) {
+        this.config = config;
+        this.client = buildClient(config);
+    }
+
+    private static MinioClient buildClient(ObjectStorageConfig cfg) {
+        MinioClient.Builder b = MinioClient.builder();
+        if (cfg.endpoint() != null && !cfg.endpoint().isEmpty()) {
+            b = b.endpoint(cfg.endpoint());
+        }
+        if (cfg.accessKey() != null && cfg.secretKey() != null) {
+            b = b.credentials(cfg.accessKey(), cfg.secretKey());
+        }
+
+        return b.build();
+    }
+
+    @Override
+    public @NotNull Blobs getBlobs(@Nullable String cursor, int pageSize, @Nullable String officeId, @Nullable String like) {
+        String prefix = null;
+        if (officeId != null && !officeId.isEmpty()) {
+            prefix = officeId.toUpperCase(Locale.ROOT) + "/";
+        }
+
+        String startAfter = null;
+
+        String cursorOffice = null;
+        String cursorId = null;
+        if (cursor != null && !cursor.isEmpty()) {
+            final String[] parts = CwmsDTOPaginated.decodeCursor(cursor, "||");
+
+            if (parts.length > 1) {
+                cursorOffice = Blobs.getOffice(cursor);
+                cursorId = Blobs.getId(cursor);
+                pageSize = Integer.parseInt(parts[2]);
+            }
+
+            if (cursorOffice != null && cursorId != null) {
+                startAfter = key(cursorOffice, cursorId);
+            }
+        }
+
+        Pattern likePattern = null;
+        if (like != null && !like.isEmpty() && !".*".equals(like)) {
+            likePattern = Pattern.compile(like, Pattern.CASE_INSENSITIVE);
+        }
+
+        List<Blob> collected = new ArrayList<>();
+
+        ListObjectsArgs.Builder args = ListObjectsArgs.builder()
+                .bucket(requiredBucket())
+                .recursive(true)
+                .maxKeys(pageSize);
+        if (prefix != null) args = args.prefix(prefix);
+        if (startAfter != null) args = args.startAfter(startAfter);
+
+        for (Result<Item> res : client.listObjects(args.build())) {
+            try {
+                // item.objectName() looks like OFFICE/ID
+                Item item = res.get();
+                String k = item.objectName();
+                int slash = k.indexOf('/');
+                if (slash <= 0 || slash >= k.length() - 1) continue;
+                String off = k.substring(0, slash);
+                String id = k.substring(slash + 1);
+                if (likePattern != null && !likePattern.matcher(id).find()) {
+                    continue;
+                }
+                // fetch metadata for media type and optional description
+                try {
+                    StatObjectResponse stat = client.statObject(StatObjectArgs.builder()
+                            .bucket(requiredBucket())
+                            .object(k)
+                            .build());
+                    String mediaType = stat.contentType();
+                    String desc = stat.userMetadata() != null ? stat.userMetadata().getOrDefault("description", null) : null;
+                    collected.add(new Blob(off, id, desc, mediaType, null));
+                    if (collected.size() >= pageSize) break;
+                } catch (Exception e) {
+                    // skip items that fail stat
+                }
+            } catch (Exception ignore) {
+                // skip this entry on error
+            }
+        }
+
+        Blobs.Builder builder = new Blobs.Builder(cursor, pageSize, 0);
+        collected.forEach(builder::addBlob);
+        return builder.build();
+    }
+
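A brief sketch of constructing the object-store DAO directly and listing one page; the endpoint, credentials, office id, and regex pattern are example values only, and the like filter is applied case-insensitively to the ID portion of each OFFICE/ID key.

```java
// Illustrative only - not part of the patch.
public class ObjectStorageListingExample {
    public static void main(String[] args) {
        ObjectStorageConfig cfg = new ObjectStorageConfig("cwms-test", "http://127.0.0.1:9000",
                "cda_user", "cda_password");
        BlobAccess dao = new ObjectStorageBlobDao(cfg);

        // First page of up to 25 blobs for the example office "SPK" whose IDs end in .pdf
        Blobs firstPage = dao.getBlobs(null, 25, "SPK", ".*\\.pdf$");
        System.out.println(firstPage);
    }
}
```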
+    @Override
+    public Optional<Blob> getByUniqueName(String id, String office) {
+        String k = (office == null || office.isEmpty()) ? findFirstKeyById(id) : key(office, id);
+        if (k == null) {
+            return Optional.empty();
+        }
+        String officeFromKey = officeFromKey(k);
+        String idFromKey = idFromKey(k);
+        try {
+            StatObjectResponse stat = client.statObject(StatObjectArgs.builder()
+                    .bucket(requiredBucket())
+                    .object(k)
+                    .build());
+            String mediaType = stat.contentType();
+            String desc = stat.userMetadata() != null ? stat.userMetadata().getOrDefault("description", null) : null;
+            return Optional.of(new Blob(officeFromKey, idFromKey, desc, mediaType, null));
+        } catch (ErrorResponseException ere) {
+            if ("NoSuchKey".equalsIgnoreCase(ere.errorResponse().code())) {
+                return Optional.empty();
+            }
+            throw new RuntimeException(ere);
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    @Override
+    public void getBlob(String id, String office, BlobDao.BlobConsumer consumer) {
+        String k = (office == null || office.isEmpty()) ? findFirstKeyById(id) : key(office, id);
+        try {
+            if (k == null) {
+                try {
+                    consumer.accept(null, null);
+                } catch (Exception e) {
+                    throw new RuntimeException(e);
+                }
+                return;
+            }
+            logger.atFine().log("Getting stat for %s", k);
+            // Stat first to get content type and size
+            StatObjectResponse stat = client.statObject(StatObjectArgs.builder()
+                    .bucket(requiredBucket())
+                    .object(k)
+                    .build());
+            String mediaType = stat.contentType() != null ? stat.contentType() : "application/octet-stream";
+
+            try (InputStream is = client.getObject(GetObjectArgs.builder()
+                    .bucket(requiredBucket())
+                    .object(k)
+                    .build())) {
+                // It's too bad this has to readFully - a future optimization could skip ahead
+                // b/c the consumer really just wants to get the stream out of the blob.
+                byte[] data = readFully(is);
+                SerialBlob blob = new SerialBlob(data);
+                consumer.accept(blob, mediaType);
+            } catch (Exception e) {
+                throw new RuntimeException(e);
+            }
+        } catch (ErrorResponseException ere) {
+            if ("NoSuchKey".equalsIgnoreCase(ere.errorResponse().code())) {
+                try {
+                    // We could also just throw a NotFoundException.
+                    // BlobController suggests consumer.accept(null, null); will handle things.
+                    consumer.accept(null, null);
+                } catch (Exception e) {
+                    throw new RuntimeException(e);
+                }
+                return;
+            }
+            throw new RuntimeException(ere);
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    @Override
+    public void create(Blob blob, boolean failIfExists, boolean ignoreNulls) {
+        String k = key(blob.getOfficeId(), blob.getId());
+        if (failIfExists) {
+            try {
+                client.statObject(StatObjectArgs.builder()
+                        .bucket(requiredBucket())
+                        .object(k)
+                        .build());
+                throw new AlreadyExists("Blob already exists: " + k, null);
+            } catch (AlreadyExists ae) {
+                // don't let the generic catch below re-wrap this
+                throw ae;
+            } catch (ErrorResponseException ere) {
+                if (!"NoSuchKey".equalsIgnoreCase(ere.errorResponse().code())) {
+                    throw new RuntimeException(ere);
+                }
+            } catch (Exception e) {
+                throw new RuntimeException(e);
+            }
+        }
+
+        // TODO: Figure out which of these can be something better.
+        try {
+            doPut(blob, k, ignoreNulls);
+        } catch (ServerException | InsufficientDataException | ErrorResponseException | IOException
+                | NoSuchAlgorithmException | InvalidKeyException | InvalidResponseException
+                | XmlParserException | InternalException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    @Override
+    public void update(Blob blob, boolean ignoreNulls) {
+        String k = key(blob.getOfficeId(), blob.getId());
+        // For update, make sure it exists first
+        try {
+            client.statObject(StatObjectArgs.builder()
+                    .bucket(requiredBucket())
+                    .object(k)
+                    .build());
+        } catch (ErrorResponseException ere) {
+            if ("NoSuchKey".equalsIgnoreCase(ere.errorResponse().code())) {
+                throw new NotFoundException("Unable to find blob with id " + blob.getId() + " in office " + blob.getOfficeId());
+            }
+            throw new RuntimeException(ere);
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+
+        try {
+            doPut(blob, k, ignoreNulls);
+        } catch (ServerException | InsufficientDataException | ErrorResponseException | IOException
+                | NoSuchAlgorithmException | InvalidKeyException | InvalidResponseException
+                | XmlParserException | InternalException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    private void doPut(Blob blob, String k, boolean ignoreNulls) throws ServerException, InsufficientDataException,
+            ErrorResponseException, IOException, NoSuchAlgorithmException, InvalidKeyException,
+            InvalidResponseException, XmlParserException, InternalException {
+        byte[] value = blob.getValue();
+        if (value == null && ignoreNulls) {
+            return;
+        }
+
+        if (value == null) {
+            value = new byte[0];
+        }
+
+        try (InputStream is = new ByteArrayInputStream(value)) {
+            PutObjectArgs.Builder builder = PutObjectArgs.builder()
+                    .bucket(requiredBucket())
+                    .object(k)
+                    .stream(is, value.length, -1)
+                    .contentType(blob.getMediaTypeId());
+
+            if (blob.getDescription() != null) {
+                builder.userMetadata(java.util.Collections.singletonMap("description", blob.getDescription()));
+            }
+
+            client.putObject(builder.build());
+        }
+    }
+
+    @Override
+    public void delete(String office, String id) {
+        String k = key(office, id);
+        try {
+            client.removeObject(RemoveObjectArgs.builder()
+                    .bucket(requiredBucket())
+                    .object(k)
+                    .build());
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    private String findFirstKeyById(String id) {
+        String targetSuffix = "/" + normalizeId(id).toUpperCase(Locale.ROOT);
+        try {
+            ListObjectsArgs args = ListObjectsArgs.builder()
+                    .bucket(requiredBucket())
+                    .recursive(true)
+                    .build();
+            for (Result<Item> res : client.listObjects(args)) {
+                try {
+                    Item item = res.get();
+                    String name = item.objectName();
+                    if (name.toUpperCase(Locale.ROOT).endsWith(targetSuffix)) {
+                        return name;
+                    }
+                } catch (Exception ignore) {
+                }
+            }
+            return null;
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    private static String officeFromKey(String key) {
+        int slash = key.indexOf('/');
+        return (slash > 0) ? key.substring(0, slash) : null;
+    }
+
+    private static String idFromKey(String key) {
+        int slash = key.indexOf('/');
+        return (slash >= 0 && slash < key.length() - 1) ? key.substring(slash + 1) : key;
+    }
+
+    private String requiredBucket() {
+        String bucket = config.bucket();
+        if (bucket == null || bucket.isEmpty()) {
+            throw new IllegalStateException("Object storage bucket is not configured (blob.store.bucket)");
+        }
+        return bucket;
+    }
+
+    private static String key(String office, String id) {
+        String off = office == null ? "" : office.toUpperCase(Locale.ROOT);
+        String nid = normalizeId(id).toUpperCase(Locale.ROOT);
+        String fullKey = off + "/" + nid;
+        if (fullKey.length() > MAX_KEY_LENGTH) {
+            throw new FieldLengthExceededException("Key", fullKey.length(), MAX_KEY_LENGTH, null, true);
+        }
+        return fullKey;
+    }
+
+    private static String normalizeId(String id) {
+        if (id == null) return "";
+
+        if (id.length() > ID_LENGTH_LIMIT) {
+            throw new FieldLengthExceededException("ID", id.length(), ID_LENGTH_LIMIT, null, true);
+        }
+        // Replace spaces with underscore; leave common safe chars; percent-encode others
+        StringBuilder sb = new StringBuilder();
+        for (char c : id.toCharArray()) {
+            if (Character.isLetterOrDigit(c) || c == '.' || c == '_' || c == '-') {
+                sb.append(c);
+            } else if (c == ' ') {
+                sb.append('_');
+            } else if (c == '/') {
+                // keep slash because the controller may pass IDs containing '/'; since we prefix with OFFICE/,
+                // this would nest more levels
+                sb.append('/');
+            } else {
+                String hex = Integer.toHexString(c).toUpperCase(Locale.ROOT);
+                if (hex.length() == 1) hex = "0" + hex;
+                sb.append('%').append(hex);
+            }
+        }
+        return sb.toString();
+    }
+
+    private static byte[] readFully(InputStream is) throws Exception {
+        ByteArrayOutputStream baos = new ByteArrayOutputStream();
+        byte[] buf = new byte[8192];
+        int r;
+        while ((r = is.read(buf)) != -1) {
+            baos.write(buf, 0, r);
+        }
+        return baos.toByteArray();
+    }
+}
diff --git a/cwms-data-api/src/main/java/cwms/cda/data/dao/ObjectStorageConfig.java b/cwms-data-api/src/main/java/cwms/cda/data/dao/ObjectStorageConfig.java
new file mode 100644
index 000000000..442c13514
--- /dev/null
+++ b/cwms-data-api/src/main/java/cwms/cda/data/dao/ObjectStorageConfig.java
@@ -0,0 +1,64 @@
+package cwms.cda.data.dao;
+
+import java.util.Optional;
+
+public class ObjectStorageConfig {
+
+    private final String bucket;
+    private final String endpoint;
+    private final String accessKey;
+    private final String secretKey;
+
+    public ObjectStorageConfig(String bucket, String endpoint,
+                               String accessKey, String secretKey) {
+        this.bucket = bucket;
+        this.endpoint = endpoint;
+        this.accessKey = accessKey;
+        this.secretKey = secretKey;
+    }
+
+    public static ObjectStorageConfig fromSystem() {
+        String bucket = get("blob.store.bucket").orElse(null);
+        String endpoint = get("blob.store.endpoint").orElse(null);
+        String accessKey = get("blob.store.accessKey").orElse(null);
+        String secretKey = get("blob.store.secretKey").orElse(null);
+        return new ObjectStorageConfig(bucket, endpoint, accessKey, secretKey);
+    }
+
+    private static Optional<String> get(String key) {
+        String sys = System.getProperty(key);
+        if (sys != null && !sys.isEmpty()) return Optional.of(sys);
+        String env = System.getenv(toEnvKey(key));
+        if (env != null && !env.isEmpty()) return Optional.of(env);
+        return Optional.empty();
+    }
+
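A small illustration of the property/environment fallback implemented by get() and toEnvKey() (just below): a dotted system property, or its upper-cased underscore-separated environment variable (blob.store.bucket becomes BLOB_STORE_BUCKET), satisfies the same key. The class name and value are examples.

```java
// Illustrative only - not part of the patch.
public class ObjectStorageConfigExample {
    public static void main(String[] args) {
        // Equivalent to exporting BLOB_STORE_BUCKET=cwms-test in the environment
        System.setProperty("blob.store.bucket", "cwms-test");

        ObjectStorageConfig cfg = ObjectStorageConfig.fromSystem();
        System.out.println(cfg.bucket());  // cwms-test
    }
}
```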
+    private static String toEnvKey(String key) {
+        return key.toUpperCase().replace('.', '_');
+    }
+
+    public String bucket() {
+        return bucket;
+    }
+
+    public String endpoint() {
+        return endpoint;
+    }
+
+    public String accessKey() {
+        return accessKey;
+    }
+
+    public String secretKey() {
+        return secretKey;
+    }
+}
diff --git a/cwms-data-api/src/main/java/cwms/cda/features/CdaFeatureManagerProvider.java b/cwms-data-api/src/main/java/cwms/cda/features/CdaFeatureManagerProvider.java
new file mode 100644
index 000000000..284a63a7f
--- /dev/null
+++ b/cwms-data-api/src/main/java/cwms/cda/features/CdaFeatureManagerProvider.java
@@ -0,0 +1,34 @@
+package cwms.cda.features;
+
+import org.togglz.core.manager.FeatureManager;
+import org.togglz.core.manager.FeatureManagerBuilder;
+import org.togglz.core.repository.file.FileBasedStateRepository;
+import java.io.File;
+import org.togglz.core.spi.FeatureManagerProvider;
+
+public class CdaFeatureManagerProvider implements FeatureManagerProvider {
+    public static final String DEFAULT_PROPERTIES_FILE = "features.properties";
+    public static final String PROPERTIES_FILE = "properties.file";
+    private volatile FeatureManager manager;
+
+    @Override
+    public int priority() {
+        return 10;
+    }
+
+    @Override
+    public FeatureManager getFeatureManager() {
+        if (manager == null) {
+            synchronized (this) {
+                if (manager == null) {
+                    String file = System.getProperty(PROPERTIES_FILE, DEFAULT_PROPERTIES_FILE);
+                    manager = new FeatureManagerBuilder()
+                            .featureEnum(CdaFeatures.class)
+                            .stateRepository(new FileBasedStateRepository(new File(file)))
+                            .build();
+                }
+            }
+        }
+        return manager;
+    }
+}
diff --git a/cwms-data-api/src/main/java/cwms/cda/features/CdaFeatures.java b/cwms-data-api/src/main/java/cwms/cda/features/CdaFeatures.java
new file mode 100644
index 000000000..fa2a662ef
--- /dev/null
+++ b/cwms-data-api/src/main/java/cwms/cda/features/CdaFeatures.java
@@ -0,0 +1,9 @@
+package cwms.cda.features;
+
+import org.togglz.core.Feature;
+import org.togglz.core.annotation.Label;
+
+public enum CdaFeatures implements Feature {
+    @Label("Use object-storage backed Blob DAO in BlobController")
+    USE_OBJECT_STORAGE_BLOBS
+}
diff --git a/cwms-data-api/src/test/java/cwms/cda/api/BlobControllerObjectStorageTestIT.java b/cwms-data-api/src/test/java/cwms/cda/api/BlobControllerObjectStorageTestIT.java
new file mode 100644
index 000000000..854bbdf15
--- /dev/null
+++ b/cwms-data-api/src/test/java/cwms/cda/api/BlobControllerObjectStorageTestIT.java
@@ -0,0 +1,68 @@
+package cwms.cda.api;
+
+import cwms.cda.features.CdaFeatures;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.BeforeAllCallback;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.junit.jupiter.api.extension.Extension;
+import org.junit.jupiter.api.extension.ExtensionContext;
+import org.togglz.core.context.FeatureContext;
+import org.togglz.core.manager.FeatureManager;
+
+@Tag("integration")
+@ExtendWith(BlobControllerObjectStorageTestIT.FeatureEnableExtension.class)
+public class BlobControllerObjectStorageTestIT extends BlobControllerTestIT {
+
+    static boolean wasActive;
+
+    // This needs to happen before the parent class's @BeforeAll runs so that the created blob goes into the
+    // object-store backed version.
+//    @BeforeAll
+//    public static void setup() throws Exception {
+//        setObjectStoreProperties();
+//
+//        // now call the method that the super calls.
+//        createExistingBlob();
+//    }
+
+    private static void setObjectStoreProperties() {
+        FeatureManager featureManager = FeatureContext.getFeatureManager();
+        wasActive = featureManager.isActive(CdaFeatures.USE_OBJECT_STORAGE_BLOBS);
+        featureManager.enable(CdaFeatures.USE_OBJECT_STORAGE_BLOBS);
+
+        // TODO: Need to figure out a cleaner way to do this
+        System.setProperty("blob.store.endpoint", "http://127.0.0.1:9000");
+        System.setProperty("blob.store.bucket", "cwms-test");
+        System.setProperty("blob.store.accessKey", "cda_user");
+        System.setProperty("blob.store.secretKey", "cda_password");
+    }
+
+    @AfterAll
+    public static void teardown() {
+        FeatureManager featureManager = FeatureContext.getFeatureManager();
+        if (wasActive) {
+            featureManager.enable(CdaFeatures.USE_OBJECT_STORAGE_BLOBS);
+        } else {
+            featureManager.disable(CdaFeatures.USE_OBJECT_STORAGE_BLOBS);
+        }
+    }
+
+    @Override
+    @Test
+    void test_create_getOne() {
+        super.test_create_getOne();
+    }
+
+    static class FeatureEnableExtension implements Extension, BeforeAllCallback {
+
+        @Override
+        public void beforeAll(ExtensionContext context) {
+            setObjectStoreProperties();
+        }
+    }
+}
diff --git a/cwms-data-api/src/test/java/cwms/cda/api/BlobControllerTestIT.java b/cwms-data-api/src/test/java/cwms/cda/api/BlobControllerTestIT.java
index 446e9e9bd..82c44eb5a 100644
--- a/cwms-data-api/src/test/java/cwms/cda/api/BlobControllerTestIT.java
+++ b/cwms-data-api/src/test/java/cwms/cda/api/BlobControllerTestIT.java
@@ -18,6 +18,7 @@
 import javax.servlet.http.HttpServletResponse;
 import java.io.UnsupportedEncodingException;
 import java.net.URLEncoder;
+import java.nio.charset.StandardCharsets;
 import java.time.Duration;
 
 import static io.restassured.RestAssured.given;
@@ -36,6 +37,11 @@ public class BlobControllerTestIT extends DataApiTestIT {
     private static final String EXISTING_BLOB_VALUE = "test value";
 
     @BeforeAll
+    public static void setup() throws Exception {
+        createExistingBlob();
+    }
+
     static void createExistingBlob() throws Exception
     {
         String origDesc = "test description";
@@ -67,7 +73,7 @@ static void createExistingBlob() throws Exception
     @Test
     void test_getOne_not_found() throws UnsupportedEncodingException {
         String blobId = "TEST";
-        String urlencoded = URLEncoder.encode(blobId, "UTF-8");
+        String urlencoded = URLEncoder.encode(blobId, StandardCharsets.UTF_8);
 
         given()
                 .log().ifValidationFails(LogDetail.ALL,true)
@@ -406,7 +412,7 @@ void test_pagination_works() {
             nextPage = pageN.path("next-page");
 
             int pageTotal = pageN.path("blobs.size()");
-            assertTrue(pageTotal <= pageSize, "Expected the page to return no more than the configured page size");
+            assertTrue(pageTotal <= pageSize, "Expected the page to return no more than the configured page size. Expected " + pageTotal + " <= " + pageSize);
Expected " + pageTotal + "<=" + pageSize); totalRetrieved += pageTotal; } while( nextPage != null ); diff --git a/cwms-data-api/src/test/java/cwms/cda/api/RangeParserTest.java b/cwms-data-api/src/test/java/cwms/cda/api/RangeParserTest.java new file mode 100644 index 000000000..75ca185b8 --- /dev/null +++ b/cwms-data-api/src/test/java/cwms/cda/api/RangeParserTest.java @@ -0,0 +1,110 @@ +package cwms.cda.api; + +import org.junit.jupiter.api.Test; + +import java.util.List; + +import static org.junit.jupiter.api.Assertions.*; + +class RangeParserTest { + + @Test + void testResume() { + List ranges = RangeParser.parse("bytes=100-"); + assertNotNull(ranges); + assertEquals(1, ranges.size()); + assertArrayEquals(new long[]{100L, -1L}, ranges.get(0)); + } + + @Test + void testFirstK() { + List ranges = RangeParser.parse("bytes=0-1000"); + assertNotNull(ranges); + assertEquals(1, ranges.size()); + assertArrayEquals(new long[]{0L, 1000L}, ranges.get(0)); + } + + @Test + void testFirstOpen() { + List ranges = RangeParser.parse("bytes=0-"); + assertNotNull(ranges); + assertEquals(1, ranges.size()); + assertArrayEquals(new long[]{0L, -1L}, ranges.get(0)); + } + + @Test + void testSuffixOpen() { + List ranges = RangeParser.parse("bytes=-50"); + assertNotNull(ranges); + assertEquals(1, ranges.size()); + assertArrayEquals(new long[]{-1L, 50L}, ranges.get(0)); + } + + + @Test + void testTwoPart() { + List ranges = RangeParser.parse("bytes=0-10,99-100"); + assertNotNull(ranges); + assertEquals(2, ranges.size()); + assertArrayEquals(new long[]{0L, 10L}, ranges.get(0)); + assertArrayEquals(new long[]{99L, 100L}, ranges.get(1)); + } + + + @Test + void testMultiParse() { + List ranges = RangeParser.parse("bytes=0-99,200-299,-50"); + assertNotNull(ranges); + assertEquals(3, ranges.size()); + assertArrayEquals(new long[]{0L, 99L}, ranges.get(0)); + assertArrayEquals(new long[]{200L, 299L}, ranges.get(1)); + assertArrayEquals(new long[]{-1L, 50L}, ranges.get(2)); + } + + + @Test + void testTwoWeird() { + List ranges = RangeParser.parse("bytes=0-0,-1"); + assertNotNull(ranges); + assertEquals(2, ranges.size()); + assertArrayEquals(new long[]{0L, 0L}, ranges.get(0)); + assertArrayEquals(new long[]{-1L, 1L}, ranges.get(1)); + } + + @Test + void testNotBytes() { + assertThrows(IllegalArgumentException.class, () -> RangeParser.parse("bits=0-10")); + } + + + @Test + void testSuffixDoubleNeg() { + assertThrows(IllegalArgumentException.class, () -> RangeParser.parse("bytes=--64")); + } + + + @Test + void testSuffixClosed() { + assertThrows(IllegalArgumentException.class, () -> + RangeParser.parse("bytes=-50-100")); + } + + + @Test + void testSuffixDoubleClosed() { + assertThrows(IllegalArgumentException.class, () -> RangeParser.parse("bytes=-50--100")); + } + + @Test + void testInterpret(){ + + assertArrayEquals(new long[]{0L, 10L}, RangeParser.interpret(new long[]{0L, 10L}, 100)); + assertArrayEquals(new long[]{0L, 0L}, RangeParser.interpret(new long[]{0L, 0L}, 100)); + assertArrayEquals(new long[]{8L, 12L}, RangeParser.interpret(new long[]{8L, 12L}, 100)); + assertArrayEquals(new long[]{8L, 99L}, RangeParser.interpret(new long[]{8L, 100L}, 100)); + assertArrayEquals(new long[]{8L, 99L}, RangeParser.interpret(new long[]{8L, 200L}, 100)); + + } + +// probably invalid assertArrayEquals(new long[]{8L, 100L}, RangeParser.interpret(new long[]{100L, 200L}, 100)); +} diff --git a/cwms-data-api/src/test/java/cwms/cda/features/CdaFeatureManagerProviderTest.java 
diff --git a/cwms-data-api/src/test/java/cwms/cda/features/CdaFeatureManagerProviderTest.java b/cwms-data-api/src/test/java/cwms/cda/features/CdaFeatureManagerProviderTest.java
new file mode 100644
index 000000000..7a2f68a2a
--- /dev/null
+++ b/cwms-data-api/src/test/java/cwms/cda/features/CdaFeatureManagerProviderTest.java
@@ -0,0 +1,77 @@
+package cwms.cda.features;
+
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.togglz.core.manager.FeatureManager;
+
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.nio.file.Files;
+
+import static org.junit.jupiter.api.Assertions.*;
+
+class CdaFeatureManagerProviderTest {
+
+    private String originalPropertiesFile;
+
+    @BeforeEach
+    void setUp() {
+        originalPropertiesFile = System.getProperty(CdaFeatureManagerProvider.PROPERTIES_FILE);
+    }
+
+    @AfterEach
+    void tearDown() {
+        if (originalPropertiesFile != null) {
+            System.setProperty(CdaFeatureManagerProvider.PROPERTIES_FILE, originalPropertiesFile);
+        } else {
+            System.clearProperty(CdaFeatureManagerProvider.PROPERTIES_FILE);
+        }
+    }
+
+    @Test
+    void testPriority() {
+        CdaFeatureManagerProvider provider = new CdaFeatureManagerProvider();
+        assertEquals(10, provider.priority());
+    }
+
+    @Test
+    void testGetFeatureManager() {
+        CdaFeatureManagerProvider provider = new CdaFeatureManagerProvider();
+        FeatureManager manager = provider.getFeatureManager();
+        assertNotNull(manager);
+        assertSame(manager, provider.getFeatureManager(), "Should return the same instance");
+    }
+
+    @Test
+    void testUseObjectStorageBlobsFeature() throws IOException {
+        File tempFile = Files.createTempFile("features", ".properties").toFile();
+        tempFile.deleteOnExit();
+
+        try (FileWriter writer = new FileWriter(tempFile)) {
+            writer.write(CdaFeatures.USE_OBJECT_STORAGE_BLOBS.name() + " = true");
+        }
+
+        System.setProperty(CdaFeatureManagerProvider.PROPERTIES_FILE, tempFile.getAbsolutePath());
+
+        CdaFeatureManagerProvider provider = new CdaFeatureManagerProvider();
+        FeatureManager manager = provider.getFeatureManager();
+
+        assertTrue(manager.isActive(CdaFeatures.USE_OBJECT_STORAGE_BLOBS));
+    }
+
+    @Test
+    void testFeatureDisabledByDefault() throws IOException {
+        File tempFile = Files.createTempFile("features_disabled", ".properties").toFile();
+        tempFile.deleteOnExit();
+
+        // Empty file should mean features are disabled by default
+        System.setProperty(CdaFeatureManagerProvider.PROPERTIES_FILE, tempFile.getAbsolutePath());
+
+        CdaFeatureManagerProvider provider = new CdaFeatureManagerProvider();
+        FeatureManager manager = provider.getFeatureManager();
+
+        assertFalse(manager.isActive(CdaFeatures.USE_OBJECT_STORAGE_BLOBS));
+    }
+}
diff --git a/docker-compose.yml b/docker-compose.yml
index 1254b4dd1..1ef5cb2d0 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,6 +1,11 @@
 volumes:
   oracle_data_1:
   auth_data:
+  minio_data:
+    driver: local
+  minio_config:
+    driver: local
+
 services:
   db:
     image: ghcr.io/hydrologicengineeringcenter/cwms-database/cwms/database-ready-ora-23.5:latest-dev
@@ -50,6 +55,8 @@ services:
         condition: service_completed_successfully
       traefik:
         condition: service_healthy
+      minio:
+        condition: service_healthy
     image: cwms-rest-api:local-dev
     build:
       target: api
@@ -77,6 +84,11 @@ services:
       - cwms.dataapi.access.openid.altAuthUrl=http://localhost:${APP_PORT:-8081}
       - cwms.dataapi.access.openid.useAltWellKnown=true
       - cwms.dataapi.access.openid.issuer=http://localhost:${APP_PORT:-8081}/auth/realms/cwms
+      - blob.store.endpoint=http://minio:9000
+      - blob.store.region=docker
+      - blob.store.bucket=cwms-test
+      - blob.store.accessKey=cda_user
+      - blob.store.secretKey=cda_password
     expose:
       - 7000
       - 5005
@@ -133,7 +145,7 @@ services:
     expose:
      - "8080"
     volumes:
-      - "/var/run/docker.sock:/var/run/docker.sock:ro"
+      - /var/run/docker.sock:/var/run/docker.sock:ro
     healthcheck:
       test: traefik healthcheck --ping
     command:
@@ -147,3 +159,46 @@ services:
       - "traefik.enable=true"
       - "traefik.http.routers.traefik.rule=PathPrefix(`/traefik`)"
       - "traefik.http.routers.traefik.service=api@internal"
+
+  minio:
+    container_name: minio_server
+    image: minio/minio:RELEASE.2025-04-22T22-12-26Z
+    volumes:
+      - minio_data:/data
+      - minio_config:/root/.minio
+    ports:
+      - '${FORWARD_MINIO_API_PORT0:-9000}:9000'
+      - '${FORWARD_MINIO_PORT:-9001}:9001'
+      - '${FORWARD_MINIO_API_PORT2:-9002}:9002'
+    environment:
+      MINIO_ROOT_USER: minio_admin
+      MINIO_ROOT_PASSWORD: saersdbewadfqewrbwreq12rfgweqrffw52354ec@%fwewEFFWSE
+    command: server /data --console-address ":9001"
+    healthcheck:
+      test: [ "CMD", "mc", "ready", "local" ]
+      interval: 5s
+      timeout: 5s
+      retries: 5
+    restart: unless-stopped
+    deploy:
+      resources:
+        reservations:
+          cpus: "0.5"
+          memory: 1G
+        limits:
+          cpus: "1.0"
+          memory: 2G
+
+  minio-setup:
+    image: minio/mc:latest
+    depends_on:
+      minio:
+        condition: service_healthy
+    entrypoint: >
+      /bin/sh -c "
+      /usr/bin/mc alias set myminio http://minio:9000 minio_admin saersdbewadfqewrbwreq12rfgweqrffw52354ec@%fwewEFFWSE;
+      /usr/bin/mc admin user add myminio cda_user cda_password;
+      /usr/bin/mc mb --ignore-existing myminio/cwms-test;
+      /usr/bin/mc admin policy attach myminio readwrite --user cda_user;
+      exit 0;
+      "
diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml
index 1e49a183e..9ff8d2679 100644
--- a/gradle/libs.versions.toml
+++ b/gradle/libs.versions.toml
@@ -36,6 +36,8 @@ freemarker = "2.3.32"
 auto-service = "1.1.1"
 openapi-validation = "2.44.9"
 javaparser = "3.26.2"
+togglz = "3.3.3"
+minio = "8.6.0"
 
 #Overrides
 classgraph = { strictly = '4.8.176' }
@@ -79,7 +81,9 @@ jackson-datatype-jsr310 = { module = "com.fasterxml.jackson.datatype:jackson-datatype-jsr310", version.ref = "jackson" }
 jackson-dataformat-xml = { module = "com.fasterxml.jackson.dataformat:jackson-dataformat-xml", version.ref = "jackson" }
 jackson-datatype-jdk8 = { module = "com.fasterxml.jackson.datatype:jackson-datatype-jdk8", version.ref = "jackson" }
+togglz-core = { module = "org.togglz:togglz-core", version.ref = "togglz" }
+minio = { module = "io.minio:minio", version.ref = "minio" }
 
 #compile compileOnly
 javaee-web-api = { module = "javax:javaee-web-api", version.ref = "java-ee" }
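A hedged smoke-test sketch for the MinIO service defined above, using the bucket and cda_user credentials created by the minio-setup container; the forwarded port and class name are assumptions for a local run.

```java
import io.minio.BucketExistsArgs;
import io.minio.MinioClient;

// Illustrative only: quick connectivity check against the docker-compose MinIO service.
public class MinioSmokeTest {
    public static void main(String[] args) throws Exception {
        MinioClient client = MinioClient.builder()
                .endpoint("http://127.0.0.1:9000")          // FORWARD_MINIO_API_PORT0 default
                .credentials("cda_user", "cda_password")    // created by the minio-setup container
                .build();
        System.out.println("cwms-test bucket exists: "
                + client.bucketExists(BucketExistsArgs.builder().bucket("cwms-test").build()));
    }
}
```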