diff --git a/packages/core/src/abstract/render-server.ts b/packages/core/src/abstract/render-server.ts index ed97d268..008896ea 100644 --- a/packages/core/src/abstract/render-server.ts +++ b/packages/core/src/abstract/render-server.ts @@ -52,11 +52,13 @@ export class RenderServer { cache: AsyncDataCache; private clients: Map; private maxSize: vec2; + private cancelled: boolean; constructor(maxSize: vec2, extensions: string[], cacheByteLimit: number = 2000 * oneMB) { this.canvas = new OffscreenCanvas(10, 10); // we always render to private buffers, so we don't need a real resolution here... this.clients = new Map(); this.maxSize = maxSize; this.refreshRequested = false; + this.cancelled = false; const gl = this.canvas.getContext('webgl', { alpha: true, preserveDrawingBuffer: false, @@ -119,6 +121,9 @@ export class RenderServer { } } private requestComposition(client: Client, composite: Compositor) { + if (this.cancelled) { + return; + } const c = this.clients.get(client); if (c) { if (!c.updateRequested) { @@ -145,6 +150,16 @@ export class RenderServer { } this.clients.delete(client); } + destroyServer() { + this.cancelled = true; // we need this flag, + // because when we inform clients that they are cancelled, + // they could respond by requesting a new frame! + for (const c of this.clients.values()) { + c.frame?.cancelFrame(); + } + this.clients.clear(); + this.regl.destroy(); + } private prepareToRenderToClient(client: Client) { const previousEntry = this.clients.get(client); if (previousEntry) { diff --git a/packages/omezarr/src/sliceview/loader.ts b/packages/omezarr/src/sliceview/loader.ts index af05bc71..a60abce4 100644 --- a/packages/omezarr/src/sliceview/loader.ts +++ b/packages/omezarr/src/sliceview/loader.ts @@ -8,7 +8,7 @@ import { } from '@alleninstitute/vis-geometry'; import type { Chunk } from 'zarrita'; import type { ZarrRequest } from '../zarr/loading'; -import { loadSlice, pickBestScale, planeSizeInVoxels, sizeInUnits } from '../zarr/loading'; +import { indexOfRelativeSlice, loadSlice, pickBestScale, planeSizeInVoxels, sizeInUnits } from '../zarr/loading'; import type { VoxelTileImage } from './slice-renderer'; import type { OmeZarrMetadata, OmeZarrShapedDataset } from '../zarr/types'; @@ -93,16 +93,29 @@ export function getVisibleTiles( view: box2D; screenSize: vec2; }, - plane: CartesianPlane, - orthoVal: number, + plane: CartesianPlane, // the plane along which we extract a slice + planeLocation: // where that slice sits in the volume along the axis that is orthogonal to the plane of the slice - e.g. Z for XY slices + | { + // EITHER + index: number; // the specific index (caution - not all volumes have the same number of slices at each level of detail) + parameter?: never; + } // OR + | { + parameter: number; // a parameter [0:1] along the axis, 0 would be the first slice, 1 would be the last + index?: never; + }, metadata: OmeZarrMetadata, tileSize: number, ): VoxelTile[] { // TODO (someday) open the array, look at its chunks, use that size for the size of the tiles I request! - const layer = pickBestScale(metadata, plane, camera.view, camera.screenSize); - return getVisibleTilesInLayer(camera, plane, orthoVal, metadata, tileSize, layer); + const layer = pickBestScale(metadata, plane, camera.view, camera.screenSize); + const sliceIndex = + planeLocation.index ?? 
+ indexOfRelativeSlice(layer, metadata.attrs.multiscales[0].axes, planeLocation.parameter, plane.ortho); + + return getVisibleTilesInLayer(camera, plane, sliceIndex, metadata, tileSize, layer); } + /** * a function which returns a promise of float32 data from the requested region of an omezarr dataset. * Note that omezarr decoding can be slow - consider wrapping this function in a web-worker (or a pool of them) diff --git a/packages/omezarr/src/sliceview/slice-renderer.ts b/packages/omezarr/src/sliceview/slice-renderer.ts index 1db1f422..d60fc31d 100644 --- a/packages/omezarr/src/sliceview/slice-renderer.ts +++ b/packages/omezarr/src/sliceview/slice-renderer.ts @@ -30,13 +30,20 @@ export type RenderSettingsChannel = { export type RenderSettingsChannels = { [key: string]: RenderSettingsChannel; }; - export type RenderSettings = { camera: { view: box2D; screenSize: vec2; }; - orthoVal: number; // the value of the orthogonal axis, e.g. Z value relative to an XY plane + planeLocation: + | { + parameter?: never; + index: number; + } + | { + index?: never; + parameter: number; + }; tileSize: number; plane: CartesianPlane; channels: RenderSettingsChannels; @@ -144,8 +151,8 @@ export function buildOmeZarrSliceRenderer( }, destroy: () => {}, getVisibleItems: (dataset, settings) => { - const { camera, plane, orthoVal, tileSize } = settings; - return getVisibleTiles(camera, plane, orthoVal, dataset, tileSize); + const { camera, plane, planeLocation, tileSize } = settings; + return getVisibleTiles(camera, plane, planeLocation, dataset, tileSize); }, fetchItemContent: (item, dataset, settings, signal) => { const contents: Record Promise> = {}; diff --git a/packages/omezarr/src/zarr/loading.ts b/packages/omezarr/src/zarr/loading.ts index dbbd3bd8..338e5f7c 100644 --- a/packages/omezarr/src/zarr/loading.ts +++ b/packages/omezarr/src/zarr/loading.ts @@ -178,10 +178,27 @@ export function pickBestScale( }, datasets[0]); return choice ?? datasets[datasets.length - 1]; } - +// TODO this is a duplicate of indexOfDimension... delete one of them! 
function indexFor(dim: ZarrDimension, axes: readonly OmeZarrAxis[]) { return axes.findIndex((axis) => axis.name === dim); } +/** + * + * @param layer a shaped layer from within the omezarr dataset + * @param axes the axes describing this omezarr dataset + * @param parameter a value in [0:1] indicating a relative position within the volume along the given dimension + * @param dim the dimension (axis) along which the parameter is measured + * @returns a valid index (in [0, layer.shape[dimIndex] - 1]) into the volume, suitable for selecting a slice along that dimension + */ +export function indexOfRelativeSlice( + layer: OmeZarrShapedDataset, + axes: readonly OmeZarrAxis[], + parameter: number, + dim: ZarrDimension, +): number { + const dimIndex = indexFor(dim, axes); + const numSlices = layer.shape[dimIndex]; + // clamp to numSlices - 1: a parameter of exactly 1.0 must not produce an out-of-range index + return Math.min(numSlices - 1, Math.floor(numSlices * Math.max(0, Math.min(1, parameter)))); +} /** * determine the size of a slice of the volume, in the units specified by the axes metadata diff --git a/packages/precomputed/package.json b/packages/precomputed/package.json new file mode 100644 index 00000000..7bd55d75 --- /dev/null +++ b/packages/precomputed/package.json @@ -0,0 +1,55 @@ +{ + "name": "@alleninstitute/vis-precomputed", + "version": "0.0.1", + "contributors": [ + { + "name": "Lane Sawyer", + "email": "lane.sawyer@alleninstitute.org" + }, + { + "name": "Noah Shepard", + "email": "noah.shepard@alleninstitute.org" + }, + { + "name": "Skyler Moosman", + "email": "skyler.moosman@alleninstitute.org" + }, + { + "name": "Su Li", + "email": "su.li@alleninstitute.org" + }, + { + "name": "Joel Arbuckle", + "email": "joel.arbuckle@alleninstitute.org" + } + ], + "license": "BSD-3-Clause", + "source": "src/index.ts", + "main": "dist/main.js", + "module": "dist/module.js", + "types": "dist/types.d.ts", + "files": ["dist"], + "scripts": { + "typecheck": "tsc --noEmit", + "build": "parcel build --no-cache", + "watch": "parcel watch", + "test": "vitest --watch", + "test:ci": "vitest run", + "coverage": "vitest run --coverage" + }, + "repository": { + "type": "git", + "url": "https://github.com/AllenInstitute/vis.git" + }, + "publishConfig": { + "registry": "https://npm.pkg.github.com/AllenInstitute" + }, + "packageManager": "pnpm@9.14.2", + "dependencies": { + "@alleninstitute/vis-geometry": "workspace:*", + "@alleninstitute/vis-core": "workspace:*", + "regl": "2.1.0", + "ts-pattern": "5.7.1", + "zod": "3.24.3" + } +} diff --git a/packages/precomputed/src/index.ts b/packages/precomputed/src/index.ts new file mode 100644 index 00000000..268d76cd --- /dev/null +++ b/packages/precomputed/src/index.ts @@ -0,0 +1,15 @@ +export { + type AnnotationInfo, + isPointAnnotation, + isBoxAnnotation, + isEllipsoidAnnotation, + isLineAnnotation, + parseInfoFromJson as ParseNGPrecomputedInfo, + getAnnotations, +} from './loader/annotations'; +export { + buildNGPointAnnotationRenderer, + buildAsyncNGPointRenderer, + type AnnotationChunk, + type PointAnnotationInfo, +} from './render/annotationRenderer'; diff --git a/packages/precomputed/src/loader/annotations.test.ts b/packages/precomputed/src/loader/annotations.test.ts new file mode 100644 index 00000000..91842545 --- /dev/null +++ b/packages/precomputed/src/loader/annotations.test.ts @@ -0,0 +1,70 @@ +import { describe, expect, it } from 'vitest'; +import { + AnnoStream, + type AnnotationInfo, + computeStride, + extractPoint, + getAnnotationBuffer, + isPointAnnotation, + parseInfoFromJson, +} from './annotations'; + +describe('quick check', () => { + it('can parse a real (although simple) file, at least a little bit', async () => { + const base = 
'https://aind-open-data.s3.amazonaws.com/SmartSPIM_787715_2025-04-08_18-33-36_stitched_2025-04-09_22-42-59/image_cell_segmentation/Ex_445_Em_469/visualization/detected_precomputed/'; + const expectedMetadata: AnnotationInfo<'point'> = { + annotation_type: 'point', // TODO: the real json files here use lowercase, wtf + type: 'neuroglancer_annotations_v1', + dimensions: [ + { name: 'z', scale: 2e-6, unit: 'm' }, + { name: 'y', scale: 1.8e-6, unit: 'm' }, + { name: 'x', scale: 1.8e-6, unit: 'm' }, + ], + lower_bound: [4.0, 94.0, 558.0], + upper_bound: [3542.0, 8784.0, 7166.0], + properties: [], + relationships: [], + by_id: { key: 'by_id' }, //what? + spatial: [ + { + key: 'spatial0', + grid_shape: [1, 1, 1], + chunk_size: [3538.0, 8690.0, 6608.0], + limit: 150378, + }, + ], + }; + const infoFileJSON = await (await fetch(`${base}info`)).json(); + // biome-ignore lint/style/noNonNullAssertion: this is a test + const sanitized = parseInfoFromJson(infoFileJSON)!; + const stride = computeStride(expectedMetadata); + expect(stride).toBe(12); + expect(sanitized).toEqual(expectedMetadata); + const raw = await getAnnotationBuffer(base, expectedMetadata, { level: 0, cell: [0, 0, 0] }); + expect(raw.numAnnotations).toBe(150378n); + // each annotation (its shape and its properties) is written sequentially in the buffer, followed by all the ids for the annotations, like this: + // [{num_annotations:uint64},{annotation_0_and_properties_and_optional_padding},...,{annotation_n_and_properties_and_optional_padding},{id_of_anno_0:uint64},...,{id_of_anno_n:uint64}] + // thus: 8 + (length*stride) + (size_in_bytes(uint64)*length) + expect(raw.view.buffer.byteLength).toBe(150378 * stride + 8 + 150378 * 8); + if (isPointAnnotation(sanitized)) { + const annoStream = AnnoStream(sanitized, extractPoint, raw.view, raw.numAnnotations); + let count = 0; + // the ids in here just count up... I think the spec says they should be added at random when doing spatial indexing, so this is sus.... 
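+ // walk the whole stream: ids should be consecutive, and (since properties: [] above) every property set should be empty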
+ let lastId: bigint | undefined; + for (const point of annoStream) { + count += 1; + if (lastId === undefined) { + lastId = point.id; + } else { + expect(point.id).toBe(lastId + 1n); + lastId = point.id; + } + expect(point.properties).toEqual({}); + } + expect(count).toBe(150378); + } else { + expect(sanitized?.annotation_type).toBe('point'); + } + }); +}); diff --git a/packages/precomputed/src/loader/annotations.ts b/packages/precomputed/src/loader/annotations.ts new file mode 100644 index 00000000..f7bc65ce --- /dev/null +++ b/packages/precomputed/src/loader/annotations.ts @@ -0,0 +1,511 @@ +import { Box3D, type box3D, Vec2, type vec2, type vec3, Vec3 } from '@alleninstitute/vis-geometry'; +import { match } from 'ts-pattern'; +import { z } from 'zod'; +// a simple reader for NG precomputed annotation data-sources +// see https://github.com/google/neuroglancer/blob/master/src/datasource/precomputed/annotations.md +// for details +type Ints = 'uint' | 'int'; +type Floats = 'float32'; +type Bits = '8' | '16' | '32'; +type ScalarProperties = `${Ints}${Bits}` | Floats; +type PropertyTypes = ScalarProperties | 'rgb' | 'rgba'; +type NGUnit = 'm' | 's' | ''; // TODO go find the complete set +type Dimension = { name: string; scale: number; unit: NGUnit }; +type AnnotationType = 'point' | 'line' | 'axis_aligned_bounding_box' | 'ellipsoid'; +type SpatialIndexLevel = { + key: string; + sharding?: boolean | undefined; + grid_shape: readonly number[]; + chunk_size: readonly number[]; + limit: number; +}; +type Relation = { + id: string; + key: string; + sharding?: boolean | undefined; // todo the spec has a broken link on what this is... +}; +type NGAnnotationProperty = Readonly<{ + id: string; + type: PropertyTypes; + description: string; + enum_values?: undefined | readonly number[]; + enum_labels?: undefined | readonly string[]; +}>; +// this is a type corresponding to the contents of the annotation info file: +// see https://github.com/google/neuroglancer/blob/master/src/datasource/precomputed/annotations.md +// note that a few liberties have been taken for clarity +// note also that order matters in this structure: the values in the bounding box for example, +// line up with the order of the dimensions, +// and the ordering in the properties array is used when extracting a property value from the +// binary encoded payload: https://github.com/google/neuroglancer/blob/master/src/datasource/precomputed/annotations.md#single-annotation-encoding +export type AnnotationInfo<K extends AnnotationType> = { + type: 'neuroglancer_annotations_v1'; + dimensions: readonly Dimension[]; + lower_bound: readonly number[]; + upper_bound: readonly number[]; + annotation_type: K; + properties: readonly NGAnnotationProperty[]; // coarse to fine (notably the opposite of ome-zarr convention) + relationships: readonly Relation[]; + by_id: { + key: string; + sharding?: boolean | undefined; // at the time of writing, this is a 404: https://github.com/google/neuroglancer/blob/master/src/datasource/precomputed/sharding.md#sharding-specification + }; + spatial: readonly SpatialIndexLevel[]; +}; +type UnknownAnnotationInfo = AnnotationInfo<AnnotationType>; +// return the size, in bytes, of each annotation in the encoded file +// for example, a point annotation, in 3 dimensions, with a single rgb property, would be +// 4*3 = 12 bytes (4 bytes per float, 3 floats per point) + 3 bytes (one byte (uint8) each for red, green and blue) = 15, plus one byte of padding out to 4-byte alignment = 16 +const vertexPerAnnotation: Record<AnnotationType, number> = { + axis_aligned_bounding_box: 2, // min/max corners + 
ellipsoid: 2, // center/size + line: 2, // start/end + point: 1, // itself +}; +const bytesPerProp: Record<PropertyTypes, number> = { + float32: 4, + int16: 2, + int32: 4, + int8: 1, + rgb: 3, + rgba: 4, + uint16: 2, + uint32: 4, + uint8: 1, +}; +export function computeStride(info: UnknownAnnotationInfo) { + const rank = info.dimensions.length; + const shapeDataFloats = rank * vertexPerAnnotation[info.annotation_type]; + const shapeDataBytes = 4 * shapeDataFloats; + const propBytes = computePropertyStride(info); + const unAligned = shapeDataBytes + propBytes; + // ok now add padding until bytes%4==0 + const aligned = unAligned % 4 === 0 ? unAligned : unAligned + (4 - (unAligned % 4)); + return aligned; +} +function computePropertyStride(info: UnknownAnnotationInfo) { + return info.properties.reduce((bytes, prop) => { + return bytes + bytesPerProp[prop.type]; + }, 0); +} +type GenericVector = Record<string, number>; +type RGB = { r: number; g: number; b: number }; +type RGBA = RGB & { a: number }; +type rgbaProp = { type: 'rgba'; value: RGBA }; +type rgbProp = { type: 'rgb'; value: RGB }; +type scalarProp = { type: ScalarProperties; value: number }; +type withProps = { properties: Record<string, rgbaProp | rgbProp | scalarProp> }; +type Point = { point: GenericVector } & withProps; +type Ellipse = { center: GenericVector; radius: GenericVector } & withProps; +type Box = { min: GenericVector; max: GenericVector } & withProps; +type Line = { start: GenericVector; end: GenericVector } & withProps; + +// not very elegant, but there are only 4 kinds so this is fine I think. +type ExtractorResult<K extends AnnotationType> = K extends 'line' + ? Line + : K extends 'point' + ? Point + : K extends 'axis_aligned_bounding_box' + ? Box + : Ellipse; + +function extractVec( + view: DataView, + info: UnknownAnnotationInfo, + offset: number, +): { offset: number; vec: GenericVector } { + const vec: GenericVector = {}; + let off = offset; + for (const dim of info.dimensions) { + vec[dim.name] = view.getFloat32(off, true); + off += 4; + } + return { offset: off, vec }; +} +function extractOnePropSet( + view: DataView, + info: UnknownAnnotationInfo, + offset: number, +): { offset: number } & withProps { + const props: Record<string, rgbaProp | rgbProp | scalarProp> = {}; + let off = offset; + for (const prop of info.properties) { + props[prop.id] = match(prop.type) + .with( + 'rgb', + () => + ({ + type: 'rgb', + value: { r: view.getUint8(off), g: view.getUint8(off + 1), b: view.getUint8(off + 2) }, + }) as const, + ) + .with( + 'rgba', + () => + ({ + type: 'rgba', + value: { + r: view.getUint8(off), + g: view.getUint8(off + 1), + b: view.getUint8(off + 2), + a: view.getUint8(off + 3), + }, + }) as const, + ) + .with('uint8', () => ({ type: 'uint8', value: view.getUint8(off) }) as const) + .with('uint16', () => ({ type: 'uint16', value: view.getUint16(off, true) }) as const) + .with('uint32', () => ({ type: 'uint32', value: view.getUint32(off, true) }) as const) + .with('int8', () => ({ type: 'int8', value: view.getInt8(off) }) as const) + .with('int16', () => ({ type: 'int16', value: view.getInt16(off, true) }) as const) + .with('int32', () => ({ type: 'int32', value: view.getInt32(off, true) }) as const) + .with('float32', () => ({ type: 'float32', value: view.getFloat32(off, true) }) as const) + .exhaustive(); + // now update off based on how much we read... 
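+ // (single-byte reads have no endianness; every multi-byte value above is read little-endian, hence the `true` flags)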
+ off += bytesPerProp[prop.type]; + } + return { properties: props, offset: off }; +} +function extractTwo( + view: DataView, + info: UnknownAnnotationInfo, + offset: number, +): { A: GenericVector; B: GenericVector; offset: number } & withProps { + const A = extractVec(view, info, offset); + const B = extractVec(view, info, A.offset); + const props = extractOnePropSet(view, info, B.offset); + return { + A: A.vec, + B: B.vec, + properties: props.properties, + offset: props.offset, + }; +} + +export function extractPoint( + view: DataView, + info: AnnotationInfo<'point'>, + offset: number, +): { annotation: Point; offset: number } { + const pnt = extractVec(view, info, offset); + const props = extractOnePropSet(view, info, pnt.offset); + return { + annotation: { + point: pnt.vec, + properties: props.properties, + }, + offset: props.offset, + }; +} +export function extractBox( + view: DataView, + info: AnnotationInfo<'axis_aligned_bounding_box'>, + offset: number, +): { annotation: Box; offset: number } { + const { A, B, offset: off, properties } = extractTwo(view, info, offset); + return { + annotation: { + min: A, + max: B, + properties, + }, + offset: off, + }; +} +export function extractEllipse( + view: DataView, + info: AnnotationInfo<'ellipsoid'>, + offset: number, +): { annotation: Ellipse; offset: number } { + const { A, B, offset: off, properties } = extractTwo(view, info, offset); + return { + annotation: { + center: A, + radius: B, + properties, + }, + offset: off, + }; +} +export function extractLine( + view: DataView, + info: AnnotationInfo<'line'>, + offset: number, +): { annotation: Line; offset: number } { + const { A, B, offset: off, properties } = extractTwo(view, info, offset); + return { + annotation: { + start: A, + end: B, + properties, + }, + offset: off, + }; +} +export function* AnnoStream<K extends AnnotationType>( + info: AnnotationInfo<K>, + extractor: ( + view: DataView, + info: AnnotationInfo<K>, + offset: number, + ) => { annotation: ExtractorResult<K>; offset: number }, + view: DataView, + count: bigint, +) { + const stride = computeStride(info); + // if you had a buffer that actually needed a bigint to index it... I think that might be very implausible! + const lilCount = Number(count); + const idStart = lilCount * stride; + let offset = 0; + for (let i = 0n; i < count; i++) { + const result = extractor(view, info, offset); + const bigID = view.getBigUint64(idStart + Number(i) * 8, true); + offset += stride; + yield { ...result.annotation, id: bigID }; + } + return null; +} +export async function getAnnotationBuffer( + baseurl: string, // the url at which the info.json file was found + info: UnknownAnnotationInfo, + spatial: { + level: number; + cell: readonly number[]; + }, +) { + const { level, cell } = spatial; + const lvl = info.spatial[level]; + // go fetch the file to start... + const name = `${baseurl}${lvl.key}/${cell.join('_')}`; + const raw = await (await fetch(name)).arrayBuffer(); + // first, get the count. 
it's a 64-bit value, and it comes first + const first = new DataView(raw); + const numAnnotations = first.getBigUint64(0, true); + const view = new DataView(raw, 8); + return { view, numAnnotations }; +} +export async function getAnnotations<K extends AnnotationType>( + baseurl: string, // the url at which the info.json file was found + info: AnnotationInfo<K>, + spatial: { + level: number; + cell: readonly number[]; + }, + extractor: ( + view: DataView, + info: AnnotationInfo<K>, + offset: number, + ) => { annotation: ExtractorResult<K>; offset: number }, +) { + const { level, cell } = spatial; + const lvl = info.spatial[level]; + // go fetch the file to start... + const name = `${baseurl}${lvl.key}/${cell.join('_')}`; + const raw = await (await fetch(name)).arrayBuffer(); + // first, get the count. it's a 64-bit value, and it comes first + const first = new DataView(raw); + const numAnnotations = first.getBigUint64(0, true); + const view = new DataView(raw, 8); + + // TODO: consider if we want the ids (probably yes?) + return { stream: AnnoStream(info, extractor, view, numAnnotations), numAnnotations }; +} +type wtf = PropertyTypes; +const propSchema = z.object({ + id: z.string(), + type: z.union([ + z.literal('rgb'), + z.literal('rgba'), + z.literal('uint8'), + z.literal('uint16'), + z.literal('uint32'), + z.literal('int8'), + z.literal('int16'), + z.literal('int32'), + z.literal('float32'), + ]), + description: z.string(), + enum_values: z.optional(z.array(z.number())), + enum_labels: z.optional(z.array(z.string())), +}); +const relSchema = z.object({ + id: z.string(), + key: z.string(), + sharding: z.optional(z.boolean()), // ???? +}); +const spatialSchema = z.object({ + key: z.string(), + sharding: z.optional(z.boolean()), + grid_shape: z.array(z.number()), + chunk_size: z.array(z.number()), + limit: z.number(), +}); +const ng_annotations_v1_schema = z.object({ + '@type': z.literal('neuroglancer_annotations_v1'), + dimensions: z.record(z.tuple([z.number().positive(), z.string()])), + lower_bound: z.array(z.number()), + upper_bound: z.array(z.number()), + annotation_type: z.union([ + z.literal('point'), + z.literal('line'), + z.literal('axis_aligned_bounding_box'), + z.literal('ellipsoid'), + ]), + properties: z.array(propSchema), + relationships: z.array(relSchema), + by_id: z.object({ key: z.string(), sharding: z.optional(z.boolean()) }), + spatial: z.array(spatialSchema), +}); +export function parseInfoFromJson(json: unknown): UnknownAnnotationInfo | undefined { + const { data } = ng_annotations_v1_schema.safeParse(json); + if (data) { + // the idea here is that 🤞 Object.keys respects the order in which the properties were listed in the json body itself... + const dims = Object.keys(data.dimensions).map((key) => ({ + name: key, + unit: data.dimensions[key][1] as NGUnit, + scale: data.dimensions[key][0], + })); + // TODO this is gross - but not quite as gross as it looks - make the schema nicer! 
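+ // (re-shape the parsed json into our AnnotationInfo type: '@type' becomes type, and the dimensions record becomes an ordered array of {name, scale, unit} entries)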
+ return { + annotation_type: data.annotation_type, + type: data['@type'], + by_id: data.by_id, + dimensions: dims, + lower_bound: data.lower_bound, + upper_bound: data.upper_bound, + properties: data.properties, + relationships: data.relationships, + spatial: data.spatial, + }; + } +} +export function isPointAnnotation(a: UnknownAnnotationInfo): a is AnnotationInfo<'point'> { + return a.annotation_type === 'point'; +} +export function isBoxAnnotation(a: UnknownAnnotationInfo): a is AnnotationInfo<'axis_aligned_bounding_box'> { + return a.annotation_type === 'axis_aligned_bounding_box'; +} +export function isEllipsoidAnnotation(a: UnknownAnnotationInfo): a is AnnotationInfo<'ellipsoid'> { + return a.annotation_type === 'ellipsoid'; +} +export function isLineAnnotation(a: UnknownAnnotationInfo): a is AnnotationInfo<'line'> { + return a.annotation_type === 'line'; +} + +function projectXYZ<T>( + info: UnknownAnnotationInfo, + orderedAsInfo: readonly T[], + xyz: readonly [string, string, string], +): undefined | readonly [T, T, T] { + const [x, y, z] = xyz; + const X = info.dimensions.findIndex((d) => d.name === x); + const Y = info.dimensions.findIndex((d) => d.name === y); + const Z = info.dimensions.findIndex((d) => d.name === z); + if (X === -1 || Y === -1 || Z === -1) { + return undefined; + } + return [orderedAsInfo[X], orderedAsInfo[Y], orderedAsInfo[Z]]; +} +function projectXY<T>( + info: UnknownAnnotationInfo, + orderedAsInfo: readonly T[], + xy: readonly [string, string], +): undefined | readonly [T, T] { + const [x, y] = xy; + const X = info.dimensions.findIndex((d) => d.name === x); + const Y = info.dimensions.findIndex((d) => d.name === y); + if (X === -1 || Y === -1) { + return undefined; + } + return [orderedAsInfo[X], orderedAsInfo[Y]]; +} +// our grids are of arbitrarily high dimensionality - +// we want to traverse an N dimensional grid - +// normally, when you know the dimension up front (e.g. 3D grid) you can use nested loops, +// here we have to use a more abstract approach: + +// precondition: cell and shape are the same length, cell is an index within the N-dim grid +// described by shape, indexing starting at 0 +function nextGridCell(cell: readonly number[], shape: readonly number[]): null | readonly number[] { + // add 1 to the right-most value in cell[] - if it would overflow, set it to zero instead, + // and recursively bubble the one forward + const { leftover, v } = bubbleAdd(0, cell, shape); + if (leftover) { + return null; + } + return v; +} +function bubbleAdd( + dim: number, + cell: readonly number[], + shape: readonly number[], +): { v: readonly number[]; leftover: boolean } { + const rank = cell.length; + if (dim === rank - 1) { + if (cell[dim] + 1 >= shape[dim]) { + // we have to bubble-up! + return { v: [0], leftover: true }; + } + return { v: [cell[dim] + 1], leftover: false }; + } + const { v, leftover } = bubbleAdd(dim + 1, cell, shape); + if (leftover) { + if (cell[dim] + 1 >= shape[dim]) { + // keep bubbling! 
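+ // (this dimension overflowed as well - reset it to zero and pass the carry up to the next, coarser dimension)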
+ return { leftover: true, v: [0, ...v] }; + } + return { v: [cell[dim] + 1, ...v], leftover: false }; + } + return { v: [cell[dim], ...v], leftover: false }; +} + +export function visitChunksInLayer( + data: UnknownAnnotationInfo, + layer: number, + queryXYZ: box3D, + xyz: readonly [string, string, string], + visitor: (dataset: UnknownAnnotationInfo, cell: readonly number[], layer: number) => void, +) { + const L = data.spatial[layer]; + let cell: null | readonly number[] = data.dimensions.map((d) => 0); + if (L) { + const cellSize = projectXYZ(data, L.chunk_size, xyz); + if (!cellSize) { + return; // invalid dimensions! + } + while (cell !== null) { + // is cell within the bounds of our query? + const gridIndexXYZ = projectXYZ(data, cell, xyz); + if (!gridIndexXYZ) return; + + const cellBoundsXYZ = Box3D.create( + Vec3.mul(gridIndexXYZ, cellSize), + Vec3.mul(Vec3.add(gridIndexXYZ, [1, 1, 1]), cellSize), + ); + if (Box3D.intersection(queryXYZ, cellBoundsXYZ)) { + visitor(data, cell, layer); + } + cell = nextGridCell(cell, L.grid_shape); + } + } +} +export function dimensionScaleXYZ(data: UnknownAnnotationInfo, xyz: readonly [string, string, string]) { + return ((projectXYZ(data, data.dimensions, xyz)?.map((d) => d.scale) as unknown) ?? [1, 1, 1]) as vec3; +} +export function layerSizeInXY(data: UnknownAnnotationInfo, layer: number, xy: readonly [string, string]): vec2 { + const L = data.spatial[layer]; + if (L) { + const shape = projectXY(data, L.grid_shape, xy); + const size = projectXY(data, L.chunk_size, xy); + if (shape && size) { + return Vec2.mul(shape, size); + } + } + return [0, 0]; +} +export function chunkSizeInXY(data: UnknownAnnotationInfo, layer: number, xy: readonly [string, string]): vec2 { + const L = data.spatial[layer]; + if (L) { + return projectXY(data, L.chunk_size, xy) ?? 
[0, 0]; + } + return [0, 0]; +} diff --git a/packages/precomputed/src/render/annotationRenderer.ts b/packages/precomputed/src/render/annotationRenderer.ts new file mode 100644 index 00000000..338533ac --- /dev/null +++ b/packages/precomputed/src/render/annotationRenderer.ts @@ -0,0 +1,141 @@ +import { + buildAsyncRenderer, + type CachedVertexBuffer, + type QueueOptions, + type Renderer, +} from '@alleninstitute/vis-core'; +import type REGL from 'regl'; +import { + type AnnotationInfo, + chunkSizeInXY, + dimensionScaleXYZ, + extractPoint, + getAnnotations, + visitChunksInLayer, +} from '../loader/annotations'; +import { Box3D, type box3D, type vec2, Vec2, type vec3, Vec3 } from '@alleninstitute/vis-geometry'; +import { buildPointRenderer } from './pointAnnotationRenderer'; + +export type AnnotationChunk = { + layerIndex: number; + layerKey: string; + chunk_file: string; + cell: readonly number[]; + numAnnotations: number; +}; +type Settings = { + camera: { + view: box3D; + screenSize: vec2; + }; + lodThreshold: number; // a chunk must be bigger than this value, in screen pixels, to be drawn + // NG precomputed annotations have arbitrary dimensionality - xyz maps any 3 dimensions [foo,bar,baz] to [x,y,z] + xyz: readonly [string, string, string]; + color: vec3; + outlineColor: vec3; +}; +type PointAnnotationData = { + positions: CachedVertexBuffer; +}; +export type PointAnnotationInfo = AnnotationInfo<'point'> & { url: string }; + +function getVisibleItems(data: PointAnnotationInfo, settings: Settings) { + // find all chunks that intersect our given view + // so long as each chunk's xy-projected width or height is greater than + // lodThreshold + const items: AnnotationChunk[] = []; + const { camera, xyz, lodThreshold } = settings; + const vSize = Vec3.xy(Box3D.size(camera.view)); + const pxPerUnit = Vec2.div(camera.screenSize, vSize); + + for (let i = 0; i < data.spatial.length; i++) { + //check if the layer is above the LOD floor: + const cSize = chunkSizeInXY(data, i, [xyz[0], xyz[1]]); + // cSize is in data-units. + const cPxSize = Vec2.mul(pxPerUnit, cSize); + // cPxSize is the size a chunk would appear to occupy, in screen px + if (cPxSize[0] > lodThreshold || cPxSize[1] > lodThreshold) { + visitChunksInLayer(data, i, camera.view, xyz, (dataset, cell, l) => { + items.push({ + layerIndex: i, + layerKey: data.spatial[i].key, + chunk_file: cell.join('_'), + cell, + numAnnotations: Number(data.spatial[i].limit), + }); + }); + } else { + // abort the loop over all layers early - this layer's chunks + // are too small to see, and that will be even more the case + // as we proceed. + break; + } + } + return items; +} + +export function buildNGPointAnnotationRenderer( + regl: REGL.Regl, +): Renderer { + const cmd = buildPointRenderer(regl); + return { + destroy: () => {}, + getVisibleItems, + isPrepared: (cacheData): cacheData is PointAnnotationData => { + return 'positions' in cacheData && cacheData.positions?.type === 'buffer'; + }, + cacheKey(item, requestKey, data, settings) { + const { xyz } = settings; + return `${data.url}${item.layerKey}/${item.chunk_file}(${xyz.join('|')})`; + }, + fetchItemContent(item, dataset, settings, signal) { + return { + positions: async () => { + const scale = dimensionScaleXYZ(dataset, settings.xyz); + const { stream, numAnnotations } = await getAnnotations( + dataset.url, + dataset, + { level: item.layerIndex, cell: item.cell }, + extractPoint, + ); + + // TODO: we could... upload the whole buffer to GPU + // and use vertex binding strides to get at the data... 
+ // however the buffer is way bigger than we need (ids...) + // and there are annoying byte alignment issues to consider - let's try this for now, maybe it will be fast enough + const xyzs = new Float32Array(Number(numAnnotations) * 3); + let i = 0; + + for (const v of stream) { + xyzs[i * 3] = scale[0] * (v.point[settings.xyz[0]] ?? 0); + xyzs[i * 3 + 1] = scale[1] * (v.point[settings.xyz[1]] ?? 0); + xyzs[i * 3 + 2] = scale[2] * (v.point[settings.xyz[2]] ?? 0); + i += 1; + } + return { + buffer: regl.buffer(xyzs), + bytes: xyzs.byteLength, + type: 'buffer', + }; + }, + }; + }, + renderItem(target: REGL.Framebuffer2D | null, item, dataset, settings, points) { + const { color, outlineColor, camera } = settings; + const { view } = camera; + cmd({ + color, + outlineColor, + positions: points.positions.buffer, + pointSize: 8, + target, + view, + count: item.numAnnotations, + }); + }, + }; +} + +export function buildAsyncNGPointRenderer(regl: REGL.Regl, options?: QueueOptions) { + return buildAsyncRenderer(buildNGPointAnnotationRenderer(regl), options); +} diff --git a/packages/precomputed/src/render/pointAnnotationRenderer.ts b/packages/precomputed/src/render/pointAnnotationRenderer.ts new file mode 100644 index 00000000..c61e0b2c --- /dev/null +++ b/packages/precomputed/src/render/pointAnnotationRenderer.ts @@ -0,0 +1,121 @@ +import { type box3D, Vec3, type box2D, type vec2, type vec3, type vec4 } from '@alleninstitute/vis-geometry'; +import type REGL from 'regl'; +import type { Framebuffer2D } from 'regl'; + +// the dataset: some AnnotationInfo object +// the item: a single chunk of a layer of the spatial index +// the fetch is one single huge buffer +// the problem is we probably want only a few of the dimensions, props, etc... +// let's start and see how it falls out + +const vert = ` +precision highp float; + attribute vec3 position; + + uniform float pointSize; + uniform vec4 view; + uniform vec2 zNearFar; + varying float opacity; + + void main(){ + gl_PointSize=pointSize; + float zRange = zNearFar.y - zNearFar.x; + float zMid = zNearFar.x+(zRange/2.0); + vec3 viewSize = vec3(view.zw-view.xy,zRange); + // TODO: it's unclear if values within a chunk are relative to their chunk + + vec3 pos = (position-vec3(view.xy,0.0))/viewSize; + opacity= clamp(((zRange/10.0)-(position.z-zMid))/(zRange/10.0),0.0,1.0); + vec3 clip = (pos*2.0)-1.0; + gl_Position = vec4(clip,1); + } +`; +const frag = ` +precision highp float; + +uniform vec3 color; +uniform vec3 outlineColor; +varying float opacity; + +void main(){ + + vec2 circleCoord = (2.0 * gl_PointCoord.xy)-1.0; + if(dot(circleCoord,circleCoord)>1.0){ + discard; + } + vec3 clr = mix(color,outlineColor, smoothstep(0.7,0.8, length(circleCoord))); + gl_FragColor = vec4(clr, opacity); +} +`; + +type Settings = { + camera: box2D; + // NG precomputed annotations have arbitrary dimensionality - xyz maps any 3 dimensions [foo,bar,baz] to [x,y,z] + xyz: readonly [string, string, string]; + color: vec3; + outlineColor: vec3; + size: number; +}; +type InnerProps = { + target: Framebuffer2D | null; + zNearFar: vec2; + color: vec3; + pointSize: number; + count: number; + outlineColor: vec3; + positions: REGL.Buffer; + view: vec4; +}; +type RenderProps = { + target: Framebuffer2D | null; + color: vec3; + outlineColor: vec3; + pointSize: number; + positions: REGL.Buffer; + view: box3D; + count: number; +}; +type Unis = { + view: vec4; + color: vec3; + zNearFar: vec2; + outlineColor: vec3; + pointSize: number; +}; +type Attrs = { + position: REGL.Buffer; +}; +export 
function buildPointRenderer(regl: REGL.Regl) { + const cmd = regl({ + vert, + frag, + attributes: { + position: regl.prop('positions'), + }, + uniforms: { + view: regl.prop('view'), + pointSize: regl.prop('pointSize'), + color: regl.prop('color'), + outlineColor: regl.prop('outlineColor'), + zNearFar: regl.prop('zNearFar'), + }, + depth: { + enable: true, + }, + blend: { + enable: true, + }, + count: regl.prop('count'), + framebuffer: regl.prop('target'), + primitive: 'points', + }); + + return (props: RenderProps) => { + const view: vec4 = [...Vec3.xy(props.view.minCorner), ...Vec3.xy(props.view.maxCorner)]; + cmd({ + ...props, + view, + zNearFar: [props.view.minCorner[2], props.view.maxCorner[2]], + }); + }; +} diff --git a/packages/precomputed/tsconfig.json b/packages/precomputed/tsconfig.json new file mode 100644 index 00000000..e9766f06 --- /dev/null +++ b/packages/precomputed/tsconfig.json @@ -0,0 +1,14 @@ +{ + "extends": "../../tsconfig.base.json", + "compilerOptions": { + "paths": { + "~/*": ["./*"] + }, + "moduleResolution": "Bundler", + "module": "ES2022", + "target": "ES2022", + "lib": ["es2022", "DOM"] + }, + "include": ["./src/index.ts"], + "exclude": ["tests/", "**/*.test.ts"] +} diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index b9de4865..453633b5 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -84,6 +84,24 @@ importers: specifier: 3.25.50 version: 3.25.50 + packages/precomputed: + dependencies: + '@alleninstitute/vis-core': + specifier: workspace:* + version: link:../core + '@alleninstitute/vis-geometry': + specifier: workspace:* + version: link:../geometry + regl: + specifier: 2.1.0 + version: 2.1.0 + ts-pattern: + specifier: 5.7.1 + version: 5.7.1 + zod: + specifier: 3.24.3 + version: 3.24.3 + site: dependencies: '@alleninstitute/vis-core': @@ -98,6 +116,9 @@ importers: '@alleninstitute/vis-omezarr': specifier: workspace:* version: link:../packages/omezarr + '@alleninstitute/vis-precomputed': + specifier: workspace:* + version: link:../packages/precomputed '@astrojs/check': specifier: 0.9.4 version: 0.9.4(typescript@5.8.3) @@ -3579,6 +3600,9 @@ packages: trough@2.2.0: resolution: {integrity: sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==} + ts-pattern@5.7.1: + resolution: {integrity: sha512-EGs8PguQqAAUIcQfK4E9xdXxB6s2GK4sJfT/vcc9V1ELIvC4LH/zXu2t/5fajtv6oiRCxdv7BgtVK3vWgROxag==} + tsconfck@3.1.5: resolution: {integrity: sha512-CLDfGgUp7XPswWnezWwsCRxNmgQjhYq3VXHM0/XIRxhVrKw0M1if9agzryh1QS3nxjCROvV+xWxoJO1YctzzWg==} engines: {node: ^18 || >=20} @@ -4046,6 +4070,9 @@ packages: typescript: ^4.9.4 || ^5.0.2 zod: ^3 + zod@3.24.3: + resolution: {integrity: sha512-HhY1oqzWCQWuUqvBFnsyrtZRhyPeR7SUGv+C4+MsisMuVfSPx8HpwWqH8tRahSlt6M3PiFAcoeFhZAqIXTxoSg==} + zod@3.25.50: resolution: {integrity: sha512-VstOnRxf4tlSq0raIwbn0n+LA34SxVoZ8r3pkwSUM0jqNiA/HCMQEVjTuS5FZmHsge+9MDGTiAuHyml5T0um6A==} @@ -8387,6 +8414,8 @@ snapshots: trough@2.2.0: {} + ts-pattern@5.7.1: {} + tsconfck@3.1.5(typescript@5.8.3): optionalDependencies: typescript: 5.8.3 @@ -8828,6 +8857,8 @@ snapshots: typescript: 5.8.3 zod: 3.25.50 + zod@3.24.3: {} + zod@3.25.50: {} zwitch@2.0.4: {} diff --git a/site/package.json b/site/package.json index 8ab39355..ef2db168 100644 --- a/site/package.json +++ b/site/package.json @@ -48,6 +48,7 @@ "@alleninstitute/vis-dzi": "workspace:*", "@alleninstitute/vis-geometry": "workspace:*", "@alleninstitute/vis-omezarr": "workspace:*", + "@alleninstitute/vis-precomputed": "workspace:*", "@astrojs/check": "0.9.4", "@astrojs/mdx": "4.3.0", 
"@astrojs/react": "4.3.0", diff --git a/site/src/content/docs/examples/annotated.mdx b/site/src/content/docs/examples/annotated.mdx new file mode 100644 index 00000000..121b0a57 --- /dev/null +++ b/site/src/content/docs/examples/annotated.mdx @@ -0,0 +1,15 @@ +--- +title: OMEZARR with NG Precomputed Annotations +description: WIP +tableOfContents: false +--- + +import { Demo } from '../../../examples/annotated/Demo.tsx'; + +A simple proof of concept for displaying (NG-precomputed) Point-annotations in combination with +an ome-zarr slice - the annotations are the locations of cells(?) within the volumetric image. + +
+<Demo client:only="react" />
+ + diff --git a/site/src/examples/annotated/AnnotatedOmeZarr.tsx b/site/src/examples/annotated/AnnotatedOmeZarr.tsx new file mode 100644 index 00000000..20f4cd99 --- /dev/null +++ b/site/src/examples/annotated/AnnotatedOmeZarr.tsx @@ -0,0 +1,131 @@ +import { Box3D, CartesianPlane, Vec2, type box2D, type Interval, type vec2 } from '@alleninstitute/vis-geometry'; +import type { RenderFrameFn, RenderServer } from '@alleninstitute/vis-core'; +import { buildAsyncNGPointRenderer, type PointAnnotationInfo } from '@alleninstitute/vis-precomputed'; +import { + buildAsyncOmezarrRenderer, + type OmeZarrMetadata, + type RenderSettingsChannels, +} from '@alleninstitute/vis-omezarr'; +import { multithreadedDecoder } from '../common/loaders/ome-zarr/sliceWorkerPool'; +import { CameraHelper, type HandlerProps } from './CameraHelper'; +import { RenderClientHelper, type ServerRenderer } from './ClientHelper'; + +const compose = (ctx: CanvasRenderingContext2D, image: ImageData) => { + ctx.putImageData(image, 0, 0); +}; + +class AnnotatedVolumeRenderer implements ServerRenderer<RenderProps> { + omeRenderer: ReturnType<typeof buildAsyncOmezarrRenderer>; + annoRenderer: ReturnType<typeof buildAsyncNGPointRenderer>; + s: RenderServer; + constructor(server: RenderServer) { + this.s = server; + this.omeRenderer = buildAsyncOmezarrRenderer(server.regl, multithreadedDecoder, { + numChannels: 1, + queueOptions: { maximumInflightAsyncTasks: 2 }, + }); + this.annoRenderer = buildAsyncNGPointRenderer(server.regl); + } + renderWithServer(props: RenderProps & { cnvs: HTMLCanvasElement }): void { + const { camera, points, img, cnvs } = props; + const { view } = camera; + const channels = img.colorChannels.reduce((acc, val, index) => { + acc[val.label ?? `${index}`] = { + rgb: val.rgb, + gamut: val.range, + index, + }; + return acc; + }, {} as RenderSettingsChannels); + const defaultInterval: Interval = { min: 0, max: 80 }; + const fallbackChannels: RenderSettingsChannels = { + R: { rgb: [1.0, 0, 0], gamut: defaultInterval, index: 0 }, + G: { rgb: [0, 1.0, 0], gamut: defaultInterval, index: 1 }, + B: { rgb: [0, 0, 1.0], gamut: defaultInterval, index: 2 }, + }; + const renderPoints: RenderFrameFn = (target, cache, callback) => { + this.s.regl.clear({ framebuffer: target, color: [0.3, 0, 0, 1], depth: 1 }); + // so: our points are in meters, + // and our volume is in micrometers - that's 6 orders of magnitude + // + const middle = 1000 * 1e-6; + const rp = this.annoRenderer( + points, + { + // note: this camera scaling is fine for THIS DATA, specifically - a real app would want to read the files and behave in a data-dependent way! + camera: { + ...camera, + view: Box3D.create( + [...Vec2.scale(view.minCorner, 1e-6), middle - 300e-6], + [...Vec2.scale(view.maxCorner, 1e-6), middle + 300e-6], + ), + }, + color: [1, 1, 0], + outlineColor: [0, 0, 0], + lodThreshold: 10, + xyz: ['x', 'y', 'z'], + }, + callback, + target, + cache, + ); + const op = this.omeRenderer( + img, + { + camera, + channels: Object.keys(channels).length > 0 ? 
channels : fallbackChannels, + planeLocation: { + parameter: 0.5, + }, + plane: new CartesianPlane('xy'), + tileSize: 256, + }, + callback, + target, + cache, + ); + // return a lifecycle thingy that would cancel both: + return { + cancelFrame(reason) { + rp.cancelFrame(reason); + op.cancelFrame(reason); + }, + }; + }; + + this.s.beginRendering( + renderPoints, + (e) => { + if (e.status === 'progress' || e.status === 'finished') { + e.server.copyToClient(compose); + } + }, + cnvs, + ); + } +} +type RenderProps = { + camera: { view: box2D; screenSize: vec2 }; + points: PointAnnotationInfo; + img: OmeZarrMetadata; +}; +function makeRenderer(server: RenderServer) { + return new AnnotatedVolumeRenderer(server); +} +function RenderVoxelsAndDots(props: HandlerProps & RenderProps & { width: number; height: number }) { + return <RenderClientHelper {...props} newRenderer={makeRenderer} />; +} + +export function AnnotatedOmeZarrView(props: { screenSize: vec2; points: PointAnnotationInfo; img: OmeZarrMetadata }) { + const { points, img, screenSize } = props; + return ( + <CameraHelper screenSize={screenSize} Thing={RenderVoxelsAndDots} points={points} img={img} width={screenSize[0]} height={screenSize[1]} /> + ); +} diff --git a/site/src/examples/annotated/CameraHelper.tsx b/site/src/examples/annotated/CameraHelper.tsx new file mode 100644 index 00000000..48bbe5a6 --- /dev/null +++ b/site/src/examples/annotated/CameraHelper.tsx @@ -0,0 +1,62 @@ +import { Box2D, type box2D, type vec2 } from '@alleninstitute/vis-geometry'; +import { useState, type ComponentType } from 'react'; +import { pan, zoom } from '../common/camera'; + +type Camera = { + camera: { + view: box2D; + screenSize: vec2; + }; +}; +export type HandlerProps = { + onWheel?: (e: WheelEvent) => void; + onMouseDown?: (e: React.MouseEvent) => void; + onMouseUp?: (e: React.MouseEvent) => void; + onMouseMove?: (e: React.MouseEvent) => void; + onMouseLeave?: (e: React.MouseEvent) => void; +}; +type InnerProps<T> = HandlerProps & T; +type Props<T> = Omit<InnerProps<T>, keyof HandlerProps | 'camera'> & { + screenSize: vec2; + Thing: ComponentType<InnerProps<T>>; +}; +export function CameraHelper<T>(props: Props<T>) { + const { screenSize, Thing } = props; + const [view, setView] = useState(Box2D.create([0, 0], [10000, 10000])); + const [dragging, setDragging] = useState(false); + + const handleZoom = (e: WheelEvent) => { + e.preventDefault(); + const zoomScale = e.deltaY > 0 ? 1.1 : 0.9; + const v = zoom(view, screenSize, zoomScale, [e.offsetX, e.offsetY]); + setView(v); + }; + + const handlePan = (e: React.MouseEvent) => { + if (dragging) { + const v = pan(view, screenSize, [e.movementX, e.movementY]); + setView(v); + } + }; + + const handleMouseDown = () => { + setDragging(true); + }; + + const handleMouseUp = () => { + setDragging(false); + }; + + return ( + //@ts-expect-error I've stared at this for a while... it's fine + <Thing {...props} camera={{ view, screenSize }} onWheel={handleZoom} onMouseDown={handleMouseDown} onMouseUp={handleMouseUp} onMouseMove={handlePan} onMouseLeave={handleMouseUp} /> + ); +} diff --git a/site/src/examples/annotated/ClientHelper.tsx b/site/src/examples/annotated/ClientHelper.tsx new file mode 100644 index 00000000..8a4f6cdc --- /dev/null +++ b/site/src/examples/annotated/ClientHelper.tsx @@ -0,0 +1,70 @@ +// I'm sick of setting all this up over and over... 
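+// (RenderClientHelper owns the canvas element, attaches the mouse & wheel handlers to it, and registers the canvas with the shared RenderServer - each demo just supplies a renderer factory)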
+ +import { useContext, useEffect, useRef } from 'react'; +import type { HandlerProps } from './CameraHelper'; +import type { RenderServer } from '@alleninstitute/vis-core'; +import { renderServerContext } from '../common/react/render-server-provider'; + +export interface ServerRenderer<T> { + renderWithServer(props: T & { cnvs: HTMLCanvasElement }): void; +} + +type Props<T> = T & + HandlerProps & { + width: number; + height: number; + newRenderer: (server: RenderServer) => ServerRenderer<T>; + }; +export function RenderClientHelper<T>(props: Props<T>) { + const { width, height, newRenderer, onMouseDown, onMouseLeave, onMouseMove, onMouseUp, onWheel, ...rest } = props; + // add the handlers to our canvas, which we have to keep a reference to... + const cnvs = useRef<HTMLCanvasElement>(null); + const server = useContext(renderServerContext); + const renderer = useRef<ServerRenderer<T> | undefined>(undefined); + // we have to add the listener this way because onWheel is a passive listener by default + // that means we can't preventDefault to stop scrolling + useEffect(() => { + const handleWheel = (e: WheelEvent) => onWheel?.(e); + const canvas = cnvs; + if (canvas?.current) { + canvas.current.addEventListener('wheel', handleWheel, { passive: false }); + } + return () => { + if (canvas?.current) { + canvas.current.removeEventListener('wheel', handleWheel); + } + }; + }, [onWheel]); + + // our one chance to initialize our renderer system + // biome-ignore lint/correctness/useExhaustiveDependencies: + useEffect(() => { + if (server) { + renderer.current = newRenderer(server); + } + return () => { + if (server && cnvs.current) { + server.destroyClient(cnvs.current); + } + }; + }, [server]); + // something changed, render with GL + // biome-ignore lint/correctness/useExhaustiveDependencies: + useEffect(() => { + if (server && renderer.current && cnvs.current) { + renderer.current.renderWithServer({ ...props, cnvs: cnvs.current }); + } + }, [...Object.values(rest)]); + + return ( + <canvas ref={cnvs} width={width} height={height} onMouseDown={onMouseDown} onMouseUp={onMouseUp} onMouseMove={onMouseMove} onMouseLeave={onMouseLeave} /> + ); +} diff --git a/site/src/examples/annotated/Demo.tsx b/site/src/examples/annotated/Demo.tsx new file mode 100644 index 00000000..6f7762e4 --- /dev/null +++ b/site/src/examples/annotated/Demo.tsx @@ -0,0 +1,45 @@ +import { loadMetadata, type OmeZarrMetadata } from '@alleninstitute/vis-omezarr'; +import { RenderServerProvider } from '../common/react/render-server-provider'; +import { AnnotatedOmeZarrView } from './AnnotatedOmeZarr'; +import { useEffect, useState } from 'react'; +import { isPointAnnotation, ParseNGPrecomputedInfo, type PointAnnotationInfo } from '@alleninstitute/vis-precomputed'; + +const SPIM = { + type: 's3', + region: 'us-west-2', + url: 's3://aind-open-data/SmartSPIM_787715_2025-04-08_18-33-36_stitched_2025-04-09_22-42-59/image_tile_fusing/OMEZarr/Ex_445_Em_469.zarr', + // url: 's3://allen-genetic-tools/tissuecyte/823818122/ome_zarr_conversion/823818122.zarr/', +} as const; +const dots = + 'https://aind-open-data.s3.amazonaws.com/SmartSPIM_787715_2025-04-08_18-33-36_stitched_2025-04-09_22-42-59/image_cell_segmentation/Ex_445_Em_469/visualization/detected_precomputed/'; +function useJunk() { + const [omezarr, setOmezarr] = useState<OmeZarrMetadata | null>(null); + const [annotations, setAnnotations] = useState<PointAnnotationInfo | null>(null); + useEffect(() => { + loadMetadata(SPIM).then((v) => { + setOmezarr(v); + }); + fetch(`${dots}info`) + .then((x) => x.json()) + .then((json) => ParseNGPrecomputedInfo(json)) + .then((yay) => { + if (yay && isPointAnnotation(yay)) { + setAnnotations({ ...yay, url: dots }); + } + }); + }, []); + + const loading = omezarr === null || annotations === null; + 
return [loading, annotations, omezarr] as const; +} + +export function Demo() { + const [_loading, points, img] = useJunk(); + return ( + <RenderServerProvider>
+ {points && img && <AnnotatedOmeZarrView screenSize={[800, 800]} points={points} img={img} />}
+ </RenderServerProvider>
+ ); +} diff --git a/site/src/examples/common/react/render-server-provider.tsx b/site/src/examples/common/react/render-server-provider.tsx index 93e7b495..dab95404 100644 --- a/site/src/examples/common/react/render-server-provider.tsx +++ b/site/src/examples/common/react/render-server-provider.tsx @@ -9,6 +9,10 @@ export function RenderServerProvider(props: PropsWithChildren) { useEffect(() => { server.current = new RenderServer([2048, 2048], ['oes_texture_float']); logger.info('server started...'); + return () => { + logger.info('server destroyed... (provider unmounted)'); + server.current?.destroyServer(); + }; }, []); return {children}; } diff --git a/site/src/examples/dzi/dzi-viewer.tsx b/site/src/examples/dzi/dzi-viewer.tsx index 8dc4fe2a..aae142a6 100644 --- a/site/src/examples/dzi/dzi-viewer.tsx +++ b/site/src/examples/dzi/dzi-viewer.tsx @@ -64,7 +64,6 @@ export function DziViewer(props: Props) { if (server && renderer.current && cnvs.current) { const renderMyData: RenderFrameFn = (target, cache, callback) => { if (renderer.current) { - // erase the frame before we start drawing on it return renderer.current(dzi, { camera }, callback, target, cache); } return null; diff --git a/site/src/examples/omezarr/omezarr-demo.tsx b/site/src/examples/omezarr/omezarr-demo.tsx index 1733a9a7..55b51f8d 100644 --- a/site/src/examples/omezarr/omezarr-demo.tsx +++ b/site/src/examples/omezarr/omezarr-demo.tsx @@ -43,6 +43,15 @@ const demoOptions: DemoOption[] = [ url: 's3://allen-genetic-tools/tissuecyte/823818122/ome_zarr_conversion/823818122.zarr/', }, }, + { + value: 'opt5', + label: 'Smart-SPIM (experimental)', + res: { + type: 's3', + region: 'us-west-2', + url: 's3://aind-open-data/SmartSPIM_787715_2025-04-08_18-33-36_stitched_2025-04-09_22-42-59/image_tile_fusing/OMEZarr/Ex_445_Em_469.zarr', + }, + }, ]; const screenSize: vec2 = [800, 800]; @@ -67,7 +76,7 @@ function makeZarrSettings(screenSize: vec2, view: box2D, orthoVal: number, omeza return { camera: { screenSize, view }, - orthoVal, + planeLocation: { parameter: orthoVal }, plane: PLANE_XY, tileSize: 256, channels: Object.keys(omezarrChannels).length > 0 ? omezarrChannels : fallbackChannels, @@ -92,7 +101,7 @@ export function OmezarrDemo() { loadMetadata(res).then((v) => { setOmezarr(v); setOmezarrJson(JSON.stringify(v, undefined, 4)); - setPlaneIndex(Math.floor(v.maxOrthogonal(PLANE_XY) / 2)); + setPlaneIndex(0.5); const dataset = v.getFirstShapedDataset(0); if (!dataset) { throw new Error('dataset 0 does not exist!'); @@ -132,7 +141,8 @@ export function OmezarrDemo() { // you could put this on the mouse wheel, but for this demo we'll have buttons const handlePlaneIndex = (next: 1 | -1) => { - setPlaneIndex((prev) => Math.max(0, Math.min(prev + next, (omezarr?.maxOrthogonal(PLANE_XY) ?? 1) - 1))); + const step = 1 / (omezarr?.maxOrthogonal(PLANE_XY) ?? 1); + setPlaneIndex((prev) => Math.max(0, Math.min(1, prev + step * next))); }; const handleZoom = (e: WheelEvent) => { @@ -233,7 +243,8 @@ export function OmezarrDemo() { > {(omezarr && ( - Slide {planeIndex + 1} of {omezarr?.maxOrthogonal(PLANE_XY) ?? 0} + Slide {Math.floor(planeIndex * (omezarr?.maxOrthogonal(PLANE_XY) ?? 1))} of{' '} + {omezarr?.maxOrthogonal(PLANE_XY) ?? 0} )) || No image loaded}
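For reference, a minimal sketch of how a caller chooses between the two forms of the new planeLocation setting (RenderSettings and CartesianPlane are the types from the diffs above; the import paths and the Pick<> usage are illustrative):

```ts
import { CartesianPlane } from '@alleninstitute/vis-geometry';
import type { RenderSettings } from '@alleninstitute/vis-omezarr';

// absolute form: ask for slice 42 directly - beware that slice counts
// differ between levels of detail of the same volume
const byIndex: Pick<RenderSettings, 'plane' | 'planeLocation'> = {
    plane: new CartesianPlane('xy'),
    planeLocation: { index: 42 },
};

// relative form: the middle of the volume, no matter which level of detail gets picked
const byParameter: Pick<RenderSettings, 'plane' | 'planeLocation'> = {
    plane: new CartesianPlane('xy'),
    planeLocation: { parameter: 0.5 },
};
```

Because each member of the union marks the other key as `?: never`, passing both index and parameter is a compile-time error rather than a silent precedence choice.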
diff --git a/site/src/examples/omezarr/omezarr-viewer.tsx b/site/src/examples/omezarr/omezarr-viewer.tsx index 0c51b9e2..aee37467 100644 --- a/site/src/examples/omezarr/omezarr-viewer.tsx +++ b/site/src/examples/omezarr/omezarr-viewer.tsx @@ -108,7 +108,7 @@ export function OmezarrViewer({ if (server && stash.current) { server.regl.clear({ framebuffer: stash.current.image, color: [0, 0, 0, 0], depth: 1 }); } - }, [server, settings.orthoVal]); + }, [server, settings.planeLocation.index, settings.planeLocation.parameter]); // render frames useEffect(() => { const stashProgress = (server: RenderServer, target: REGL.Framebuffer2D) => { diff --git a/site/src/examples/omezarr/sliceview.tsx b/site/src/examples/omezarr/sliceview.tsx index a4eaba39..870fe5f9 100644 --- a/site/src/examples/omezarr/sliceview.tsx +++ b/site/src/examples/omezarr/sliceview.tsx @@ -36,7 +36,9 @@ const settings: RenderSettings = { }, }, plane: PLANE_XY, - orthoVal: 3, + planeLocation: { + index: 3, + }, camera: { // the omezarr renderer expects a box in whatever space is given by the omezarr file itself in its // axes metadata - for example, millimeters. if you load a volume that says it's 30mm X 30mm X 10mm,
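Similarly, a sketch of driving the new precomputed-annotation loader by hand (ParseNGPrecomputedInfo, isPointAnnotation, and getAnnotations are exported by the package index in this diff; treating extractPoint as also re-exported is an assumption - it is currently only exported from the loader module - and the URL handling is illustrative):

```ts
import {
    ParseNGPrecomputedInfo,
    isPointAnnotation,
    getAnnotations,
    extractPoint, // assumption: re-exported from the package index
} from '@alleninstitute/vis-precomputed';

// base is the directory that contains the `info` file
async function logPoints(base: string) {
    const info = ParseNGPrecomputedInfo(await (await fetch(`${base}info`)).json());
    if (!info || !isPointAnnotation(info)) {
        return; // missing, malformed, or not a point-annotation source
    }
    // read the single chunk at the coarsest spatial-index level
    const { stream, numAnnotations } = await getAnnotations(base, info, { level: 0, cell: [0, 0, 0] }, extractPoint);
    console.log(`streaming ${numAnnotations} annotations...`);
    for (const anno of stream) {
        // each entry carries a bigint id, a point keyed by dimension name, and any declared properties
        console.log(anno.id, anno.point, anno.properties);
    }
}
```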