From 1dc82f3eb3412b5c31c1c5c738b9829c09cb320c Mon Sep 17 00:00:00 2001 From: jaewook Date: Fri, 7 Nov 2025 16:46:49 +0900 Subject: [PATCH 1/4] vae implementation # Conflicts: # bonsai/models/vae/README.md # bonsai/models/vae/modeling.py # bonsai/models/vae/params.py # bonsai/models/vae/tests/VAE_segmentation_example.ipynb # bonsai/models/vae/tests/run_model.py # pyproject.toml # Conflicts: # bonsai/models/vae/tests/VAE_segmentation_example.ipynb # bonsai/models/vae/tests/run_model.py --- bonsai/models/vae/modeling.py | 460 +++++++++++++++--- bonsai/models/vae/params.py | 266 +++++++++- .../VAE_image_reconstruction_example.ipynb | 344 +++++++++++++ .../vae/tests/VAE_segmentation_example.ipynb | 315 ------------ bonsai/models/vae/tests/run_model.py | 61 ++- bonsai/models/vae/tests/test_outputs_vae.py | 51 ++ pyproject.toml | 1 + 7 files changed, 1083 insertions(+), 415 deletions(-) create mode 100644 bonsai/models/vae/tests/VAE_image_reconstruction_example.ipynb delete mode 100644 bonsai/models/vae/tests/VAE_segmentation_example.ipynb create mode 100644 bonsai/models/vae/tests/test_outputs_vae.py diff --git a/bonsai/models/vae/modeling.py b/bonsai/models/vae/modeling.py index 3b6b1099..849a850f 100644 --- a/bonsai/models/vae/modeling.py +++ b/bonsai/models/vae/modeling.py @@ -1,87 +1,417 @@ -import dataclasses -import logging -from functools import partial -from itertools import pairwise -from typing import Sequence +from typing import Optional import jax +import jax.image import jax.numpy as jnp from flax import nnx -@dataclasses.dataclass(frozen=True) -class ModelConfig: - """Configuration for the Variational Autoencoder (VAE) model.""" +class ResnetBlock(nnx.Module): + conv_shortcut: nnx.Data[Optional[nnx.Conv]] - input_dim: int = 784 # 28*28 for MNIST - hidden_dims: Sequence[int] = (512, 256) - latent_dim: int = 20 + def __init__(self, in_channels: int, out_channels: int, groups: int, rngs: nnx.Rngs): + self.conv_shortcut = None + if in_channels != out_channels: + self.conv_shortcut = nnx.Conv( + in_features=in_channels, + out_features=out_channels, + kernel_size=(1, 1), + strides=(1, 1), + padding="VALID", + use_bias=True, + rngs=rngs, + ) + self.norm1 = nnx.GroupNorm(num_groups=groups, num_features=in_channels, epsilon=1e-6, rngs=rngs) + self.conv1 = nnx.Conv( + in_features=in_channels, + out_features=out_channels, + kernel_size=(3, 3), + strides=(1, 1), + padding="SAME", + rngs=rngs, + ) + self.norm2 = nnx.GroupNorm(num_groups=groups, num_features=out_channels, epsilon=1e-6, rngs=rngs) + self.conv2 = nnx.Conv( + in_features=out_channels, + out_features=out_channels, + kernel_size=(3, 3), + strides=(1, 1), + padding="SAME", + rngs=rngs, + ) + + def __call__(self, input_tensor): + hidden_states = input_tensor + + hidden_states = self.norm1(hidden_states) + hidden_states = nnx.silu(hidden_states) + hidden_states = self.conv1(hidden_states) + + hidden_states = self.norm2(hidden_states) + hidden_states = nnx.silu(hidden_states) + hidden_states = self.conv2(hidden_states) + + if self.conv_shortcut is not None: + input_tensor = self.conv_shortcut(input_tensor) + + output_tensor = (input_tensor + hidden_states) / 1.0 + + return output_tensor + + +class DownEncoderBlock2D(nnx.Module): + downsamplers: nnx.Data[Optional[nnx.Conv]] + + def __init__(self, in_channels: int, out_channels: int, groups: int, is_final_block: bool, rngs: nnx.Rngs): + self.resnets = nnx.List([]) + + for i in range(2): + current_in_channels = in_channels if i == 0 else out_channels + self.resnets.append( + 
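+                # Two ResnetBlocks per encoder stage; only the first changes the channel count.
+                # Non-final stages also get a stride-2 conv below that halves H and W.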
ResnetBlock(in_channels=current_in_channels, out_channels=out_channels, groups=groups, rngs=rngs) + ) + + self.downsamplers = None + + if not is_final_block: + self.downsamplers = nnx.Conv( + in_features=out_channels, + out_features=out_channels, + kernel_size=(3, 3), + strides=(2, 2), + padding="SAME", + rngs=rngs, + ) + + def __call__(self, x): + for resnet in self.resnets: + x = resnet(x) + + if self.downsamplers is not None: + x = self.downsamplers(x) + + return x + + +def scaled_dot_product_attention(query, key, value): + d_k = query.shape[-1] + scale_factor = 1.0 / jnp.sqrt(d_k) + + attention_scores = jnp.einsum("bhld,bhsd->bhls", query, key) + + attention_scores *= scale_factor + attention_weights = jax.nn.softmax(attention_scores, axis=-1) + + output = jnp.einsum("bhls,bhsd->bhld", attention_weights, value) + + return output + + +class Attention(nnx.Module): + def __init__(self, channels: int, groups: int, rngs: nnx.Rngs): + self.group_norm = nnx.GroupNorm(num_groups=groups, num_features=channels, epsilon=1e-6, rngs=rngs) + + self.to_q = nnx.Linear(in_features=channels, out_features=channels, use_bias=True, rngs=rngs) + self.to_k = nnx.Linear(in_features=channels, out_features=channels, use_bias=True, rngs=rngs) + self.to_v = nnx.Linear(in_features=channels, out_features=channels, use_bias=True, rngs=rngs) + + self.to_out = nnx.Linear(in_features=channels, out_features=channels, use_bias=True, rngs=rngs) + + def __call__(self, hidden_states): + heads = 1 + rescale_output_factor = 1 + residual = hidden_states + + batch_size, height, width, channel = None, None, None, None + + input_ndim = hidden_states.ndim + + if input_ndim == 4: + batch_size, height, width, channel = hidden_states.shape + hidden_states = hidden_states.reshape(batch_size, height * width, channel) + + batch_size, _, _ = hidden_states.shape + hidden_states = self.group_norm(hidden_states) + + query = self.to_q(hidden_states) + + encoder_hidden_states = hidden_states + + key = self.to_k(encoder_hidden_states) + value = self.to_v(encoder_hidden_states) + + inner_dim = key.shape[-1] + head_dim = inner_dim // heads + + query = query.reshape(batch_size, -1, heads, head_dim) + query = jnp.transpose(query, (0, 2, 1, 3)) + + key = key.reshape(batch_size, -1, heads, head_dim) + key = jnp.transpose(key, (0, 2, 1, 3)) + value = value.reshape(batch_size, -1, heads, head_dim) + value = jnp.transpose(value, (0, 2, 1, 3)) + + hidden_states = scaled_dot_product_attention(query, key, value) + + hidden_states = jnp.transpose(hidden_states, (0, 2, 1, 3)) + B, L, H, D = hidden_states.shape + hidden_states = hidden_states.reshape(B, L, H * D) + + hidden_states = self.to_out(hidden_states) + + if input_ndim == 4: + hidden_states = hidden_states.reshape(batch_size, height, width, channel) + + hidden_states = hidden_states + residual + hidden_states = hidden_states / rescale_output_factor + + return hidden_states + + +class UNetMidBlock2D(nnx.Module): + def __init__(self, channels: int, groups: int, num_res_blocks: int, rngs: nnx.Rngs): + self.resnets = nnx.List([]) + + for i in range(num_res_blocks): + self.resnets.append(ResnetBlock(in_channels=channels, out_channels=channels, groups=groups, rngs=rngs)) + + self.attentions = nnx.List([Attention(channels=channels, groups=groups, rngs=rngs)]) + + def __call__(self, x): + x = self.resnets[0](x) + x = self.attentions[0](x) + x = self.resnets[1](x) + + return x class Encoder(nnx.Module): - """Encodes the input into latent space parameters (mu and logvar).""" + def __init__(self, 
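+        # Layout mirrors the diffusers AutoencoderKL encoder whose weights params.py loads:
+        # conv_in, one DownEncoderBlock2D per entry of block_out_channels, a UNetMidBlock2D,
+        # then GroupNorm + SiLU + conv_out emitting 2 * latent channels (mean/logvar halves).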
block_out_channels, rngs: nnx.Rngs): + groups = 32 + + self.conv_in = nnx.Conv( + in_features=3, + out_features=block_out_channels[0], + kernel_size=(3, 3), + strides=(1, 1), + padding="SAME", + rngs=rngs, + ) + + self.down_blocks = nnx.List([]) + + in_channels = block_out_channels[0] + + for i, out_channels in enumerate(block_out_channels): + is_final_block = i == len(block_out_channels) - 1 + + self.down_blocks.append( + DownEncoderBlock2D( + in_channels=in_channels, + out_channels=out_channels, + groups=groups, + is_final_block=is_final_block, + rngs=rngs, + ) + ) + + in_channels = out_channels + + self.mid_block = UNetMidBlock2D(channels=in_channels, groups=groups, num_res_blocks=2, rngs=rngs) + self.conv_norm_out = nnx.GroupNorm( + num_groups=groups, num_features=block_out_channels[-1], epsilon=1e-6, rngs=rngs + ) + + conv_out_channels = 2 * 4 + + self.conv_out = nnx.Conv( + in_features=block_out_channels[-1], + out_features=conv_out_channels, + kernel_size=(3, 3), + strides=(1, 1), + padding="SAME", + rngs=rngs, + ) + + def __call__(self, x): + x = self.conv_in(x) + + for down_block in self.down_blocks: + x = down_block(x) + + x = self.mid_block(x) + x = self.conv_norm_out(x) + x = nnx.silu(x) + x = self.conv_out(x) + + return x + + +def upsample_nearest2d(input_tensor, scale_factors): + # (N, C, H_in, W_in) -> (N, H_in, W_in, C) + input_permuted = jnp.transpose(input_tensor, (0, 2, 3, 1)) + + # Nearest neighbor interpolation using jax.image.resize + output_permuted = jax.image.resize( + input_permuted, + shape=( + input_permuted.shape[0], + int(input_permuted.shape[1] * scale_factors[0]), # H_out + int(input_permuted.shape[2] * scale_factors[1]), # W_out + input_permuted.shape[3], # C + ), + method="nearest", + ) + + # (N, C, H_out, W_out) + output_tensor = jnp.transpose(output_permuted, (0, 3, 1, 2)) + + return output_tensor + - def __init__(self, cfg: ModelConfig, *, rngs: nnx.Rngs): - self.hidden_layers = [ - nnx.Linear(in_features, out_features, rngs=rngs) - for in_features, out_features in zip([cfg.input_dim, *list(cfg.hidden_dims)], cfg.hidden_dims) - ] - self.fc_mu = nnx.Linear(cfg.hidden_dims[-1], cfg.latent_dim, rngs=rngs) - self.fc_logvar = nnx.Linear(cfg.hidden_dims[-1], cfg.latent_dim, rngs=rngs) +def interpolate(input, scale_factor): + dim = input.ndim - 2 # 4 - 2 + scale_factors = [scale_factor for _ in range(dim)] # 2.0, 2.0 + return upsample_nearest2d(input, scale_factors) - def __call__(self, x: jax.Array) -> tuple[jax.Array, jax.Array]: - # Flatten the image - x = x.reshape((x.shape[0], -1)) - for layer in self.hidden_layers: - x = nnx.relu(layer(x)) - mu = self.fc_mu(x) - logvar = self.fc_logvar(x) - return mu, logvar +class Upsample2D(nnx.Module): + def __init__(self, channel: int, scale_factor: int, rngs: nnx.Rngs): + self.scale_factor = scale_factor + self.conv = nnx.Conv( + in_features=channel, + out_features=channel, + kernel_size=(3, 3), + strides=(1, 1), + padding="SAME", + use_bias=True, + rngs=rngs, + ) + + def __call__(self, x): + b, h, w, c = x.shape + new_shape = (b, int(h * self.scale_factor), int(w * self.scale_factor), c) + x = jax.image.resize(x, shape=new_shape, method="nearest") + x = self.conv(x) + + return x + + +class UpDecoderBlock2D(nnx.Module): + upsamplers = nnx.Data[Optional["Upsample2D"]] + + def __init__(self, in_channels: int, out_channels: int, groups: int, is_final_block: bool, rngs: nnx.Rngs): + self.resnets = nnx.List([]) + + for i in range(3): + current_in_channels = in_channels if i == 0 else out_channels + self.resnets.append( + 
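+                # Three ResnetBlocks per decoder stage (one more than the encoder);
+                # the first adapts the incoming channel count to this stage's width.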
ResnetBlock(in_channels=current_in_channels, out_channels=out_channels, groups=groups, rngs=rngs) + ) + + if not is_final_block: + self.upsamplers = Upsample2D(channel=out_channels, scale_factor=2.0, rngs=rngs) + else: + self.upsamplers = None + + def __call__(self, x): + for resnet in self.resnets: + x = resnet(x) + + if self.upsamplers is not None: + x = self.upsamplers(x) + + return x class Decoder(nnx.Module): - """Decodes the latent vector back into the original input space.""" + def __init__(self, latent_channels, block_out_channels, rngs: nnx.Rngs): + groups = 32 + + self.conv_in = nnx.Conv( + in_features=latent_channels, + out_features=block_out_channels[-1], + kernel_size=(3, 3), + strides=(1, 1), + padding="SAME", + rngs=rngs, + ) + self.mid_block = UNetMidBlock2D(channels=block_out_channels[-1], groups=groups, num_res_blocks=2, rngs=rngs) + self.up_blocks = nnx.List([]) - def __init__(self, cfg: ModelConfig, *, rngs: nnx.Rngs): - # Mirrored architecture of the encoder - dims = [cfg.latent_dim, *list(reversed(cfg.hidden_dims))] - self.hidden_layers = [ - nnx.Linear(in_features, out_features, rngs=rngs) for in_features, out_features in pairwise(dims, dims[1:]) - ] - self.fc_out = nnx.Linear(dims[-1], cfg.input_dim, rngs=rngs) + reversed_block_out_channels = list(reversed(block_out_channels)) + output_channel = reversed_block_out_channels[0] - def __call__(self, z: jax.Array) -> jax.Array: - for layer in self.hidden_layers: - z = nnx.relu(layer(z)) + for i, out_channels in enumerate(block_out_channels): + prev_output_channel = output_channel + output_channel = reversed_block_out_channels[i] - reconstruction_logits = self.fc_out(z) - return reconstruction_logits + is_final_block = i == len(block_out_channels) - 1 + + self.up_blocks.append( + UpDecoderBlock2D( + in_channels=prev_output_channel, + out_channels=output_channel, + groups=groups, + is_final_block=is_final_block, + rngs=rngs, + ) + ) + + prev_output_channel = output_channel + + self.conv_norm_out = nnx.GroupNorm( + num_groups=groups, num_features=block_out_channels[0], epsilon=1e-6, rngs=rngs + ) + + self.conv_out = nnx.Conv(block_out_channels[0], 3, kernel_size=(3, 3), strides=1, padding=1, rngs=rngs) + + def __call__(self, x): + x = self.conv_in(x) + x = self.mid_block(x) + for up_block in self.up_blocks: + x = up_block(x) + x = self.conv_norm_out(x) + x = nnx.silu(x) + x = self.conv_out(x) + + return x class VAE(nnx.Module): - """Full Variational Autoencoder model.""" - - def __init__(self, cfg: ModelConfig, *, rngs: nnx.Rngs): - logging.warning("This model does not load weights from a reference implementation.") - self.cfg = cfg - self.encoder = Encoder(cfg, rngs=rngs) - self.decoder = Decoder(cfg, rngs=rngs) - - def reparameterize(self, mu: jax.Array, logvar: jax.Array, key: jax.Array) -> jax.Array: - """Performs the reparameterization trick to sample from the latent space.""" - std = jnp.exp(0.5 * logvar) - epsilon = jax.random.normal(key, std.shape) - return mu + epsilon * std - - def __call__(self, x: jax.Array, sample_key: jax.Array) -> tuple[jax.Array, jax.Array, jax.Array]: - """Defines the forward pass of the VAE.""" - mu, logvar = self.encoder(x) - z = self.reparameterize(mu, logvar, sample_key) - reconstruction = self.decoder(z) - return reconstruction, mu, logvar - - -@partial(jax.jit, static_argnums=(0,)) -def forward(model, x, key): - return model(x, key) + def __init__(self, rngs: nnx.Rngs): + block_out_channels = [128, 256, 512, 512] + latent_channels = 4 + + self.encoder = Encoder(block_out_channels, 
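+        # The [128, 256, 512, 512] stages and 4-channel latent above match the SD VAE
+        # configuration (e.g. stabilityai/sd-vae-ft-mse). __call__ below decodes only the
+        # mean half of quant_conv's output, so reconstruction is deterministic (no sampling).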
rngs) + self.quant_conv = nnx.Conv( + in_features=2 * latent_channels, + out_features=2 * latent_channels, + kernel_size=(1, 1), + strides=(1, 1), + padding="VALID", + rngs=rngs, + ) + self.post_quant_conv = nnx.Conv( + in_features=latent_channels, + out_features=latent_channels, + kernel_size=(1, 1), + strides=(1, 1), + padding="VALID", + rngs=rngs, + ) + self.decoder = Decoder(latent_channels=latent_channels, block_out_channels=block_out_channels, rngs=rngs) + + def __call__(self, x): + x = self.encoder(x) + x = self.quant_conv(x) + mean, _ = jnp.split(x, 2, axis=-1) + x = self.post_quant_conv(mean) + x = self.decoder(x) + + return x + + +@jax.jit +def forward(model, x): + return model(x) diff --git a/bonsai/models/vae/params.py b/bonsai/models/vae/params.py index 04ea95c5..0545c9ee 100644 --- a/bonsai/models/vae/params.py +++ b/bonsai/models/vae/params.py @@ -1,27 +1,259 @@ +# Copyright 2025 The JAX Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import re + import jax +import safetensors.flax as safetensors +from etils import epath from flax import nnx -from bonsai.models.vae import modeling as vae_lib +from bonsai.models.vae import modeling as model_lib + +TO_JAX_CONV_2D_KERNEL = (2, 3, 1, 0) # (C_out, C_in, kH, kW) -> (kH, kW, C_in, C_out) +TO_JAX_LINEAR_KERNEL = (1, 0) + + +def _get_key_and_transform_mapping(): + return { + # encoder + ## conv in + r"^encoder.conv_in.weight$": (r"encoder.conv_in.kernel", (TO_JAX_CONV_2D_KERNEL, None)), + r"^encoder.conv_in.bias$": (r"encoder.conv_in.bias", None), + ## down blocks + r"^encoder.down_blocks.([0-3]).resnets.([0-1]).norm([1-2]).weight$": ( + r"encoder.down_blocks.\1.resnets.\2.norm\3.scale", + None, + ), + r"^encoder.down_blocks.([0-3]).resnets.([0-1]).norm([1-2]).bias$": ( + r"encoder.down_blocks.\1.resnets.\2.norm\3.bias", + None, + ), + r"^encoder.down_blocks.([0-3]).resnets.([0-1]).conv([1-2]).weight$": ( + r"encoder.down_blocks.\1.resnets.\2.conv\3.kernel", + (TO_JAX_CONV_2D_KERNEL, None), + ), + r"^encoder.down_blocks.([0-3]).resnets.([0-1]).conv([1-2]).bias$": ( + r"encoder.down_blocks.\1.resnets.\2.conv\3.bias", + None, + ), + r"^encoder.down_blocks.([1-2]).resnets.0.conv_shortcut.weight$": ( + r"encoder.down_blocks.\1.resnets.0.conv_shortcut.kernel", + (TO_JAX_CONV_2D_KERNEL, None), + ), + r"^encoder.down_blocks.([1-2]).resnets.0.conv_shortcut.bias$": ( + r"encoder.down_blocks.\1.resnets.0.conv_shortcut.bias", + None, + ), + r"^encoder.down_blocks.([0-2]).downsamplers.0.conv.weight$": ( + r"encoder.down_blocks.\1.downsamplers.kernel", + (TO_JAX_CONV_2D_KERNEL, None), + ), + r"^encoder.down_blocks.([0-2]).downsamplers.0.conv.bias$": (r"encoder.down_blocks.\1.downsamplers.bias", None), + ## mid block + r"^encoder.mid_block.attentions.0.group_norm.weight$": ( + r"encoder.mid_block.attentions.0.group_norm.scale", + None, + ), + r"^encoder.mid_block.attentions.0.group_norm.bias$": (r"encoder.mid_block.attentions.0.group_norm.bias", None), + r"^encoder.mid_block.attentions.0.query.weight$": ( + 
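+            # The checkpoint stores mid-block attention weights under the older diffusers
+            # names (query/key/value/proj_attn); they are remapped onto this port's
+            # to_q/to_k/to_v/to_out Linears, with the (out, in) -> (in, out) transpose.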
r"encoder.mid_block.attentions.0.to_q.kernel", + (TO_JAX_LINEAR_KERNEL, None), + ), + r"^encoder.mid_block.attentions.0.query.bias$": (r"encoder.mid_block.attentions.0.to_q.bias", None), + r"^encoder.mid_block.attentions.0.key.weight$": ( + r"encoder.mid_block.attentions.0.to_k.kernel", + (TO_JAX_LINEAR_KERNEL, None), + ), + r"^encoder.mid_block.attentions.0.key.bias$": (r"encoder.mid_block.attentions.0.to_k.bias", None), + r"^encoder.mid_block.attentions.0.value.weight$": ( + r"encoder.mid_block.attentions.0.to_v.kernel", + (TO_JAX_LINEAR_KERNEL, None), + ), + r"^encoder.mid_block.attentions.0.value.bias$": (r"encoder.mid_block.attentions.0.to_v.bias", None), + r"^encoder.mid_block.attentions.0.proj_attn.weight$": ( + r"encoder.mid_block.attentions.0.to_out.kernel", + (TO_JAX_LINEAR_KERNEL, None), + ), + r"^encoder.mid_block.attentions.0.proj_attn.bias$": (r"encoder.mid_block.attentions.0.to_out.bias", None), + r"^encoder.mid_block.resnets.([0-1]).conv([1-2]).weight$": ( + r"encoder.mid_block.resnets.\1.conv\2.kernel", + (TO_JAX_CONV_2D_KERNEL, None), + ), + r"^encoder.mid_block.resnets.([0-1]).conv([1-2]).bias$": (r"encoder.mid_block.resnets.\1.conv\2.bias", None), + r"^encoder.mid_block.resnets.([0-1]).norm([1-2]).weight$": (r"encoder.mid_block.resnets.\1.norm\2.scale", None), + r"^encoder.mid_block.resnets.([0-1]).norm([1-2]).bias$": (r"encoder.mid_block.resnets.\1.norm\2.bias", None), + ## conv norm out + r"^encoder.conv_norm_out.weight$": (r"encoder.conv_norm_out.scale", None), + r"^encoder.conv_norm_out.bias$": (r"encoder.conv_norm_out.bias", None), + ## conv out + r"^encoder.conv_out.weight$": (r"encoder.conv_out.kernel", (TO_JAX_CONV_2D_KERNEL, None)), + r"^encoder.conv_out.bias": (r"encoder.conv_out.bias", None), + # latent space + ## quant_conv + r"^quant_conv.weight$": (r"quant_conv.kernel", (TO_JAX_CONV_2D_KERNEL, None)), + r"^quant_conv.bias$": (r"quant_conv.bias", None), + ## post_quant_conv + r"^post_quant_conv.weight$": (r"post_quant_conv.kernel", (TO_JAX_CONV_2D_KERNEL, None)), + r"^post_quant_conv.bias$": (r"post_quant_conv.bias", None), + # decoder + ## conv in + r"^decoder.conv_in.weight$": (r"decoder.conv_in.kernel", (TO_JAX_CONV_2D_KERNEL, None)), + r"^decoder.conv_in.bias$": (r"decoder.conv_in.bias", None), + ## mid block + r"^decoder.mid_block.attentions.0.group_norm.weight$": ( + r"decoder.mid_block.attentions.0.group_norm.scale", + None, + ), + r"^decoder.mid_block.attentions.0.group_norm.bias$": (r"decoder.mid_block.attentions.0.group_norm.bias", None), + r"^decoder.mid_block.attentions.0.query.weight$": ( + r"decoder.mid_block.attentions.0.to_q.kernel", + (TO_JAX_LINEAR_KERNEL, None), + ), + r"^decoder.mid_block.attentions.0.query.bias$": (r"decoder.mid_block.attentions.0.to_q.bias", None), + r"^decoder.mid_block.attentions.0.key.weight$": ( + r"decoder.mid_block.attentions.0.to_k.kernel", + (TO_JAX_LINEAR_KERNEL, None), + ), + r"^decoder.mid_block.attentions.0.key.bias$": (r"decoder.mid_block.attentions.0.to_k.bias", None), + r"^decoder.mid_block.attentions.0.value.weight$": ( + r"decoder.mid_block.attentions.0.to_v.kernel", + (TO_JAX_LINEAR_KERNEL, None), + ), + r"^decoder.mid_block.attentions.0.value.bias$": (r"decoder.mid_block.attentions.0.to_v.bias", None), + r"^decoder.mid_block.attentions.0.proj_attn.weight$": ( + r"decoder.mid_block.attentions.0.to_out.kernel", + (TO_JAX_LINEAR_KERNEL, None), + ), + r"^decoder.mid_block.attentions.0.proj_attn.bias$": (r"decoder.mid_block.attentions.0.to_out.bias", None), + 
r"^decoder.mid_block.resnets.([0-1]).norm([1-2]).weight$": (r"decoder.mid_block.resnets.\1.norm\2.scale", None), + r"^decoder.mid_block.resnets.([0-1]).norm([1-2]).bias$": (r"decoder.mid_block.resnets.\1.norm\2.bias", None), + r"^decoder.mid_block.resnets.([0-1]).conv([1-2]).weight$": ( + r"decoder.mid_block.resnets.\1.conv\2.kernel", + (TO_JAX_CONV_2D_KERNEL, None), + ), + r"^decoder.mid_block.resnets.([0-1]).conv([1-2]).bias": (r"decoder.mid_block.resnets.\1.conv\2.bias", None), + ## up blocks + r"^decoder.up_blocks.([0-3]).resnets.([0-2]).norm([1-2]).weight$": ( + r"decoder.up_blocks.\1.resnets.\2.norm\3.scale", + None, + ), + r"^decoder.up_blocks.([0-3]).resnets.([0-2]).norm([1-2]).bias$": ( + r"decoder.up_blocks.\1.resnets.\2.norm\3.bias", + None, + ), + r"^decoder.up_blocks.([0-3]).resnets.([0-2]).conv([1-2]).weight$": ( + r"decoder.up_blocks.\1.resnets.\2.conv\3.kernel", + (TO_JAX_CONV_2D_KERNEL, None), + ), + r"^decoder.up_blocks.([0-3]).resnets.([0-2]).conv([1-2]).bias$": ( + r"decoder.up_blocks.\1.resnets.\2.conv\3.bias", + None, + ), + r"^decoder.up_blocks.([2-3]).resnets.0.conv_shortcut.weight$": ( + r"decoder.up_blocks.\1.resnets.0.conv_shortcut.kernel", + (TO_JAX_CONV_2D_KERNEL, None), + ), + r"^decoder.up_blocks.([2-3]).resnets.0.conv_shortcut.bias$": ( + r"decoder.up_blocks.\1.resnets.0.conv_shortcut.bias", + None, + ), + r"^decoder.up_blocks.([0-2]).upsamplers.0.conv.weight$": ( + r"decoder.up_blocks.\1.upsamplers.conv.kernel", + (TO_JAX_CONV_2D_KERNEL, None), + ), + r"^decoder.up_blocks.([0-2]).upsamplers.0.conv.bias$": (r"decoder.up_blocks.\1.upsamplers.conv.bias", None), + ## conv norm out + r"^decoder.conv_norm_out.weight$": (r"decoder.conv_norm_out.scale", None), + r"^decoder.conv_norm_out.bias$": (r"decoder.conv_norm_out.bias", None), + ## conv out + r"^decoder.conv_out.weight$": (r"decoder.conv_out.kernel", (TO_JAX_CONV_2D_KERNEL, None)), + r"^decoder.conv_out.bias$": (r"decoder.conv_out.bias", None), + } + + +def _st_key_to_jax_key(mapping, source_key): + """Map a safetensors key to exactly one JAX key & transform, else warn/error.""" + subs = [ + (re.sub(pat, repl, source_key), transform) + for pat, (repl, transform) in mapping.items() + if re.match(pat, source_key) + ] + if not subs: + logging.warning(f"No mapping found for key: {source_key!r}") + return None, None + if len(subs) > 1: + keys = [s for s, _ in subs] + raise ValueError(f"Multiple mappings found for {source_key!r}: {keys}") + return subs[0] + + +def _assign_weights(keys, tensor, state_dict, st_key, transform): + """Recursively descend into state_dict and assign the (possibly permuted/reshaped) tensor.""" + key, *rest = keys + if not rest: + if transform is not None: + permute, reshape = transform + if permute: + tensor = tensor.transpose(permute) + if reshape: + tensor = tensor.reshape(reshape) + if tensor.shape != state_dict[key].shape: + raise ValueError(f"Shape mismatch for {st_key}: {tensor.shape} vs {state_dict[key].shape}") + state_dict[key] = tensor + else: + _assign_weights(rest, tensor, state_dict[key], st_key, transform) + + +def _stoi(s): + try: + return int(s) + except ValueError: + return s -def create_model( - cfg: vae_lib.ModelConfig, - rngs: nnx.Rngs, +def create_model_from_safe_tensors( + file_dir: str, mesh: jax.sharding.Mesh | None = None, -) -> vae_lib.VAE: - """ - Create a VAE model with initialized parameters. 
+) -> model_lib.VAE: + """Load tensors from the safetensors file and create a VAE model.""" + files = list(epath.Path(file_dir).expanduser().glob("*.safetensors")) + if not files: + raise ValueError(f"No safetensors found in {file_dir}") - Returns: - A flax.nnx.Module instance with random parameters. - """ - model = vae_lib.VAE(cfg, rngs=rngs) + tensor_dict = {} + for f in files: + tensor_dict |= safetensors.load_file(f) + + vae = nnx.eval_shape(lambda: model_lib.VAE(rngs=nnx.Rngs(params=0))) + graph_def, abs_state = nnx.split(vae) + jax_state = abs_state.to_pure_dict() + + mapping = _get_key_and_transform_mapping() + + for st_key, tensor in tensor_dict.items(): + jax_key, transform = _st_key_to_jax_key(mapping, st_key) + if jax_key is None: + continue + keys = [_stoi(k) for k in jax_key.split(".")] + _assign_weights(keys, tensor, jax_state, st_key, transform) if mesh is not None: - # This part is for distributed execution, if needed. - graph_def, state = nnx.split(model) - sharding = nnx.get_named_sharding(model, mesh) - state = jax.device_put(state, sharding) - return nnx.merge(graph_def, state) + sharding = nnx.get_named_sharding(abs_state, mesh).to_pure_dict() + state_dict = jax.device_put(jax_state, sharding) else: - return model + state_dict = jax.device_put(jax_state, jax.devices()[0]) + + return nnx.merge(graph_def, state_dict) diff --git a/bonsai/models/vae/tests/VAE_image_reconstruction_example.ipynb b/bonsai/models/vae/tests/VAE_image_reconstruction_example.ipynb new file mode 100644 index 00000000..26737ee4 --- /dev/null +++ b/bonsai/models/vae/tests/VAE_image_reconstruction_example.ipynb @@ -0,0 +1,344 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "e320f2616490638c", + "metadata": {}, + "source": "\"Open" + }, + { + "cell_type": "markdown", + "id": "8e8e90310dd3bfef", + "metadata": {}, + "source": [ + "# **Image Reconstruction with VAE**\n", + "\n", + "This notebook demonstrates image reconstruction using the [Bonsai library](https://github.com/jax-ml/bonsai) and the [sd-vae-ft-mse](https://huggingface.co/stabilityai/sd-vae-ft-mse) weights." 
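As a condensed sketch of what this notebook walks through end to end (it assumes the stabilityai/sd-vae-ft-mse checkpoint and the create_model_from_safe_tensors loader added in params.py above; "example.jpg" is a placeholder input path):

import jax.numpy as jnp
import numpy as np
from huggingface_hub import snapshot_download
from PIL import Image

from bonsai.models.vae import params

# Download the diffusers checkpoint and build the Flax NNX VAE from its safetensors.
ckpt_dir = snapshot_download("stabilityai/sd-vae-ft-mse")
vae = params.create_model_from_safe_tensors(file_dir=ckpt_dir)

# Preprocess one RGB image to NHWC in [-1, 1], reconstruct, and map back to uint8.
img = Image.open("example.jpg").convert("RGB").resize((256, 256))  # placeholder path
x = jnp.asarray(np.asarray(img, dtype=np.float32) / 255.0 * 2.0 - 1.0)[None, ...]
recon = vae(x)  # (1, 256, 256, 3), roughly in [-1, 1]
recon_uint8 = np.asarray((jnp.clip(recon[0], -1.0, 1.0) + 1.0) / 2.0 * 255).astype(np.uint8)
recon_img = Image.fromarray(recon_uint8)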
+ ] + }, + { + "cell_type": "markdown", + "id": "457a9ff4dbb654d7", + "metadata": {}, + "source": "## **Set-up**" + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b1ffef9ca9c37a5b", + "metadata": {}, + "outputs": [], + "source": [ + "!pip install -q git+https://github.com/eari100/bonsai@vae-weights-and-tests\n", + "!pip install -q pillow matplotlib requests\n", + "!pip install -q scikit-image" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2c22813e853610af", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import zipfile\n", + "\n", + "import jax\n", + "import jax.numpy as jnp\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "import requests\n", + "from PIL import Image\n", + "from skimage.metrics import peak_signal_noise_ratio as psnr\n", + "from skimage.metrics import structural_similarity as ssim\n", + "from tqdm import tqdm\n", + "\n", + "print(f\"JAX version: {jax.__version__}\")\n", + "print(f\"JAX device: {jax.devices()[0].platform}\")" + ] + }, + { + "cell_type": "markdown", + "id": "7efb43325c1f570c", + "metadata": {}, + "source": "## **Download Sample Images**" + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a88906f466af2a35", + "metadata": {}, + "outputs": [], + "source": [ + "def download_coco_test_set(dest_folder=\"./coco_val2017\"):\n", + " if not os.path.exists(dest_folder):\n", + " os.makedirs(dest_folder)\n", + "\n", + " url = \"http://images.cocodataset.org/zips/val2017.zip\"\n", + " target_path = os.path.join(dest_folder, \"val2017.zip\")\n", + "\n", + " print(f\"Downloading {url}...\")\n", + " response = requests.get(url, stream=True)\n", + " total_size = int(response.headers.get(\"content-length\", 0))\n", + "\n", + " with (\n", + " open(target_path, \"wb\") as f,\n", + " tqdm(\n", + " desc=\"Progress\",\n", + " total=total_size,\n", + " unit=\"iB\",\n", + " unit_scale=True,\n", + " unit_divisor=1024,\n", + " ) as bar,\n", + " ):\n", + " for data in response.iter_content(chunk_size=1024):\n", + " size = f.write(data)\n", + " bar.update(size)\n", + "\n", + " print(\"\\nExtracting files...\")\n", + " with zipfile.ZipFile(target_path, \"r\") as zip_ref:\n", + " zip_ref.extractall(dest_folder)\n", + "\n", + " os.remove(target_path)\n", + " print(f\"Done! 
Images are saved in: {os.path.abspath(dest_folder)}\")\n", + "\n", + "\n", + "download_coco_test_set()" + ] + }, + { + "cell_type": "markdown", + "id": "6beb39b427edc794", + "metadata": {}, + "source": "## **Load VAE Model**" + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "68cb1cd2df9448ef", + "metadata": {}, + "outputs": [], + "source": [ + "from flax import nnx\n", + "from huggingface_hub import snapshot_download\n", + "\n", + "from bonsai.models.vae import modeling as model_lib\n", + "from bonsai.models.vae import params\n", + "\n", + "\n", + "def load_vae_model():\n", + " model_name = \"stabilityai/sd-vae-ft-mse\"\n", + "\n", + " print(f\"Downloading {model_name}...\")\n", + " model_ckpt_path = snapshot_download(model_name)\n", + " print(\"Download complete!\")\n", + "\n", + " model = params.create_model_from_safe_tensors(file_dir=model_ckpt_path)\n", + "\n", + " print(\"VAE model loaded_successfully!\")\n", + "\n", + " return model" + ] + }, + { + "cell_type": "markdown", + "id": "f6d864491e958d33", + "metadata": {}, + "source": "## **Image Preprocessing**" + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d314a794e16df46f", + "metadata": {}, + "outputs": [], + "source": [ + "def preprocess(image):\n", + " image = image.convert(\"RGB\").resize((256, 256))\n", + "\n", + " # normalization: [0, 255] -> [0, 1] -> [-1, 1]\n", + " image = np.array(image).astype(np.float32) / 255.0\n", + " image = (image * 2.0) - 1.0\n", + "\n", + " # add dimension: (256, 256, 3) -> (1, 256, 256, 3)\n", + " return jnp.array(image[None, ...])" + ] + }, + { + "cell_type": "markdown", + "id": "5265f8b4e749fbb6", + "metadata": {}, + "source": "## **Image Postproessing**" + }, + { + "cell_type": "code", + "execution_count": null, + "id": "996fdd6129d42d60", + "metadata": {}, + "outputs": [], + "source": [ + "def postprocess(tensor):\n", + " # restoration\n", + " tensor = jnp.clip(tensor, -1.0, 1.0)\n", + " tensor = (tensor + 1.0) / 2.0\n", + " tensor = (tensor * 255).astype(np.uint8)\n", + "\n", + " # (1, 256, 256, 3) -> (256, 256, 3)\n", + " return Image.fromarray(np.array(tensor[0]))" + ] + }, + { + "cell_type": "markdown", + "id": "37e8fe3fd0967830", + "metadata": {}, + "source": "## **Run Reconstruct on Sample Images**" + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5330754683ccd68e", + "metadata": {}, + "outputs": [], + "source": [ + "vae = load_vae_model()\n", + "\n", + "dest_folder = \"./coco_val2017\"\n", + "image_dir = os.path.join(dest_folder, \"val2017\")\n", + "\n", + "if not os.path.exists(image_dir):\n", + " raise FileNotFoundError(f\"Could not find images folder: {image_dir}\")\n", + "\n", + "image_files = [f for f in os.listdir(image_dir) if f.lower().endswith((\".jpg\", \".jpeg\", \".png\", \".JPEG\"))][:5]\n", + "\n", + "if not image_files:\n", + " raise Exception(\"There are no image files in the folder.\")\n", + "\n", + "psnr_scores = []\n", + "ssim_scores = []\n", + "\n", + "fig, axes = plt.subplots(5, 2, figsize=(10, 25))\n", + "plt.subplots_adjust(hspace=0.3)\n", + "\n", + "for i, file_name in enumerate(image_files):\n", + " img_path = os.path.join(image_dir, file_name)\n", + " raw_img = Image.open(img_path).convert(\"RGB\")\n", + "\n", + " input_tensor = preprocess(raw_img)\n", + " reconstructed_tensor = vae(input_tensor)\n", + " reconstructed_img = postprocess(reconstructed_tensor)\n", + "\n", + " original_resized = raw_img.resize((256, 256))\n", + "\n", + " # convert unit8 to numpy array\n", + " orig_np = 
np.array(original_resized)\n", + " recon_np = np.array(reconstructed_img)\n", + "\n", + " # PSNR, SSIM calculation\n", + " p_score = psnr(orig_np, recon_np, data_range=255)\n", + " s_score = ssim(orig_np, recon_np, channel_axis=2, data_range=255)\n", + "\n", + " psnr_scores.append(p_score)\n", + " ssim_scores.append(s_score)\n", + "\n", + " # visualization\n", + " axes[i, 0].imshow(original_resized)\n", + " axes[i, 0].set_title(f\"Original: {file_name}\")\n", + " axes[i, 0].axis(\"off\")\n", + "\n", + " axes[i, 1].imshow(reconstructed_img)\n", + " axes[i, 1].set_title(f\"Reconstructed\\nPSNR: {p_score:.2f}, SSIM: {s_score:.4f}\")\n", + " axes[i, 1].axis(\"off\")\n", + "\n", + "plt.tight_layout()\n", + "plt.show()\n", + "\n", + "print(f\"\\n{'=' * 40}\")\n", + "print(\"--- Final Reconstruction Quality Report (N=5) ---\")\n", + "print(f\"Average PSNR: {np.mean(psnr_scores):.2f} dB\")\n", + "print(f\"Average SSIM: {np.mean(ssim_scores):.4f}\")\n", + "print(f\"{'=' * 40}\")" + ] + }, + { + "cell_type": "markdown", + "id": "3b8f6910319ce5a6", + "metadata": {}, + "source": "## **Batch Processing**" + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4ef3369f8a78df64", + "metadata": {}, + "outputs": [], + "source": [ + "def batch_reconstruct_vae(vae, image_paths):\n", + " # 1. Preprocessing and batch stacking\n", + " input_tensors = []\n", + " original_images_resized = []\n", + "\n", + " for path in image_paths:\n", + " raw_img = Image.open(path).convert(\"RGB\")\n", + " original_resized = raw_img.resize((256, 256))\n", + " original_images_resized.append(original_resized)\n", + "\n", + " tensor = preprocess(raw_img)\n", + " # Assuming the result is in the form [B, H, W, C]\n", + " input_tensors.append(tensor[0])\n", + "\n", + " batch_tensor = jnp.stack(input_tensors)\n", + "\n", + " # 2. Inference\n", + " recon_batch = vae(batch_tensor)\n", + "\n", + " # 3. 
Results processing and indicator calculator\n", + " batch_results = []\n", + "\n", + " for i in range(len(image_paths)):\n", + " recon_img = postprocess(recon_batch[i : i + 1])\n", + "\n", + " orig_np = np.array(original_images_resized[i])\n", + " recon_np = np.array(recon_img)\n", + "\n", + " p_val = psnr(orig_np, recon_np, data_range=255)\n", + " s_val = ssim(orig_np, recon_np, channel_axis=2, data_range=255)\n", + "\n", + " batch_results.append(\n", + " {\n", + " \"name\": os.path.basename(image_paths[i]),\n", + " \"recon_img\": recon_img,\n", + " \"orig_img\": original_images_resized[i],\n", + " \"psnr\": p_val,\n", + " \"ssim\": s_val,\n", + " }\n", + " )\n", + "\n", + " return batch_results\n", + "\n", + "\n", + "print(\"\\n\" + \"=\" * 50)\n", + "print(\"VAE BATCH RECONSTRUCTION RESULTS\")\n", + "print(\"=\" * 50)\n", + "\n", + "target_paths = [os.path.join(image_dir, f) for f in image_files[:5]]\n", + "results = batch_reconstruct_vae(vae, target_paths)\n", + "\n", + "all_psnr = []\n", + "all_ssim = []\n", + "\n", + "for i, res in enumerate(results):\n", + " print(f\"[{i + 1}] {res['name']}: PSNR={res['psnr']:.2f}dB, SSIM={res['ssim']:.4f}\")\n", + " all_psnr.append(res[\"psnr\"])\n", + " all_ssim.append(res[\"ssim\"])\n", + "\n", + "print(\"-\" * 50)\n", + "print(f\"Batch Average PSNR: {np.mean(all_psnr):.2f} dB\")\n", + "print(f\"Batch Average SSIM: {np.mean(all_ssim):.4f}\")" + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/bonsai/models/vae/tests/VAE_segmentation_example.ipynb b/bonsai/models/vae/tests/VAE_segmentation_example.ipynb deleted file mode 100644 index 5fe13196..00000000 --- a/bonsai/models/vae/tests/VAE_segmentation_example.ipynb +++ /dev/null @@ -1,315 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# **Generative Modeling with a Variational Autoencoder (VAE)**\n", - "\n", - "This notebook demonstrates how to build, train, and use a Variational Autoencoder (VAE) model from the Bonsai library to generate new images of handwritten digits.\n", - "\n", - "*This colab demonstrates the VAE implementation from the [Bonsai library](https://github.com/jax-ml/bonsai).*" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## **1. Setup and Imports**\n", - "First, we'll install the necessary libraries and import our modules." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!pip install -q git+https://github.com/jax-ml/bonsai@main\n", - "!pip install -q tensorflow-datasets matplotlib\n", - "!pip install tensorflow -q\n", - "!pip install --upgrade flax -q" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "import sys\n", - "\n", - "import jax\n", - "import jax.numpy as jnp\n", - "import matplotlib.pyplot as plt\n", - "import tensorflow as tf\n", - "import tensorflow_datasets as tfds\n", - "from flax import nnx\n", - "\n", - "from bonsai.models.vae import modeling\n", - "\n", - "os.chdir(\"/home/neo/Downloads/CODE_Other_Models/bonsai/bonsai/models/vae\")\n", - "sys.path.append(\"/home/neo/Downloads/CODE_Other_Models/bonsai\")\n", - "\n", - "\n", - "import sys\n", - "from pathlib import Path\n", - "\n", - "# Add the bonsai root to Python path for imports\n", - "bonsai_root = Path.home()\n", - "sys.path.insert(0, str(bonsai_root))\n", - "\n", - "# Now you can import from the bonsai package without changing directories\n", - "from bonsai.models.vae import modeling as vae_lib\n", - "from bonsai.models.vae import params as params_lib" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## **2. Load and Preprocess Data**\n", - "\n", - "We'll use the classic MNIST dataset of handwritten digits. We need to normalize the pixel values to the `[0, 1]` range, which is important for the VAE's reconstruction loss." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Loading 10 MNIST test images...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING: All log messages before absl::InitializeLog() is called are written to STDERR\n", - "I0000 00:00:1758719157.283911 161743 gpu_device.cc:2020] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 4741 MB memory: -> device: 0, name: NVIDIA GeForce RTX 2060, pci bus id: 0000:01:00.0, compute capability: 7.5\n", - "2025-09-24 10:05:57.456527: W tensorflow/core/kernels/data/cache_dataset_ops.cc:917] The calling iterator did not fully read the dataset being cached. In order to avoid unexpected truncation of the dataset, the partially cached contents of the dataset will be discarded. This can happen if you have an input pipeline similar to `dataset.cache().take(k).repeat()`. 
You should use `dataset.take(k).cache().repeat()` instead.\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "✅ Loaded a batch of 10 images with shape: (10, 28, 28, 1)\n" - ] - } - ], - "source": [ - "import sys\n", - "from pathlib import Path\n", - "\n", - "\n", - "bonsai_root = Path.home()\n", - "if str(bonsai_root) not in sys.path:\n", - " sys.path.insert(0, str(bonsai_root))\n", - "\n", - "\n", - "# --- Load 10 images from the MNIST test set ---\n", - "print(\"Loading 10 MNIST test images...\")\n", - "ds = tfds.load(\"mnist\", split=\"test\", as_supervised=True)\n", - "images_list = []\n", - "labels_list = []\n", - "\n", - "for image, label in ds.take(10):\n", - " # Preprocess: convert to float32 and normalize\n", - " single_image = tf.cast(image, tf.float32) / 255.0\n", - " images_list.append(single_image.numpy())\n", - " labels_list.append(label.numpy())\n", - "\n", - "# Stack the images into a single batch\n", - "image_batch = jnp.stack(images_list, axis=0)\n", - "\n", - "print(f\"Loaded a batch of 10 images with shape: {image_batch.shape}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## **3.Define Model**\n", - "\n", - "Here we'll configure and instantiate our VAE model." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "Creating a new model with random weights...\n", - "New model created successfully!\n" - ] - } - ], - "source": [ - "# --- Create a randomly initialized model ---\n", - "print(\"\\nCreating a new model with random weights...\")\n", - "\n", - "rngs = nnx.Rngs(params=0, sample=1)\n", - "config = modeling.ModelConfig(input_dim=28 * 28, hidden_dims=(512, 256), latent_dim=20)\n", - "model = params_lib.create_model(cfg=config, rngs=rngs) # This is all you need!\n", - "\n", - "print(\"New model created successfully!\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## **4. Reconstruct the Input**\n", - "\n", - "This function performs a full forward pass: image -> encode -> sample -> decode" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "Running inference to reconstruct images...\n", - "Reconstruction complete.\n" - ] - } - ], - "source": [ - "# --- Define the JIT-compiled reconstruction function ---\n", - "@jax.jit\n", - "def reconstruct(model: vae_lib.VAE, batch: jax.Array, sample_key: jax.Array):\n", - " \"\"\"Encodes and decodes an image batch using the trained VAE.\"\"\"\n", - " # The model now outputs logits\n", - " reconstruction_logits_flat, _, _ = model(batch, sample_key=sample_key)\n", - "\n", - " reconstructed_probs_flat = jax.nn.sigmoid(reconstruction_logits_flat)\n", - "\n", - " # Reshape the flat output back to the original image shape\n", - " return reconstructed_probs_flat.reshape(batch.shape)\n", - "\n", - "\n", - "# Get a random key for the reparameterization trick\n", - "sample_key = rngs.sample()\n", - "\n", - "print(\"\\nRunning inference to reconstruct images...\")\n", - "reconstructed_images = reconstruct(model, image_batch, sample_key)\n", - "print(\"Reconstruction complete.\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## **5. Show Reconstruction**\n", - "\n", - "We'll create a single, JIT-compiled function to perform one step of training. 
This function computes the loss, calculates gradients, and applies them to update the model's parameters." - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Displaying results...\n" - ] - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAABdEAAAFbCAYAAAA6McenAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjYsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvq6yFwwAAAAlwSFlzAAAPYQAAD2EBqD+naQAAiI1JREFUeJzt3Xd0VNX39/EdICSEAKH3XqV3kN577yoSiigIVrCLFEV/KgIqIiIYijRFEVTsIE06SJMuvYfe633+8Em+3Hv2JZPJhGSS92st1/LsnJm5mTlz7p2T4XwCLMuyBAAAAAAAAAAAGFIk9AEAAAAAAAAAAJBYsYgOAAAAAAAAAIALFtEBAAAAAAAAAHDBIjoAAAAAAAAAAC5YRAcAAAAAAAAAwAWL6AAAAAAAAAAAuGARHQAAAAAAAAAAFyyiAwAAAAAAAADggkV0AAAAAAAAAABcsIgOAEhyevToIQEBAdKtWzeP+o8ZM0YCAgKkZMmSxs9u3LghWbNmlYCAAMmRI4fcunXrnvdVr149CQgIiPG/YcOGefz79OzZUwICAqRnz54e38YTERERUrlyZUmbNm30ce3fv9+njwHP/fPPP/LUU09JqVKlJEOGDJImTRopUKCAPPzww/LTTz/F+f6jxuaff/4Z94MVkSlTpsTLuPTX4/CV/fv3q3NGSEiIFC1aVPr06SNbtmxJ6MNEEuDNnDBs2LDoMZk1a1a5efOma99jx45JqlSpovt/+eWXtp9HvXcDAgKkbNmycufOHfV+li9fLgEBAVKgQAHjZwUKFJCAgACZMmWK8bM7d+7IlClTpHHjxpItWzYJDAyUTJkySbFixaRNmzby3nvvRZ/z7j6W2PynPa6Tdt+pU6eWLFmySMmSJeXhhx+WiRMnyoULF1zv488//5SAgACpV69ejI/nqajnjvM+AACeSZXQBwAAgK/16dNHpk+fLt99952cPXtWMmbMeM/+ERER0bdzmj9/vkRGRoqIyIkTJ+THH3+Utm3bxngM5cqVk/Lly7v+/F4/ux9+/PFH6d27twQHB0ujRo0kc+bMIiISGhqaoMeVHFmWJUOGDJH/+7//k9u3b0uuXLmkfv36EhQUJNu3b5dZs2bJrFmzpEWLFjJr1ixJnz59Qh8y7pOOHTtGvyePHTsma9askS+++EKmTZsmc+bMkQ4dOiTwESY+w4YNk+HDh8vQoUNj9cfKhLJ//34pWLCg5M+f3+8WMyMjI2XBggXSsWNH9edTp06V27dve3RfW7ZskS+//FJ69Ojhk2O7fPmytG7dWhYvXiwiIhUrVpQ6depIypQp5d9//5Wff/5Zvv/+ewkJCZGBAwdKkSJFJDw83Lif5cuXy969e6Vw4cJSq1Yt4+dFihTx+JjSpk0rnTp1EpH/FvjPnz8v//77r8yZM0dmzZolzz//vLz99tvy1FNPSUBAgJe/edz17NlTpk6dKhEREUnmj5MAAPgCi+gAgCSnTp06UqRIEdmzZ4/MmDFDBg4c6Np37dq1smXLFgkMDJRHH33U+PnkyZNFRCR37txy5MgRmTx5skeL6O3atUvUCzhff/21iIh89NFH0rdv3wQ+muTt+eefl7Fjx0pwcLB8/vnn0f/yIMqqVauke/fusnDhQmnSpIksXbpUUqdOHevHmTZtmly5ckXy5cvnk+Nu3769VK9eXTJkyOCT+4Np1KhRtm/fnj59Wtq0aSN//fWXPPHEE9K8eXNJkyZNwh0gkq3KlSvLunXr5IsvvnBdRI+IiJCgoCApXry4bN682fW+QkJC5MqVK/LGG29I165dJSgoKM7HN2zYMFm8eLHkypVLfvrpJylbtqzt5+fPn5dvvvlGcubMKSIitWrVUhfJe/bsKXv37pVatWp59K3ze8mSJYt6H8eOHZP33ntPPvzwQ3nmmWfk8OHD8t5779n6VK1aVbZv3y4hISFxOoa7/fHHH3Lz5k3JnTu3z+4TAICkjO1cAABJTkBAgPTu3VtE/vctczdRP2/VqpVky5bN9rNDhw7Jb7/9JilTppSvvvpKAgICZOHChXLs2LH4OfD76ODBgyIiUrRo0QQ+kuTtt99+k7Fjx4qIyOzZs6VXr17GNxCrV68uixcvlowZM8rq1avlzTff9Oqx8uXLJyVKlPDZIkyGDBmkRIkS0YtQiH+ZM2eW999/X0T++xbwypUrE/iIkFyVK1dOKlasKL/88oscPXrU+PmyZctk165d0q5duxj/NdiDDz4oVatWlQMHDsj48eN9cnyzZ88WEZGhQ4caC+gi/81fvXv3lubNm/vk8eIiZ86cMmbMGBk3bpyIiLz//vuybNkyW5+QkBApUaKEz/4IKiJSuHBhKVGihAQGBvrsPgEASMpYRAcAJEk9e/aUlClTyoYNG1y/AXft2jWZNWuWiOhbuXzxxRdy584dad68udSoUUMaNGggt2/flqlTp8brsXvq7j1Sb968Ke+++66UKlVK0qRJI5kzZ5YOHTrI9u3bbbeJ+pZz1D9xr1+/fvQerc5/tn306FF5/vnn5YEHHpCQkBBJly6dVKlSRcaNG6fuDR9131OmTJGtW7dK165dJWfOnJIyZUrbt/Jv3bolkyZNknr16kmmTJkkKChIChYsKP3795dDhw755Pe829mzZ2XEiBFSuXLl6L3GCxUqJF26dFH3Go/t8cXF22+/LSIirVu3vue/cMibN68MGTJERP771wMXL16M/lnUHtoFChSQ27dvy+jRo6VChQoSGhpqW5C/1/7Hly9fliFDhkjRokUlKChIcuXKJb1795YjR45E74Hs/JcVbnuRx+X1+v333+Wpp56S8uXLS5YsWSQoKEjy5MkjXbt2lbVr197rqfTYL7/8IgEBAfLAAw+49rl165bkyJFDAgICZNOmTdH13bt3S+/evaVgwYISFBQkoaGhkj9/fmnZsmWMf7DzlbsXBE+cOKEeuzfj98iRI/LCCy9ImTJlJF26dJI2bVopVqyY9OzZU/766y+j/+HDh+Wpp56SokWLSnBwsGTIkEFq1qwpn332mbqFx93j5fLly/LKK69IkSJFJCgoSHLkyCHh4eFy5MgR9dh+//13ad26tWTPnl0CAwMlY8aMUrRoUenevbssXbo0ul9AQIAMHz5cRESGDx9u24P67nF6917Q8+fPlwYNGkimTJls74+Y9gt3e19EWb9+vYSHh0vBggUlODhYMmXKJOXKlZMXXnhBDhw
4ICL/zZkFCxYUEZEDBw4Y+2Zr9/nII49Ivnz5JCgoSDJlyiRNmzaVhQsXqscg8t8fg3v37i05c+aU4OBgKVq0qLz22mty9epV19t4qnfv3q7nxC+++CK6jyfeffddEREZOXLkPfcG91TUe8P5x/HE7Mknn5QqVaqIiBjfRI9pT/Tly5dLs2bNJCwsTEJDQ6VKlSoybdo0ERHX8eTcEz3qXBL1ekb9UVfLckkMcyEAAPcbi+gAgCQpZ86c0qJFCxH535YsTt9++62cO3dOcuXKJc2aNbP9zLKs6A+CUYsAnn67/X67efOmtGjRQkaMGCH58uWTli1bStq0aWXevHlSo0YN2z67tWrVkvDwcMmePbuIiDRt2lTCw8MlPDzc9k/Zly5dKqVLl5YxY8bItWvXpHHjxlKzZk3Zu3evPPXUU9KyZUvXQLm//vpLKleuLGvWrJE6depIy5YtJV26dCIicvHiRWncuLH07dtX1q9fL2XLlpU2bdpIUFCQTJgwQSpUqCAbN26M8+8ZZdOmTVKmTBkZOnSo7NmzR2rVqiVt27aVHDlyyA8//BC9cBPF2+PzJvz17Nmz0QuAnuwDHLXd0IULF9SFPcuypEOHDvLKK69I5syZpU2bNuo3MJ0uX74s9evXl7feekuOHz8uTZo0kVq1asnPP/8sFStWjF7wiy1vXq9+/frJxIkTJUWKFFKzZk1p1aqVZMiQQb766iupUaOGfPPNN14dy90aN24sefLkkR07dsiqVavUPj/99JOcOHFCKlasKOXKlRMRka1bt0rlypWjt6ho1aqVtGjRQnLnzi1Lly6VDz/8MM7H5om7Fxij3sdRvB2/f/zxh5QuXVpGjRolJ0+elIYNG0rLli0lLCxMZs6cKRMnTrT1X7t2rZQrV07GjRsnN27ckHbt2kmNGjVkw4YN0q9fP2nZsqXcuHFDPf7z589LjRo1ZMKECVKyZElp3ry5WJYl06ZNk5o1a8r58+dt/adOnSpNmjSRH3/8UQoWLCgdO3aUOnXqSPr06WX27Nny7bffRvcNDw+Pfr3KlSsXPbc557coH3zwgbRr104uXrwozZo1k7p160rKlCljeAVi9v7770vVqlVl2rRpkjp1amnbtq3UqlVLbt68KaNGjYr+I2atWrWit0JJmzat7Xid+3N/+OGHUrVqVZk5c2b0+7tUqVLy559/SsuWLWXEiBHGcezYsSN6zAYEBEibNm2kWLFiMmbMGGnYsKHra+Sphx9+WIKDg41z4sWLF+Xrr7+WfPnySaNGjTy6r3r16knz5s3l9OnTxrzsjahvbE+YMEGuX78e5/u7X7p37y4i/y2axxRkHmX27NlSt25d+eWXXyRfvnzSpk0bCQkJkV69esnLL7/s8WOHhoZKeHi4FC5cWEREatasaRuPUVkuiWUuBADgvrMAAEiivvvuO0tErMyZM1vXr183ft6oUSNLRKxXX33V+Nmvv/5qiYiVLVs268aNG5ZlWdbVq1etsLAwS0SspUuXqo9Zt25dS0SsoUOH+uz3CA8Pt0TECg8Pt9UXL15siYglIlaFChWsY8eORf/s6tWrVtOmTS0RsR5//HHX41y8eLHxs2PHjlmZM2e2AgICrPHjx1u3b9+O/llkZKTVoEEDS0Ss4cOHq8cpItbLL79su12Uhx9+2BIRq1WrVtaJEydsPxszZowlIlbRokWtW7duxfn3vHTpkpU3b15LRKwePXpYFy9etP383Llz1m+//Rbn47v7d3e+Rvfyxx9/RP9eBw4c8Og2BQsWtETEeuONN6Jr+/bti76fPHnyWDt37lRv6/aaP/fcc5aIWCVLlrSOHj0aXb969arVqVOn6Pt2jumIiAifj8t58+ZZZ86cUeupUqWyMmfObF25csWj47iX1157zRIR64knnlB/3r59e0tErI8//ji61qtXL0tErLfeesvof+XKFWvJkiUeP/693P167tu3z/j5xIkTLRGxsmbNajwX3ozfgwcPWhkyZIh+3zrnyhMnTljLli2Lbl+7ds3Knz+/JSJWv379oudHy7KsvXv3WgUKFFDn1ajXSUSspk2bWufPn4/+2ZkzZ6zy5ctbImK9/fbbtttFjfm7j+HuY9uwYYOtNnTo0Bjn4KjjT5kypTV//ny1z73myHs9zvz58y0RsYKDg605c+YYt9u2bZv1zz//RLejXu/8+fO7Hu/PP/9sBQQEWFmyZDHG2ebNm608efJYImL9+eeftp9VqVLFEhGrS5cu1tWrV6PrBw4csAoXLhz9erj9jvf6vfv06WNZlmU99NBDxjnx888/t81TUc/l9OnTbfcVNSYaNmxoWZZl/f3331aKFCmskJAQ21y0bNky1+co6rWMiIiw1aPGu4hY2bNnt/r27WtNnjzZ2rBhgzF/34s3c7tT1O95r9c4yvLly6OPe8+ePdH1qHm1bt26tv5HjhyxQkNDLRGxPvzwQ9vPlixZYqVNmzb6/pyinjvnPBP1Ozuf0yj3ay4EACCx4ZvoAIAkq2XLlpIjRw45ffq0LFiwwPazgwcPyqJFi0Tkv3+y7BT17fVHH300er/Q4OBgefjhh20/d+PcSsD5399//x3XXy9aQECARERESI4cOaJrwcHB0dsa/P7777G6v7Fjx8rp06dlwIAB0r9/f0mR4n+XC5kzZ5Zp06ZJYGCgjBs3TizLMm5frFgxeeutt2y3ExHZvn27zJo1S3LlyiUzZ840/pn9s88+Ky1atJDdu3er26zE9vecNGmSHDp0SMqXLy9ffPGFhIaG2n6eIUMG27ck43J8OXPmlOLFi8dqf/BTp05F/7/zG8Vuovrdfdu7vf3221KsWDGPj+Hq1avy+eefi4jImDFjbMcfHBws48eP93oPdW/Gpdv+ye3atZPOnTvL6dOno7/FGxdR7/nZs2fLtWvXbD87deqU/PDDDxIUFBT9fhf53/YQUf/C5W5p0qSROnXqxPm47uX48eMyefJkeeGFF6K//Xt3qKi343f06NFy/vx5ad26tbzzzjtGaG22bNls3+L++uuv5cCBA5IrVy4ZO3asbT/lQoUKyahRo0RE5OOPPzaeW5H/vnEdEREh6dOnj65lzJgx+huzznFx4sQJyZAhg/pN8mzZskmFChXcn7QYhIeHS5s2bby+vWbo0KEi8t+2JF26dDF+XrJkyXtuJeR2n5ZlyYQJE4xxVqZMGRk9erSI/PecR1mxYoWsXbtW0qZNK+PHj5fg4ODon+XLly/6dYqrqH+hFbV9i8h/58eAgAD13Hov5cqVk4cffliuXLkSPU9469lnn5WRI0dK2rRp5cSJE/L5559Lnz59pGLFipIxY0YJDw+XnTt3xukx4kOWLFmi///06dMx9p88ebJcunRJHnzwQXn66adtP6tTp47079/f58eY0HMhAAAJhUV0AECSlSpVquh/En/3B3yR/7ZkuXPnjtStW1eKFCli+9np06flu+++ExFzP9eo9tdff23bl9rJuZWA879MmTLF9deLli9fvugtDO4WtVDjts+wmx9//FFERLp27ar+PHfu3FK0aFE5de
qU7N692/h5u3bt1C0RFi5cKJZlSfPmzaO3d3GK2u9V24M5tr/nzz//LCL/7XfvyRYNcTm+d955R3bs2CHvvPNOjI8TF9ofLe4WtTWEp9avXy+XLl2SLFmySJMmTYyfZ82aVRo3bhyr+4zi7bg8evSofP755zJo0CB57LHHpGfPntKzZ0/Ztm2biIhPFr4KFy4sderUkfPnz8u8efNsP5sxY4bcvHlT2rZta3ufVq1aVURE+vfvL7/88ou6QOxrBQsWjP7DW86cOeWxxx6TwMBAWbNmjbRs2dLW19vxG/U+efzxxz06pqithLp16yZBQUHGzzt06CAZM2aUixcvyvr1642fV65cWf1jk9u4qFq1qpw/f1569Ogh69evlzt37nh0nJ7o1KmTz+5L5L8/dPz999+SIkUKNWfDG5GRkbJmzRpJkyaNtG7dWu2jva5Rr1OzZs0kc+bMxm3atm0rGTJkiPPxNWzYUPLnzy9ff/21XLp0SbZv3y6rVq2S+vXrS4ECBWJ9f2+++aakTp1aJk+eLLt27YrTsb366qty+PBhmTJlivTq1UvKlSsnKVOmlIsXL8q0adOkQoUK99xPPiHcPb61fcydlixZIiIijzzyiPpzt3pcJMRcCABAYpAqoQ8AAID41Lt3b3n33Xfl119/lSNHjkju3LnFsiyZMmWKiOiBol9++aVcv35dqlWrJiVLlrT9rFKlSlK2bFnZvHmzzJ49W/r27as+brt27VwD53wtau9Xp6hvesZ2P9h///1XRERq164dY99Tp04Z33x2WziJut/JkyfH+E1+7ZvWsf09o/byLlGixD0fy1fHF1t3f+PwxIkTrr/f3U6ePCki/y1uO2XLli3W3xo/fPiwiLi/ZjH97F68GZfDhw+XkSNHuu63LyI+CR0U+W9uWLp0qURERMhDDz0UXY/a39n5LdoXXnhBli9fLr///rs0a9ZMAgMDpVy5clKnTh3p1q1bdCCgL3Xs2FFCQ0Pl9u3bcujQIVm+fLlERkZKly5dZMWKFbZFfm/Hb2zfJ1GL3FGBmE4BAQFSsGBBOXv2rPqHkpjGhXNBbvz48dKqVSuZPn26TJ8+PTrguEGDBvLoo4969L5x4+3YdnPw4EER+e9fpvhigVpEZN++fWJZlly9elX9o8Xd7n5do97b93qdChQoYAvO9UZUFsTw4cNlzpw5smPHDhHxPFDUqUCBAvLkk0/K2LFj5dVXX5W5c+fG6fjCwsJse8yfPXtW5s2bJ6+//rocO3ZMwsPD5cCBA17/ixtfi4yMjP5/T/7YHtMc7usxLpIwcyEAAIkBi+gAgCStWLFiUrt2bVm2bJlMmzZNXnnlFVm8eLHs379fMmTIoH4TMWoB6vDhw+oWAlELFZMnT3ZdRL+fnNumxFXUN+E6deokadOmvWdf7RuOd28xod1v+fLl1W8o361atWpGzde/p1Ncjy+2KlSoIAEBAWJZlqxevTrGxcBTp07Jvn37ROS/P+Y4uT3vnrjXNx49+TakJrav17fffivDhg2T0NBQGTdunDRo0EBy5coladKkkYCAAHn11VflnXfeifHb+J7q3LmzPPXUU/LHH3/I4cOHJU+ePLJhwwbZvHmz5M6d2/hmfkhIiPz222+ydu1a+fnnn+Wvv/6Sv/76S9atWyejR4+WJ598Uj755BOfHFuUUaNG2RbBduzYIQ0bNpQdO3ZIv3795Kuvvor+2f0ev96K7bh44IEHZOfOnfLrr7/KokWL5K+//pJly5bJokWLZMSIETJ58uToMMbYist7xpffiPfkcUJDQ2P9L03ul169esmIESNk4sSJcuDAAcmQIYN06NDB6/t77bXX5IsvvpBvvvlG1qxZ48Mj/W/roN69e0uFChWkYsWKEhkZKStWrPD6X9z42oYNG0REJF26dLFaAHebp72dv+8lIeZCAAASAxbRAQBJXp8+fWTZsmUSEREhr7zySvTWLt26dTMWUdauXStbtmwRkf++cXmvrVBWr14t27Ztk1KlSsXfwSeAvHnzyu7du+Wll16SypUr+/R+RURq1qwp48aN89n9usmXL59s375dduzYYdv73M39Pr5MmTJJ7dq1ZenSpTJt2jTp3LnzPftPnz5dRP5bXInaviGucufOLSIi+/fvd+1zr5/5UtSC8MiRI9WtRbStg+IiJCREunTpIpMnT5apU6fKa6+9Fv0vVMLDw10Xe6tUqRL9Tctbt27Jd999Jz169JDx48dLp06dpH79+j49zruVKFFCpk2bJo0aNZKvv/5ali1bFv0vRrwdv/ny5ZOdO3fKjh07jK2tNFFjJuqb75qoP/ZE9Y2rVKlSSYsWLaL3YL5w4YKMHj1ahg8fLk888YS0b98+xj/4xVbU3vBu23ZFfYP/blF/CDt27JicP3/eJ99Gj3pdAwIC5IsvvvD4jxCevLe138Eb+fPnlwYNGsgff/whIiL9+vWL0x8osmTJIi+88IIMGTJEXn75ZRkxYoRPjvNuFSpUkCxZskhkZKTt298JbcaMGSIi0qBBA4+2IcudO7fs3LnT9XWOz/k7IedCAAASAnuiAwCSvM6dO0v69Oll9+7d8sMPP8i3334rIvpWLpMmTRKR//YDtyzL9b+owLiYtk3wR82bNxcRsX3L1Zf3u2DBgvuyh2qzZs1E5L/98G/fvh1j//t9fCL/7dkrIvLDDz/I/PnzXfsdOnRI3nrrLRERGThwoC2UMS4qVaokISEhcurUKTXoMzIyUn777TefPFZMzpw5IyL/Lcg5nTx5Ml6OI2rLialTp8r169dl5syZIiLSs2dPj26fKlUq6dSpkzRt2lRExKeBwW4aNmwYHYY5ZMiQ6Lq34zfqfRIVMBuTqD/gzJkzR32cefPmydmzZyVdunTqv5jwhfTp08uwYcMkLCxMrly5Yts7O2rx+9atW3F6jKhF6O3btxs/u3LlihpwmyNHDilXrpzcuXPHyOFwE9Px5sqVS8qWLSsXL16M3r/eE3Xr1hWR//a8j3pv3W3BggVy7tw5j+8vJo8//rhkzpxZMmfO7JP94J977jnJkSOHLF68WA2ajklM/2Ll3Llz0VtD5cmTx6tj9LXx48fL2rVrRUTkxRdf9Og2USGes2bNUn8eNafFhjfvoYSYCwEAuN9YRAcAJHkhISHRex737t1brl69KmXKlDH27bxy5YrMnj1bRCR6/1Q3PXr0EJH/9k+/1/7N/uiFF16QsLAwGT16tHzwwQdy48YNo8++ffvkyy+/jNX9VqhQQTp27CiHDh2SDh06qN+Qu3z5ssyYMUNOnDjh7eFHe+yxxyRPnjyyceNG6du3r1y+fNn28wsXLtgWjuNyfK+88oqUKFFCXnnllVgdY9OmTeWpp54SEZGHHnpIpkyZYiz+rF69WurXry9nz56VypUry9ChQ2P1GPcSEhIijz32mIj8t2h19+91/fp1GThwoPG8xZeoYMmJEyfaxtz58+clPDxczp8/7/PHrFGjhhQvXjz6X16cPn1aatWqJUWLFjX6jh8/Xg01PX78uKxbt05EzD8AlChRQkqUKOHzL
2bVujduLEiVgcYdKWWOYoJB3+PEdpx6BdI2vB3FogX65cuWxtZ4ifiBm4LSJy5MgRo3bmzBmj5jyn+tP1tqeYo+Br/jxHIfFJLnOUdqzOtScR8/lILJ/J/QnBogAAAAAAAAAAeIlFdAAAAAAAAAAAXLCIDgAAAAAAAACACxbRAQAAAAAAAABwkSDBokg6kkuQA+4fwmbiR+rUqY3agw8+aGvfvn3b6LN//36jdv78eaN26dIlWzuxhGv52xzl7WMmluc7OfDnOUo7hgwZMhi1b775xqhVqFDBqB0+fNjW7t27t9Fnw4YNRo1Ao//xtzkKiZ8/z1FxkTJlSqPmyVzD+fPemKPga8l1jkL8YI6CrxEsCgAAAAAAAACAl1hEBwAAAAAAAADABYvoAAAAAAAAAAC4YE90xAl7UMHX2Ccv8dOe68S6pyhzFHwtqc1RuXPnNmrdu3c3avv27TNqzv3ODx06ZPS5fv16HI4u6WOOgq8ltTkKCYs5Cr7GHAVfYo6Cr7EnOgAAAAAAAAAAXmIRHQAAAAAAAAAAFyyiAwAAAAAAAADggkV0AAAAAAAAAABcECyKOCHIAb5G2Ax8iTkKvubPc1TKlCmNWmBgoFFLlSqVUbt586ZRu337tq1969atOBxd8sQcBV/z5zkKiQ9zFHyNOQq+xBwFXyNYFAAAAAAAAAAAL7GIDgAAAAAAAACACxbRAQAAAAAAAABwwSI6AAAAAAAAAAAuPA4WBQAAAAAAAAAgueGb6AAAAAAAAAAAuGARHQAAAAAAAAAAFyyiAwAAAAAAAADggkV0AAAAAAAAAABcsIgOAAAAAAAAAIALFtEBAAAAAAAAAHDBIjoAAAAAAAAAAC5YRAcAAAAAAAAAwAWL6AAAAAAAAAAAuPh/8rgc5cXtSgcAAAAASUVORK5CYII=", - "text/plain": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "# --- Plot the results ---\n", - "print(\"Displaying results...\")\n", - "fig, axes = plt.subplots(2, 10, figsize=(15, 3.5))\n", - "\n", - "for i in range(10):\n", - " # Plot original images on the first row\n", - " axes[0, i].imshow(image_batch[i, ..., 0], cmap=\"gray\")\n", - " axes[0, i].set_title(f\"Label: {labels_list[i]}\")\n", - " axes[0, i].axis(\"off\")\n", - "\n", - " # Plot reconstructed images on the second row\n", - " axes[1, i].imshow(reconstructed_images[i, ..., 0], cmap=\"gray\")\n", - " axes[1, i].axis(\"off\")\n", - "\n", - "# Add row labels\n", - "axes[0, 0].set_ylabel(\"Original\", fontsize=12, labelpad=15)\n", - "axes[1, 0].set_ylabel(\"Reconstructed\", fontsize=12, labelpad=15)\n", - "\n", - "\n", - "plt.suptitle(\"VAE Inference: Original vs. Reconstructed MNIST Digits\", fontsize=16)\n", - "plt.tight_layout(rect=[0, 0, 1, 0.96])\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## **Conclusion**\n", - "\n", - "This notebook demonstrated the complete workflow for the Bonsai VAE model:\n", - "\n", - "1. **Instantiated the VAE model** with a specific configuration.\n", - "2. **Loaded and preprocessed** the MNIST dataset.\n", - "3. **Defined a loss function** and a JIT-compiled training step.\n", - "4. **Trained the model** to reconstruct digits and structure its latent space.\n", - "5. **Generated new, plausible handwritten digits** by sampling from the latent space." - ] - } - ], - "metadata": { - "jupytext": { - "cell_metadata_filter": "-all", - "default_lexer": "ipython3", - "formats": "ipynb,md:myst", - "main_language": "python" - }, - "kernelspec": { - "display_name": "bonsai", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.13" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/bonsai/models/vae/tests/run_model.py b/bonsai/models/vae/tests/run_model.py index b8218a11..dabe8955 100644 --- a/bonsai/models/vae/tests/run_model.py +++ b/bonsai/models/vae/tests/run_model.py @@ -1,34 +1,59 @@ +# Copyright 2025 The JAX Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import time + +import jax import jax.numpy as jnp -from flax import nnx +from huggingface_hub import snapshot_download from bonsai.models.vae import modeling, params def run_model(): - # 1. Create model and PRNG keys - rngs = nnx.Rngs(params=0, sample=1) - config = modeling.ModelConfig(input_dim=28 * 28, hidden_dims=(512, 256), latent_dim=20) - model = params.create_model(cfg=config, rngs=rngs) + # 1. Download safetensors file + model_ckpt_path = snapshot_download("stabilityai/sd-vae-ft-mse") + model = params.create_model_from_safe_tensors(file_dir=model_ckpt_path) # 2. 
Prepare dummy input - batch_size = 4 - dummy_input = jnp.ones((batch_size, 28, 28, 1), dtype=jnp.float32) - sample_key = rngs.sample() + batch_size = 1 + image_size = 256 + dummy_input = jnp.ones((batch_size, image_size, image_size, 3), dtype=jnp.float32) # 3. Run a forward pass print("Running forward pass...") - reconstruction, mu, logvar = modeling.forward(model, dummy_input, sample_key) + modeling.forward(model, dummy_input) print("Forward pass complete.") - # 4. Show output shapes - print(f"\nInput shape: {dummy_input.shape}") - print(f"Reconstruction shape: {reconstruction.shape}") - print(f"Mu shape: {mu.shape}") - print(f"LogVar shape: {logvar.shape}") - - # The reconstruction is flattened, let's show its intended image shape - recon_img_shape = (batch_size, 28, 28, 1) - print(f"Reshaped Reconstruction: {reconstruction.reshape(recon_img_shape).shape}") + # 4. Warmup + profiling + # Warmup (triggers compilation) + _ = modeling.forward(model, dummy_input) + jax.block_until_ready(_) + + # Profile a few steps + jax.profiler.start_trace("/tmp/profile-vae") + for _ in range(5): + logits = modeling.forward(model, dummy_input) + jax.block_until_ready(logits) + jax.profiler.stop_trace() + + # 5. Timed execution + t0 = time.perf_counter() + for _ in range(2): + logits = modeling.forward(model, dummy_input) + jax.block_until_ready(logits) + print(f"2 runs took {time.perf_counter() - t0:.4f} s") if __name__ == "__main__": diff --git a/bonsai/models/vae/tests/test_outputs_vae.py b/bonsai/models/vae/tests/test_outputs_vae.py new file mode 100644 index 00000000..bda3269c --- /dev/null +++ b/bonsai/models/vae/tests/test_outputs_vae.py @@ -0,0 +1,51 @@ +# Copyright 2025 The JAX Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
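+# Parity test: the same random batch is pushed through the Bonsai NNX VAE
+# (NHWC layout) and the reference diffusers AutoencoderKL (NCHW layout); the
+# torch output is permuted back to NHWC and the two reconstructions are
+# compared element-wise under a loose absolute tolerance.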
+ +import jax.numpy as jnp +import numpy as np +import torch +from absl.testing import absltest, parameterized +from diffusers.models import AutoencoderKL +from huggingface_hub import snapshot_download + +from bonsai.models.vae import params + + +class TestModuleForwardPasses(parameterized.TestCase): + def _get_models_and_input_size(): + weight = "stabilityai/sd-vae-ft-mse" + model_ckpt_path = snapshot_download(weight) + nnx_model = params.create_model_from_safe_tensors(file_dir=model_ckpt_path) + dif_model = AutoencoderKL.from_pretrained(weight) + + return nnx_model, dif_model + + def test_full(self): + nnx_model, dif_model = TestModuleForwardPasses._get_models_and_input_size() + device = "cpu" + dif_model.to(device).eval() + + batch = 32 + img_size = 256 + + tx = torch.rand((batch, 3, img_size, img_size), dtype=torch.float32) + jx = jnp.permute_dims(tx.detach().cpu().numpy(), (0, 2, 3, 1)) + jy = nnx_model(jx) + with torch.no_grad(): + ty = dif_model(tx).sample + np.testing.assert_allclose(jy, ty.permute(0, 2, 3, 1).cpu().detach().numpy(), atol=9e-1) + + +if __name__ == "__main__": + absltest.main() diff --git a/pyproject.toml b/pyproject.toml index 2061c1bf..6a950a54 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -44,6 +44,7 @@ test-env = [ "torch", "timm", "h5py", + "diffusers[flax]", ] testing = [ From 6de7e1545293c8b590fa465623c73b150b9cdca4 Mon Sep 17 00:00:00 2001 From: jaewook Date: Thu, 8 Jan 2026 15:41:49 +0900 Subject: [PATCH 2/4] run a pre-commit hook --- .../VAE_image_reconstruction_example.ipynb | 34 ++- .../tests/VAE_image_reconstruction_example.md | 261 ++++++++++++++++++ .../vae/tests/VAE_segmentation_example.md | 174 ------------ 3 files changed, 285 insertions(+), 184 deletions(-) create mode 100644 bonsai/models/vae/tests/VAE_image_reconstruction_example.md delete mode 100644 bonsai/models/vae/tests/VAE_segmentation_example.md diff --git a/bonsai/models/vae/tests/VAE_image_reconstruction_example.ipynb b/bonsai/models/vae/tests/VAE_image_reconstruction_example.ipynb index 26737ee4..8ab7fab3 100644 --- a/bonsai/models/vae/tests/VAE_image_reconstruction_example.ipynb +++ b/bonsai/models/vae/tests/VAE_image_reconstruction_example.ipynb @@ -4,7 +4,9 @@ "cell_type": "markdown", "id": "e320f2616490638c", "metadata": {}, - "source": "\"Open" + "source": [ + "\"Open" + ] }, { "cell_type": "markdown", @@ -20,7 +22,9 @@ "cell_type": "markdown", "id": "457a9ff4dbb654d7", "metadata": {}, - "source": "## **Set-up**" + "source": [ + "## **Set-up**" + ] }, { "cell_type": "code", @@ -62,7 +66,9 @@ "cell_type": "markdown", "id": "7efb43325c1f570c", "metadata": {}, - "source": "## **Download Sample Images**" + "source": [ + "## **Download Sample Images**" + ] }, { "cell_type": "code", @@ -111,7 +117,9 @@ "cell_type": "markdown", "id": "6beb39b427edc794", "metadata": {}, - "source": "## **Load VAE Model**" + "source": [ + "## **Load VAE Model**" + ] }, { "cell_type": "code", @@ -120,10 +128,8 @@ "metadata": {}, "outputs": [], "source": [ - "from flax import nnx\n", "from huggingface_hub import snapshot_download\n", "\n", - "from bonsai.models.vae import modeling as model_lib\n", "from bonsai.models.vae import params\n", "\n", "\n", @@ -145,7 +151,9 @@ "cell_type": "markdown", "id": "f6d864491e958d33", "metadata": {}, - "source": "## **Image Preprocessing**" + "source": [ + "## **Image Preprocessing**" + ] }, { "cell_type": "code", @@ -169,7 +177,9 @@ "cell_type": "markdown", "id": "5265f8b4e749fbb6", "metadata": {}, - "source": "## **Image Postproessing**" + "source": [ + "## **Image 
Postproessing**" + ] }, { "cell_type": "code", @@ -192,7 +202,9 @@ "cell_type": "markdown", "id": "37e8fe3fd0967830", "metadata": {}, - "source": "## **Run Reconstruct on Sample Images**" + "source": [ + "## **Run Reconstruct on Sample Images**" + ] }, { "cell_type": "code", @@ -264,7 +276,9 @@ "cell_type": "markdown", "id": "3b8f6910319ce5a6", "metadata": {}, - "source": "## **Batch Processing**" + "source": [ + "## **Batch Processing**" + ] }, { "cell_type": "code", diff --git a/bonsai/models/vae/tests/VAE_image_reconstruction_example.md b/bonsai/models/vae/tests/VAE_image_reconstruction_example.md new file mode 100644 index 00000000..73ad8798 --- /dev/null +++ b/bonsai/models/vae/tests/VAE_image_reconstruction_example.md @@ -0,0 +1,261 @@ +--- +jupytext: + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.18.1 +--- + +Open In Colab + ++++ + +# **Image Reconstruction with VAE** + +This notebook demonstrates image reconstruction using the [Bonsai library](https://github.com/jax-ml/bonsai) and the [sd-vae-ft-mse](https://huggingface.co/stabilityai/sd-vae-ft-mse) weights. + ++++ + +## **Set-up** + +```{code-cell} +!pip install -q git+https://github.com/eari100/bonsai@vae-weights-and-tests +!pip install -q pillow matplotlib requests +!pip install -q scikit-image +``` + +```{code-cell} +import os +import zipfile + +import jax +import jax.numpy as jnp +import matplotlib.pyplot as plt +import numpy as np +import requests +from PIL import Image +from skimage.metrics import peak_signal_noise_ratio as psnr +from skimage.metrics import structural_similarity as ssim +from tqdm import tqdm + +print(f"JAX version: {jax.__version__}") +print(f"JAX device: {jax.devices()[0].platform}") +``` + +## **Download Sample Images** + +```{code-cell} +def download_coco_test_set(dest_folder="./coco_val2017"): + if not os.path.exists(dest_folder): + os.makedirs(dest_folder) + + url = "http://images.cocodataset.org/zips/val2017.zip" + target_path = os.path.join(dest_folder, "val2017.zip") + + print(f"Downloading {url}...") + response = requests.get(url, stream=True) + total_size = int(response.headers.get("content-length", 0)) + + with ( + open(target_path, "wb") as f, + tqdm( + desc="Progress", + total=total_size, + unit="iB", + unit_scale=True, + unit_divisor=1024, + ) as bar, + ): + for data in response.iter_content(chunk_size=1024): + size = f.write(data) + bar.update(size) + + print("\nExtracting files...") + with zipfile.ZipFile(target_path, "r") as zip_ref: + zip_ref.extractall(dest_folder) + + os.remove(target_path) + print(f"Done! 
Images are saved in: {os.path.abspath(dest_folder)}") + + +download_coco_test_set() +``` + +## **Load VAE Model** + +```{code-cell} +from huggingface_hub import snapshot_download + +from bonsai.models.vae import params + + +def load_vae_model(): + model_name = "stabilityai/sd-vae-ft-mse" + + print(f"Downloading {model_name}...") + model_ckpt_path = snapshot_download(model_name) + print("Download complete!") + + model = params.create_model_from_safe_tensors(file_dir=model_ckpt_path) + + print("VAE model loaded_successfully!") + + return model +``` + +## **Image Preprocessing** + +```{code-cell} +def preprocess(image): + image = image.convert("RGB").resize((256, 256)) + + # normalization: [0, 255] -> [0, 1] -> [-1, 1] + image = np.array(image).astype(np.float32) / 255.0 + image = (image * 2.0) - 1.0 + + # add dimension: (256, 256, 3) -> (1, 256, 256, 3) + return jnp.array(image[None, ...]) +``` + +## **Image Postproessing** + +```{code-cell} +def postprocess(tensor): + # restoration + tensor = jnp.clip(tensor, -1.0, 1.0) + tensor = (tensor + 1.0) / 2.0 + tensor = (tensor * 255).astype(np.uint8) + + # (1, 256, 256, 3) -> (256, 256, 3) + return Image.fromarray(np.array(tensor[0])) +``` + +## **Run Reconstruct on Sample Images** + +```{code-cell} +vae = load_vae_model() + +dest_folder = "./coco_val2017" +image_dir = os.path.join(dest_folder, "val2017") + +if not os.path.exists(image_dir): + raise FileNotFoundError(f"Could not find images folder: {image_dir}") + +image_files = [f for f in os.listdir(image_dir) if f.lower().endswith((".jpg", ".jpeg", ".png", ".JPEG"))][:5] + +if not image_files: + raise Exception("There are no image files in the folder.") + +psnr_scores = [] +ssim_scores = [] + +fig, axes = plt.subplots(5, 2, figsize=(10, 25)) +plt.subplots_adjust(hspace=0.3) + +for i, file_name in enumerate(image_files): + img_path = os.path.join(image_dir, file_name) + raw_img = Image.open(img_path).convert("RGB") + + input_tensor = preprocess(raw_img) + reconstructed_tensor = vae(input_tensor) + reconstructed_img = postprocess(reconstructed_tensor) + + original_resized = raw_img.resize((256, 256)) + + # convert unit8 to numpy array + orig_np = np.array(original_resized) + recon_np = np.array(reconstructed_img) + + # PSNR, SSIM calculation + p_score = psnr(orig_np, recon_np, data_range=255) + s_score = ssim(orig_np, recon_np, channel_axis=2, data_range=255) + + psnr_scores.append(p_score) + ssim_scores.append(s_score) + + # visualization + axes[i, 0].imshow(original_resized) + axes[i, 0].set_title(f"Original: {file_name}") + axes[i, 0].axis("off") + + axes[i, 1].imshow(reconstructed_img) + axes[i, 1].set_title(f"Reconstructed\nPSNR: {p_score:.2f}, SSIM: {s_score:.4f}") + axes[i, 1].axis("off") + +plt.tight_layout() +plt.show() + +print(f"\n{'=' * 40}") +print("--- Final Reconstruction Quality Report (N=5) ---") +print(f"Average PSNR: {np.mean(psnr_scores):.2f} dB") +print(f"Average SSIM: {np.mean(ssim_scores):.4f}") +print(f"{'=' * 40}") +``` + +## **Batch Processing** + +```{code-cell} +def batch_reconstruct_vae(vae, image_paths): + # 1. Preprocessing and batch stacking + input_tensors = [] + original_images_resized = [] + + for path in image_paths: + raw_img = Image.open(path).convert("RGB") + original_resized = raw_img.resize((256, 256)) + original_images_resized.append(original_resized) + + tensor = preprocess(raw_img) + # Assuming the result is in the form [B, H, W, C] + input_tensors.append(tensor[0]) + + batch_tensor = jnp.stack(input_tensors) + + # 2. 
Inference + recon_batch = vae(batch_tensor) + + # 3. Results processing and indicator calculator + batch_results = [] + + for i in range(len(image_paths)): + recon_img = postprocess(recon_batch[i : i + 1]) + + orig_np = np.array(original_images_resized[i]) + recon_np = np.array(recon_img) + + p_val = psnr(orig_np, recon_np, data_range=255) + s_val = ssim(orig_np, recon_np, channel_axis=2, data_range=255) + + batch_results.append( + { + "name": os.path.basename(image_paths[i]), + "recon_img": recon_img, + "orig_img": original_images_resized[i], + "psnr": p_val, + "ssim": s_val, + } + ) + + return batch_results + + +print("\n" + "=" * 50) +print("VAE BATCH RECONSTRUCTION RESULTS") +print("=" * 50) + +target_paths = [os.path.join(image_dir, f) for f in image_files[:5]] +results = batch_reconstruct_vae(vae, target_paths) + +all_psnr = [] +all_ssim = [] + +for i, res in enumerate(results): + print(f"[{i + 1}] {res['name']}: PSNR={res['psnr']:.2f}dB, SSIM={res['ssim']:.4f}") + all_psnr.append(res["psnr"]) + all_ssim.append(res["ssim"]) + +print("-" * 50) +print(f"Batch Average PSNR: {np.mean(all_psnr):.2f} dB") +print(f"Batch Average SSIM: {np.mean(all_ssim):.4f}") +``` diff --git a/bonsai/models/vae/tests/VAE_segmentation_example.md b/bonsai/models/vae/tests/VAE_segmentation_example.md deleted file mode 100644 index 6a453610..00000000 --- a/bonsai/models/vae/tests/VAE_segmentation_example.md +++ /dev/null @@ -1,174 +0,0 @@ ---- -jupytext: - cell_metadata_filter: -all - formats: ipynb,md:myst - main_language: python - text_representation: - extension: .md - format_name: myst - format_version: 0.13 - jupytext_version: 1.18.1 -kernelspec: - display_name: bonsai - language: python - name: python3 ---- - -# **Generative Modeling with a Variational Autoencoder (VAE)** - -This notebook demonstrates how to build, train, and use a Variational Autoencoder (VAE) model from the Bonsai library to generate new images of handwritten digits. - -*This colab demonstrates the VAE implementation from the [Bonsai library](https://github.com/jax-ml/bonsai).* - -+++ - -## **1. Setup and Imports** -First, we'll install the necessary libraries and import our modules. - -```{code-cell} ipython3 -!pip install -q git+https://github.com/jax-ml/bonsai@main -!pip install -q tensorflow-datasets matplotlib -!pip install tensorflow -q -!pip install --upgrade flax -q -``` - -```{code-cell} ipython3 -import os -import sys - -import jax -import jax.numpy as jnp -import matplotlib.pyplot as plt -import tensorflow as tf -import tensorflow_datasets as tfds -from flax import nnx - -from bonsai.models.vae import modeling - -os.chdir("/home/neo/Downloads/CODE_Other_Models/bonsai/bonsai/models/vae") -sys.path.append("/home/neo/Downloads/CODE_Other_Models/bonsai") - - -import sys -from pathlib import Path - -# Add the bonsai root to Python path for imports -bonsai_root = Path.home() -sys.path.insert(0, str(bonsai_root)) - -# Now you can import from the bonsai package without changing directories -from bonsai.models.vae import modeling as vae_lib -from bonsai.models.vae import params as params_lib -``` - -## **2. Load and Preprocess Data** - -We'll use the classic MNIST dataset of handwritten digits. We need to normalize the pixel values to the `[0, 1]` range, which is important for the VAE's reconstruction loss. 
- -```{code-cell} ipython3 -import sys -from pathlib import Path - - -bonsai_root = Path.home() -if str(bonsai_root) not in sys.path: - sys.path.insert(0, str(bonsai_root)) - - -# --- Load 10 images from the MNIST test set --- -print("Loading 10 MNIST test images...") -ds = tfds.load("mnist", split="test", as_supervised=True) -images_list = [] -labels_list = [] - -for image, label in ds.take(10): - # Preprocess: convert to float32 and normalize - single_image = tf.cast(image, tf.float32) / 255.0 - images_list.append(single_image.numpy()) - labels_list.append(label.numpy()) - -# Stack the images into a single batch -image_batch = jnp.stack(images_list, axis=0) - -print(f"Loaded a batch of 10 images with shape: {image_batch.shape}") -``` - -## **3.Define Model** - -Here we'll configure and instantiate our VAE model. - -```{code-cell} ipython3 -# --- Create a randomly initialized model --- -print("\nCreating a new model with random weights...") - -rngs = nnx.Rngs(params=0, sample=1) -config = modeling.ModelConfig(input_dim=28 * 28, hidden_dims=(512, 256), latent_dim=20) -model = params_lib.create_model(cfg=config, rngs=rngs) # This is all you need! - -print("New model created successfully!") -``` - -## **4. Reconstruct the Input** - -This function performs a full forward pass: image -> encode -> sample -> decode - -```{code-cell} ipython3 -# --- Define the JIT-compiled reconstruction function --- -@jax.jit -def reconstruct(model: vae_lib.VAE, batch: jax.Array, sample_key: jax.Array): - """Encodes and decodes an image batch using the trained VAE.""" - # The model now outputs logits - reconstruction_logits_flat, _, _ = model(batch, sample_key=sample_key) - - reconstructed_probs_flat = jax.nn.sigmoid(reconstruction_logits_flat) - - # Reshape the flat output back to the original image shape - return reconstructed_probs_flat.reshape(batch.shape) - - -# Get a random key for the reparameterization trick -sample_key = rngs.sample() - -print("\nRunning inference to reconstruct images...") -reconstructed_images = reconstruct(model, image_batch, sample_key) -print("Reconstruction complete.") -``` - -## **5. Show Reconstruction** - -We'll create a single, JIT-compiled function to perform one step of training. This function computes the loss, calculates gradients, and applies them to update the model's parameters. - -```{code-cell} ipython3 -# --- Plot the results --- -print("Displaying results...") -fig, axes = plt.subplots(2, 10, figsize=(15, 3.5)) - -for i in range(10): - # Plot original images on the first row - axes[0, i].imshow(image_batch[i, ..., 0], cmap="gray") - axes[0, i].set_title(f"Label: {labels_list[i]}") - axes[0, i].axis("off") - - # Plot reconstructed images on the second row - axes[1, i].imshow(reconstructed_images[i, ..., 0], cmap="gray") - axes[1, i].axis("off") - -# Add row labels -axes[0, 0].set_ylabel("Original", fontsize=12, labelpad=15) -axes[1, 0].set_ylabel("Reconstructed", fontsize=12, labelpad=15) - - -plt.suptitle("VAE Inference: Original vs. Reconstructed MNIST Digits", fontsize=16) -plt.tight_layout(rect=[0, 0, 1, 0.96]) -plt.show() -``` - -## **Conclusion** - -This notebook demonstrated the complete workflow for the Bonsai VAE model: - -1. **Instantiated the VAE model** with a specific configuration. -2. **Loaded and preprocessed** the MNIST dataset. -3. **Defined a loss function** and a JIT-compiled training step. -4. **Trained the model** to reconstruct digits and structure its latent space. -5. 
**Generated new, plausible handwritten digits** by sampling from the latent space. From 6465ebf26150fda79c1c1b262bd1262419b4a3f8 Mon Sep 17 00:00:00 2001 From: jaewook Date: Fri, 9 Jan 2026 14:22:31 +0900 Subject: [PATCH 3/4] Modify code style --- bonsai/models/vae/modeling.py | 50 +++-- bonsai/models/vae/params.py | 174 +++++++++++------- .../VAE_image_reconstruction_example.ipynb | 11 +- .../tests/VAE_image_reconstruction_example.md | 11 +- bonsai/models/vae/tests/run_model.py | 3 +- bonsai/models/vae/tests/test_outputs_vae.py | 5 +- 6 files changed, 157 insertions(+), 97 deletions(-) diff --git a/bonsai/models/vae/modeling.py b/bonsai/models/vae/modeling.py index 849a850f..cdc0f77a 100644 --- a/bonsai/models/vae/modeling.py +++ b/bonsai/models/vae/modeling.py @@ -1,4 +1,6 @@ -from typing import Optional +import dataclasses + +from typing import Optional, Sequence import jax import jax.image @@ -6,6 +8,21 @@ from flax import nnx +@dataclasses.dataclass(frozen=True) +class ModelConfig: + block_out_channels: Sequence[int] = (128, 256, 512, 512) + latent_channels: int = 4 + norm_num_groups: int = 32 + + @classmethod + def stable_diffusion_v1_5(cls): + return cls( + block_out_channels=[128, 256, 512, 512], + latent_channels=4, + norm_num_groups=32, + ) + + class ResnetBlock(nnx.Module): conv_shortcut: nnx.Data[Optional[nnx.Conv]] @@ -186,9 +203,7 @@ def __call__(self, x): class Encoder(nnx.Module): - def __init__(self, block_out_channels, rngs: nnx.Rngs): - groups = 32 - + def __init__(self, block_out_channels, latent_channels, groups, rngs: nnx.Rngs): self.conv_in = nnx.Conv( in_features=3, out_features=block_out_channels[0], @@ -218,15 +233,14 @@ def __init__(self, block_out_channels, rngs: nnx.Rngs): in_channels = out_channels self.mid_block = UNetMidBlock2D(channels=in_channels, groups=groups, num_res_blocks=2, rngs=rngs) + self.conv_norm_out = nnx.GroupNorm( num_groups=groups, num_features=block_out_channels[-1], epsilon=1e-6, rngs=rngs ) - conv_out_channels = 2 * 4 - self.conv_out = nnx.Conv( in_features=block_out_channels[-1], - out_features=conv_out_channels, + out_features=2 * latent_channels, kernel_size=(3, 3), strides=(1, 1), padding="SAME", @@ -325,9 +339,7 @@ def __call__(self, x): class Decoder(nnx.Module): - def __init__(self, latent_channels, block_out_channels, rngs: nnx.Rngs): - groups = 32 - + def __init__(self, block_out_channels, latent_channels, groups, rngs: nnx.Rngs): self.conv_in = nnx.Conv( in_features=latent_channels, out_features=block_out_channels[-1], @@ -379,28 +391,28 @@ def __call__(self, x): class VAE(nnx.Module): - def __init__(self, rngs: nnx.Rngs): - block_out_channels = [128, 256, 512, 512] - latent_channels = 4 + def __init__(self, cfg: ModelConfig, rngs: nnx.Rngs): + self.encoder = Encoder(cfg.block_out_channels, cfg.latent_channels, cfg.norm_num_groups, rngs) - self.encoder = Encoder(block_out_channels, rngs) self.quant_conv = nnx.Conv( - in_features=2 * latent_channels, - out_features=2 * latent_channels, + in_features=2 * cfg.latent_channels, + out_features=2 * cfg.latent_channels, kernel_size=(1, 1), strides=(1, 1), padding="VALID", rngs=rngs, ) + self.post_quant_conv = nnx.Conv( - in_features=latent_channels, - out_features=latent_channels, + in_features=cfg.latent_channels, + out_features=cfg.latent_channels, kernel_size=(1, 1), strides=(1, 1), padding="VALID", rngs=rngs, ) - self.decoder = Decoder(latent_channels=latent_channels, block_out_channels=block_out_channels, rngs=rngs) + + self.decoder = Decoder(cfg.block_out_channels, 
cfg.latent_channels, cfg.norm_num_groups, rngs) def __call__(self, x): x = self.encoder(x) diff --git a/bonsai/models/vae/params.py b/bonsai/models/vae/params.py index 0545c9ee..2348031b 100644 --- a/bonsai/models/vae/params.py +++ b/bonsai/models/vae/params.py @@ -14,6 +14,7 @@ import logging import re +from enum import Enum import jax import safetensors.flax as safetensors @@ -22,165 +23,206 @@ from bonsai.models.vae import modeling as model_lib -TO_JAX_CONV_2D_KERNEL = (2, 3, 1, 0) # (C_out, C_in, kH, kW) -> (kH, kW, C_in, C_out) -TO_JAX_LINEAR_KERNEL = (1, 0) - def _get_key_and_transform_mapping(): + class Transform(Enum): + """Transformations for model parameters""" + + BIAS = None + LINEAR = ((1, 0), None) + CONV2D = ((2, 3, 1, 0), None) + DEFAULT = None + return { # encoder ## conv in - r"^encoder.conv_in.weight$": (r"encoder.conv_in.kernel", (TO_JAX_CONV_2D_KERNEL, None)), - r"^encoder.conv_in.bias$": (r"encoder.conv_in.bias", None), + r"^encoder.conv_in.weight$": (r"encoder.conv_in.kernel", Transform.CONV2D), + r"^encoder.conv_in.bias$": (r"encoder.conv_in.bias", Transform.BIAS), ## down blocks r"^encoder.down_blocks.([0-3]).resnets.([0-1]).norm([1-2]).weight$": ( r"encoder.down_blocks.\1.resnets.\2.norm\3.scale", - None, + Transform.DEFAULT, ), r"^encoder.down_blocks.([0-3]).resnets.([0-1]).norm([1-2]).bias$": ( r"encoder.down_blocks.\1.resnets.\2.norm\3.bias", - None, + Transform.BIAS, ), r"^encoder.down_blocks.([0-3]).resnets.([0-1]).conv([1-2]).weight$": ( r"encoder.down_blocks.\1.resnets.\2.conv\3.kernel", - (TO_JAX_CONV_2D_KERNEL, None), + Transform.CONV2D, ), r"^encoder.down_blocks.([0-3]).resnets.([0-1]).conv([1-2]).bias$": ( r"encoder.down_blocks.\1.resnets.\2.conv\3.bias", - None, + Transform.BIAS, ), r"^encoder.down_blocks.([1-2]).resnets.0.conv_shortcut.weight$": ( r"encoder.down_blocks.\1.resnets.0.conv_shortcut.kernel", - (TO_JAX_CONV_2D_KERNEL, None), + Transform.CONV2D, ), r"^encoder.down_blocks.([1-2]).resnets.0.conv_shortcut.bias$": ( r"encoder.down_blocks.\1.resnets.0.conv_shortcut.bias", - None, + Transform.BIAS, ), r"^encoder.down_blocks.([0-2]).downsamplers.0.conv.weight$": ( r"encoder.down_blocks.\1.downsamplers.kernel", - (TO_JAX_CONV_2D_KERNEL, None), + Transform.CONV2D, + ), + r"^encoder.down_blocks.([0-2]).downsamplers.0.conv.bias$": ( + r"encoder.down_blocks.\1.downsamplers.bias", + Transform.BIAS, ), - r"^encoder.down_blocks.([0-2]).downsamplers.0.conv.bias$": (r"encoder.down_blocks.\1.downsamplers.bias", None), ## mid block r"^encoder.mid_block.attentions.0.group_norm.weight$": ( r"encoder.mid_block.attentions.0.group_norm.scale", - None, + Transform.DEFAULT, + ), + r"^encoder.mid_block.attentions.0.group_norm.bias$": ( + r"encoder.mid_block.attentions.0.group_norm.bias", + Transform.BIAS, ), - r"^encoder.mid_block.attentions.0.group_norm.bias$": (r"encoder.mid_block.attentions.0.group_norm.bias", None), r"^encoder.mid_block.attentions.0.query.weight$": ( r"encoder.mid_block.attentions.0.to_q.kernel", - (TO_JAX_LINEAR_KERNEL, None), + Transform.LINEAR, ), - r"^encoder.mid_block.attentions.0.query.bias$": (r"encoder.mid_block.attentions.0.to_q.bias", None), + r"^encoder.mid_block.attentions.0.query.bias$": (r"encoder.mid_block.attentions.0.to_q.bias", Transform.BIAS), r"^encoder.mid_block.attentions.0.key.weight$": ( r"encoder.mid_block.attentions.0.to_k.kernel", - (TO_JAX_LINEAR_KERNEL, None), + Transform.LINEAR, ), - r"^encoder.mid_block.attentions.0.key.bias$": (r"encoder.mid_block.attentions.0.to_k.bias", None), + 
r"^encoder.mid_block.attentions.0.key.bias$": (r"encoder.mid_block.attentions.0.to_k.bias", Transform.BIAS), r"^encoder.mid_block.attentions.0.value.weight$": ( r"encoder.mid_block.attentions.0.to_v.kernel", - (TO_JAX_LINEAR_KERNEL, None), + Transform.LINEAR, ), - r"^encoder.mid_block.attentions.0.value.bias$": (r"encoder.mid_block.attentions.0.to_v.bias", None), + r"^encoder.mid_block.attentions.0.value.bias$": (r"encoder.mid_block.attentions.0.to_v.bias", Transform.BIAS), r"^encoder.mid_block.attentions.0.proj_attn.weight$": ( r"encoder.mid_block.attentions.0.to_out.kernel", - (TO_JAX_LINEAR_KERNEL, None), + Transform.LINEAR, + ), + r"^encoder.mid_block.attentions.0.proj_attn.bias$": ( + r"encoder.mid_block.attentions.0.to_out.bias", + Transform.BIAS, ), - r"^encoder.mid_block.attentions.0.proj_attn.bias$": (r"encoder.mid_block.attentions.0.to_out.bias", None), r"^encoder.mid_block.resnets.([0-1]).conv([1-2]).weight$": ( r"encoder.mid_block.resnets.\1.conv\2.kernel", - (TO_JAX_CONV_2D_KERNEL, None), + Transform.CONV2D, + ), + r"^encoder.mid_block.resnets.([0-1]).conv([1-2]).bias$": ( + r"encoder.mid_block.resnets.\1.conv\2.bias", + Transform.BIAS, + ), + r"^encoder.mid_block.resnets.([0-1]).norm([1-2]).weight$": ( + r"encoder.mid_block.resnets.\1.norm\2.scale", + Transform.DEFAULT, + ), + r"^encoder.mid_block.resnets.([0-1]).norm([1-2]).bias$": ( + r"encoder.mid_block.resnets.\1.norm\2.bias", + Transform.BIAS, ), - r"^encoder.mid_block.resnets.([0-1]).conv([1-2]).bias$": (r"encoder.mid_block.resnets.\1.conv\2.bias", None), - r"^encoder.mid_block.resnets.([0-1]).norm([1-2]).weight$": (r"encoder.mid_block.resnets.\1.norm\2.scale", None), - r"^encoder.mid_block.resnets.([0-1]).norm([1-2]).bias$": (r"encoder.mid_block.resnets.\1.norm\2.bias", None), ## conv norm out - r"^encoder.conv_norm_out.weight$": (r"encoder.conv_norm_out.scale", None), - r"^encoder.conv_norm_out.bias$": (r"encoder.conv_norm_out.bias", None), + r"^encoder.conv_norm_out.weight$": (r"encoder.conv_norm_out.scale", Transform.DEFAULT), + r"^encoder.conv_norm_out.bias$": (r"encoder.conv_norm_out.bias", Transform.BIAS), ## conv out - r"^encoder.conv_out.weight$": (r"encoder.conv_out.kernel", (TO_JAX_CONV_2D_KERNEL, None)), - r"^encoder.conv_out.bias": (r"encoder.conv_out.bias", None), + r"^encoder.conv_out.weight$": (r"encoder.conv_out.kernel", Transform.CONV2D), + r"^encoder.conv_out.bias": (r"encoder.conv_out.bias", Transform.BIAS), # latent space ## quant_conv - r"^quant_conv.weight$": (r"quant_conv.kernel", (TO_JAX_CONV_2D_KERNEL, None)), - r"^quant_conv.bias$": (r"quant_conv.bias", None), + r"^quant_conv.weight$": (r"quant_conv.kernel", Transform.CONV2D), + r"^quant_conv.bias$": (r"quant_conv.bias", Transform.BIAS), ## post_quant_conv - r"^post_quant_conv.weight$": (r"post_quant_conv.kernel", (TO_JAX_CONV_2D_KERNEL, None)), - r"^post_quant_conv.bias$": (r"post_quant_conv.bias", None), + r"^post_quant_conv.weight$": (r"post_quant_conv.kernel", Transform.CONV2D), + r"^post_quant_conv.bias$": (r"post_quant_conv.bias", Transform.BIAS), # decoder ## conv in - r"^decoder.conv_in.weight$": (r"decoder.conv_in.kernel", (TO_JAX_CONV_2D_KERNEL, None)), - r"^decoder.conv_in.bias$": (r"decoder.conv_in.bias", None), + r"^decoder.conv_in.weight$": (r"decoder.conv_in.kernel", Transform.CONV2D), + r"^decoder.conv_in.bias$": (r"decoder.conv_in.bias", Transform.BIAS), ## mid block r"^decoder.mid_block.attentions.0.group_norm.weight$": ( r"decoder.mid_block.attentions.0.group_norm.scale", - None, + Transform.DEFAULT, + ), + 
r"^decoder.mid_block.attentions.0.group_norm.bias$": ( + r"decoder.mid_block.attentions.0.group_norm.bias", + Transform.BIAS, ), - r"^decoder.mid_block.attentions.0.group_norm.bias$": (r"decoder.mid_block.attentions.0.group_norm.bias", None), r"^decoder.mid_block.attentions.0.query.weight$": ( r"decoder.mid_block.attentions.0.to_q.kernel", - (TO_JAX_LINEAR_KERNEL, None), + Transform.LINEAR, ), - r"^decoder.mid_block.attentions.0.query.bias$": (r"decoder.mid_block.attentions.0.to_q.bias", None), + r"^decoder.mid_block.attentions.0.query.bias$": (r"decoder.mid_block.attentions.0.to_q.bias", Transform.BIAS), r"^decoder.mid_block.attentions.0.key.weight$": ( r"decoder.mid_block.attentions.0.to_k.kernel", - (TO_JAX_LINEAR_KERNEL, None), + Transform.LINEAR, ), - r"^decoder.mid_block.attentions.0.key.bias$": (r"decoder.mid_block.attentions.0.to_k.bias", None), + r"^decoder.mid_block.attentions.0.key.bias$": (r"decoder.mid_block.attentions.0.to_k.bias", Transform.BIAS), r"^decoder.mid_block.attentions.0.value.weight$": ( r"decoder.mid_block.attentions.0.to_v.kernel", - (TO_JAX_LINEAR_KERNEL, None), + Transform.LINEAR, ), - r"^decoder.mid_block.attentions.0.value.bias$": (r"decoder.mid_block.attentions.0.to_v.bias", None), + r"^decoder.mid_block.attentions.0.value.bias$": (r"decoder.mid_block.attentions.0.to_v.bias", Transform.BIAS), r"^decoder.mid_block.attentions.0.proj_attn.weight$": ( r"decoder.mid_block.attentions.0.to_out.kernel", - (TO_JAX_LINEAR_KERNEL, None), + Transform.LINEAR, + ), + r"^decoder.mid_block.attentions.0.proj_attn.bias$": ( + r"decoder.mid_block.attentions.0.to_out.bias", + Transform.BIAS, + ), + r"^decoder.mid_block.resnets.([0-1]).norm([1-2]).weight$": ( + r"decoder.mid_block.resnets.\1.norm\2.scale", + Transform.DEFAULT, + ), + r"^decoder.mid_block.resnets.([0-1]).norm([1-2]).bias$": ( + r"decoder.mid_block.resnets.\1.norm\2.bias", + Transform.BIAS, ), - r"^decoder.mid_block.attentions.0.proj_attn.bias$": (r"decoder.mid_block.attentions.0.to_out.bias", None), - r"^decoder.mid_block.resnets.([0-1]).norm([1-2]).weight$": (r"decoder.mid_block.resnets.\1.norm\2.scale", None), - r"^decoder.mid_block.resnets.([0-1]).norm([1-2]).bias$": (r"decoder.mid_block.resnets.\1.norm\2.bias", None), r"^decoder.mid_block.resnets.([0-1]).conv([1-2]).weight$": ( r"decoder.mid_block.resnets.\1.conv\2.kernel", - (TO_JAX_CONV_2D_KERNEL, None), + Transform.CONV2D, + ), + r"^decoder.mid_block.resnets.([0-1]).conv([1-2]).bias$": ( + r"decoder.mid_block.resnets.\1.conv\2.bias", + Transform.BIAS, ), - r"^decoder.mid_block.resnets.([0-1]).conv([1-2]).bias": (r"decoder.mid_block.resnets.\1.conv\2.bias", None), ## up blocks r"^decoder.up_blocks.([0-3]).resnets.([0-2]).norm([1-2]).weight$": ( r"decoder.up_blocks.\1.resnets.\2.norm\3.scale", - None, + Transform.DEFAULT, ), r"^decoder.up_blocks.([0-3]).resnets.([0-2]).norm([1-2]).bias$": ( r"decoder.up_blocks.\1.resnets.\2.norm\3.bias", - None, + Transform.BIAS, ), r"^decoder.up_blocks.([0-3]).resnets.([0-2]).conv([1-2]).weight$": ( r"decoder.up_blocks.\1.resnets.\2.conv\3.kernel", - (TO_JAX_CONV_2D_KERNEL, None), + Transform.CONV2D, ), r"^decoder.up_blocks.([0-3]).resnets.([0-2]).conv([1-2]).bias$": ( r"decoder.up_blocks.\1.resnets.\2.conv\3.bias", - None, + Transform.BIAS, ), r"^decoder.up_blocks.([2-3]).resnets.0.conv_shortcut.weight$": ( r"decoder.up_blocks.\1.resnets.0.conv_shortcut.kernel", - (TO_JAX_CONV_2D_KERNEL, None), + Transform.CONV2D, ), r"^decoder.up_blocks.([2-3]).resnets.0.conv_shortcut.bias$": ( 
r"decoder.up_blocks.\1.resnets.0.conv_shortcut.bias", - None, + Transform.BIAS, ), r"^decoder.up_blocks.([0-2]).upsamplers.0.conv.weight$": ( r"decoder.up_blocks.\1.upsamplers.conv.kernel", - (TO_JAX_CONV_2D_KERNEL, None), + Transform.CONV2D, + ), + r"^decoder.up_blocks.([0-2]).upsamplers.0.conv.bias$": ( + r"decoder.up_blocks.\1.upsamplers.conv.bias", + Transform.BIAS, ), - r"^decoder.up_blocks.([0-2]).upsamplers.0.conv.bias$": (r"decoder.up_blocks.\1.upsamplers.conv.bias", None), ## conv norm out - r"^decoder.conv_norm_out.weight$": (r"decoder.conv_norm_out.scale", None), - r"^decoder.conv_norm_out.bias$": (r"decoder.conv_norm_out.bias", None), + r"^decoder.conv_norm_out.weight$": (r"decoder.conv_norm_out.scale", Transform.DEFAULT), + r"^decoder.conv_norm_out.bias$": (r"decoder.conv_norm_out.bias", Transform.BIAS), ## conv out - r"^decoder.conv_out.weight$": (r"decoder.conv_out.kernel", (TO_JAX_CONV_2D_KERNEL, None)), - r"^decoder.conv_out.bias$": (r"decoder.conv_out.bias", None), + r"^decoder.conv_out.weight$": (r"decoder.conv_out.kernel", Transform.CONV2D), + r"^decoder.conv_out.bias$": (r"decoder.conv_out.bias", Transform.BIAS), } @@ -226,6 +268,8 @@ def _stoi(s): def create_model_from_safe_tensors( file_dir: str, + cfg: model_lib.ModelConfig, + *, mesh: jax.sharding.Mesh | None = None, ) -> model_lib.VAE: """Load tensors from the safetensors file and create a VAE model.""" @@ -237,7 +281,7 @@ def create_model_from_safe_tensors( for f in files: tensor_dict |= safetensors.load_file(f) - vae = nnx.eval_shape(lambda: model_lib.VAE(rngs=nnx.Rngs(params=0))) + vae = nnx.eval_shape(lambda: model_lib.VAE(cfg=cfg, rngs=nnx.Rngs(params=0))) graph_def, abs_state = nnx.split(vae) jax_state = abs_state.to_pure_dict() @@ -248,7 +292,7 @@ def create_model_from_safe_tensors( if jax_key is None: continue keys = [_stoi(k) for k in jax_key.split(".")] - _assign_weights(keys, tensor, jax_state, st_key, transform) + _assign_weights(keys, tensor, jax_state, st_key, transform.value) if mesh is not None: sharding = nnx.get_named_sharding(abs_state, mesh).to_pure_dict() diff --git a/bonsai/models/vae/tests/VAE_image_reconstruction_example.ipynb b/bonsai/models/vae/tests/VAE_image_reconstruction_example.ipynb index 8ab7fab3..01f389b0 100644 --- a/bonsai/models/vae/tests/VAE_image_reconstruction_example.ipynb +++ b/bonsai/models/vae/tests/VAE_image_reconstruction_example.ipynb @@ -5,7 +5,7 @@ "id": "e320f2616490638c", "metadata": {}, "source": [ - "\"Open" + "\"Open" ] }, { @@ -33,7 +33,7 @@ "metadata": {}, "outputs": [], "source": [ - "!pip install -q git+https://github.com/eari100/bonsai@vae-weights-and-tests\n", + "!pip install -q git+https://github.com/jax-ml/bonsai@main\n", "!pip install -q pillow matplotlib requests\n", "!pip install -q scikit-image" ] @@ -130,17 +130,18 @@ "source": [ "from huggingface_hub import snapshot_download\n", "\n", - "from bonsai.models.vae import params\n", + "from bonsai.models.vae import modeling, params\n", "\n", "\n", "def load_vae_model():\n", " model_name = \"stabilityai/sd-vae-ft-mse\"\n", + " config = modeling.ModelConfig.stable_diffusion_v1_5()\n", "\n", " print(f\"Downloading {model_name}...\")\n", " model_ckpt_path = snapshot_download(model_name)\n", " print(\"Download complete!\")\n", "\n", - " model = params.create_model_from_safe_tensors(file_dir=model_ckpt_path)\n", + " model = params.create_model_from_safe_tensors(file_dir=model_ckpt_path, cfg=config)\n", "\n", " print(\"VAE model loaded_successfully!\")\n", "\n", @@ -178,7 +179,7 @@ "id": "5265f8b4e749fbb6", 
"metadata": {}, "source": [ - "## **Image Postproessing**" + "## **Image Postprocessing**" ] }, { diff --git a/bonsai/models/vae/tests/VAE_image_reconstruction_example.md b/bonsai/models/vae/tests/VAE_image_reconstruction_example.md index 73ad8798..4cc398f5 100644 --- a/bonsai/models/vae/tests/VAE_image_reconstruction_example.md +++ b/bonsai/models/vae/tests/VAE_image_reconstruction_example.md @@ -7,7 +7,7 @@ jupytext: jupytext_version: 1.18.1 --- -Open In Colab +Open In Colab +++ @@ -20,7 +20,7 @@ This notebook demonstrates image reconstruction using the [Bonsai library](https ## **Set-up** ```{code-cell} -!pip install -q git+https://github.com/eari100/bonsai@vae-weights-and-tests +!pip install -q git+https://github.com/jax-ml/bonsai@main !pip install -q pillow matplotlib requests !pip install -q scikit-image ``` @@ -87,17 +87,18 @@ download_coco_test_set() ```{code-cell} from huggingface_hub import snapshot_download -from bonsai.models.vae import params +from bonsai.models.vae import modeling, params def load_vae_model(): model_name = "stabilityai/sd-vae-ft-mse" + config = modeling.ModelConfig.stable_diffusion_v1_5() print(f"Downloading {model_name}...") model_ckpt_path = snapshot_download(model_name) print("Download complete!") - model = params.create_model_from_safe_tensors(file_dir=model_ckpt_path) + model = params.create_model_from_safe_tensors(file_dir=model_ckpt_path, cfg=config) print("VAE model loaded_successfully!") @@ -118,7 +119,7 @@ def preprocess(image): return jnp.array(image[None, ...]) ``` -## **Image Postproessing** +## **Image Postprocessing** ```{code-cell} def postprocess(tensor): diff --git a/bonsai/models/vae/tests/run_model.py b/bonsai/models/vae/tests/run_model.py index dabe8955..900cff9d 100644 --- a/bonsai/models/vae/tests/run_model.py +++ b/bonsai/models/vae/tests/run_model.py @@ -24,7 +24,8 @@ def run_model(): # 1. Download safetensors file model_ckpt_path = snapshot_download("stabilityai/sd-vae-ft-mse") - model = params.create_model_from_safe_tensors(file_dir=model_ckpt_path) + config = modeling.ModelConfig.stable_diffusion_v1_5() + model = params.create_model_from_safe_tensors(file_dir=model_ckpt_path, cfg=config) # 2. 
Prepare dummy input batch_size = 1 diff --git a/bonsai/models/vae/tests/test_outputs_vae.py b/bonsai/models/vae/tests/test_outputs_vae.py index bda3269c..764bb0ed 100644 --- a/bonsai/models/vae/tests/test_outputs_vae.py +++ b/bonsai/models/vae/tests/test_outputs_vae.py @@ -19,14 +19,15 @@ from diffusers.models import AutoencoderKL from huggingface_hub import snapshot_download -from bonsai.models.vae import params +from bonsai.models.vae import modeling, params class TestModuleForwardPasses(parameterized.TestCase): def _get_models_and_input_size(): weight = "stabilityai/sd-vae-ft-mse" model_ckpt_path = snapshot_download(weight) - nnx_model = params.create_model_from_safe_tensors(file_dir=model_ckpt_path) + config = modeling.ModelConfig.stable_diffusion_v1_5() + nnx_model = params.create_model_from_safe_tensors(file_dir=model_ckpt_path, cfg=config) dif_model = AutoencoderKL.from_pretrained(weight) return nnx_model, dif_model From d24c07b0ca8acf508538b4deac6e8737273c891e Mon Sep 17 00:00:00 2001 From: jaewook Date: Sat, 10 Jan 2026 02:26:49 +0900 Subject: [PATCH 4/4] Add intermediate tests --- bonsai/models/vae/tests/test_outputs_vae.py | 107 +++++++++++++++++--- 1 file changed, 91 insertions(+), 16 deletions(-) diff --git a/bonsai/models/vae/tests/test_outputs_vae.py b/bonsai/models/vae/tests/test_outputs_vae.py index 764bb0ed..6d894975 100644 --- a/bonsai/models/vae/tests/test_outputs_vae.py +++ b/bonsai/models/vae/tests/test_outputs_vae.py @@ -23,29 +23,104 @@ class TestModuleForwardPasses(parameterized.TestCase): - def _get_models_and_input_size(): - weight = "stabilityai/sd-vae-ft-mse" - model_ckpt_path = snapshot_download(weight) - config = modeling.ModelConfig.stable_diffusion_v1_5() - nnx_model = params.create_model_from_safe_tensors(file_dir=model_ckpt_path, cfg=config) - dif_model = AutoencoderKL.from_pretrained(weight) + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.torch_device = "cpu" + cls.img_size = 256 - return nnx_model, dif_model + model_name = "stabilityai/sd-vae-ft-mse" + model_ckpt_path = snapshot_download(model_name) + torch_cfg = modeling.ModelConfig.stable_diffusion_v1_5() + cls.jax_model = params.create_model_from_safe_tensors(file_dir=model_ckpt_path, cfg=torch_cfg) + cls.dif_model = AutoencoderKL.from_pretrained(model_name) + + def test_encoder(self): + batch = 1 + tx = torch.rand((batch, 3, self.img_size, self.img_size), dtype=torch.float32) + jx = jnp.permute_dims(tx.detach().cpu().numpy(), (0, 2, 3, 1)) + + tm = self.dif_model.encoder.to(self.torch_device).eval() + jm = self.jax_model.encoder + + with torch.no_grad(): + ty = tm(tx) + jy = jm(jx) + + np.testing.assert_allclose(jy, ty.permute(0, 2, 3, 1).cpu().detach().numpy(), atol=5e-3) + + def test_quant_conv(self): + batch = 1 + tx = torch.rand((batch, 8, 32, 32), dtype=torch.float32) + jx = jnp.permute_dims(tx.detach().cpu().numpy(), (0, 2, 3, 1)) + + tm = self.dif_model.quant_conv.to(self.torch_device).eval() + jm = self.jax_model.quant_conv + + with torch.no_grad(): + ty = tm(tx) + jy = jm(jx) + + np.testing.assert_allclose(jy, ty.permute(0, 2, 3, 1).cpu().detach().numpy(), atol=5e-3) + + def test_post_quant_conv(self): + batch = 1 + tx = torch.rand((batch, 8, 32, 32), dtype=torch.float32) + jx = jnp.permute_dims(tx.detach().cpu().numpy(), (0, 2, 3, 1)) + + t_mean, _ = torch.chunk(tx, chunks=2, dim=1) + j_mean, _ = jnp.split(jx, 2, axis=-1) + + tm = self.dif_model.post_quant_conv.to(self.torch_device).eval() + jm = self.jax_model.post_quant_conv + + with torch.no_grad(): + ty = tm(t_mean) 
+ jy = jm(j_mean) + + np.testing.assert_allclose(jy, ty.permute(0, 2, 3, 1).cpu().detach().numpy(), atol=8e-3) + + def test_decoder(self): + batch = 1 + tx = torch.rand((batch, 4, 32, 32), dtype=torch.float32) + jx = jnp.permute_dims(tx.detach().cpu().numpy(), (0, 2, 3, 1)) + + tm = self.dif_model.decoder.to(self.torch_device).eval() + jm = self.jax_model.decoder + + with torch.no_grad(): + ty = tm(tx) + jy = jm(jx) + + np.testing.assert_allclose(jy, ty.permute(0, 2, 3, 1).cpu().detach().numpy(), atol=5e-3) def test_full(self): - nnx_model, dif_model = TestModuleForwardPasses._get_models_and_input_size() - device = "cpu" - dif_model.to(device).eval() + batch = 1 + tx = torch.rand((batch, 3, self.img_size, self.img_size), dtype=torch.float32) + jx = jnp.permute_dims(tx.detach().cpu().numpy(), (0, 2, 3, 1)) - batch = 32 - img_size = 256 + tm = self.dif_model.to(self.torch_device).eval() + jm = self.jax_model + + with torch.no_grad(): + ty = tm(tx).sample + jy = jm(jx) + + np.testing.assert_allclose(jy, ty.permute(0, 2, 3, 1).cpu().detach().numpy(), atol=5e-3) - tx = torch.rand((batch, 3, img_size, img_size), dtype=torch.float32) + def test_full_batched(self): + batch = 32 + tx = torch.rand((batch, 3, self.img_size, self.img_size), dtype=torch.float32) jx = jnp.permute_dims(tx.detach().cpu().numpy(), (0, 2, 3, 1)) - jy = nnx_model(jx) + + tm = self.dif_model.to(self.torch_device).eval() + jm = self.jax_model + with torch.no_grad(): - ty = dif_model(tx).sample - np.testing.assert_allclose(jy, ty.permute(0, 2, 3, 1).cpu().detach().numpy(), atol=9e-1) + ty = tm(tx).sample + jy = jm(jx) + + np.testing.assert_allclose(jy, ty.permute(0, 2, 3, 1).cpu().detach().numpy(), atol=5e-3) if __name__ == "__main__":