diff --git a/examples/drivaer_fastback.py b/examples/drivaer_fastback.py
new file mode 100644
index 00000000..58fa09d2
--- /dev/null
+++ b/examples/drivaer_fastback.py
@@ -0,0 +1,880 @@
+import neon
+import warp as wp
+import numpy as np
+import time
+import os
+import re
+import matplotlib.pyplot as plt
+import trimesh
+import shutil
+
+import xlb
+from xlb.compute_backend import ComputeBackend
+from xlb.precision_policy import PrecisionPolicy
+from xlb.grid import multires_grid_factory
+from xlb.operator.boundary_condition import (
+    FullwayBounceBackBC,
+    HalfwayBounceBackBC,
+    RegularizedBC,
+    ExtrapolationOutflowBC,
+    DoNothingBC,
+    ZouHeBC,
+    HybridBC,
+)
+from xlb.operator.boundary_masker import MeshVoxelizationMethod
+from xlb.utils.mesher import make_cuboid_mesh, MultiresIO
+from xlb.utils.makemesh import generate_mesh
+from xlb.operator.force import MultiresMomentumTransfer
+from xlb.helper.initializers import CustomMultiresInitializer
+from xlb import MresPerfOptimizationType
+from typing import Any
+
+
+wp.clear_kernel_cache()
+wp.config.quiet = True
+
+# User Configuration
+# =================
+# Physical and simulation parameters
+voxel_size = 0.0025  # Finest voxel size in meters
+ulb = 0.05  # Lattice velocity
+u_physical = 38.0  # Physical inlet velocity in m/s (user input)
+flow_passes = 3  # Domain flow passes
+kinematic_viscosity = 1.508e-5  # Kinematic viscosity of air in m^2/s
+cs = 1 / np.sqrt(3)
+ma = ulb / cs
+
+trim = True
+trim_voxels = 3
+
+# STL filename
+stl_filename = "examples/stl/drivaer_fb_engine.stl"
+script_name = "Drivaer_Fastback_omega"
+
+# I/O settings
+print_interval_percentage = 0.2  # Print every 0.2% of iterations
+file_output_crossover_percentage = 80  # Crossover at 80% of iterations
+num_file_outputs_pre_crossover = 3  # Outputs before crossover
+num_file_outputs_post_crossover = 10  # Outputs after crossover
+
+# Other setup parameters
+compute_backend = ComputeBackend.NEON
+precision_policy = PrecisionPolicy.FP32FP32
+velocity_set = xlb.velocity_set.D3Q27(precision_policy=precision_policy, compute_backend=compute_backend)
+
+# Choose mesher type
+mesher_type = "makemesh"  # Options: "makemesh" or "cuboid"
+
+# Mesh Generation Functions
+# =========================
+def generate_makemesh_mesh(stl_filename, voxel_size, trim, trim_voxels, ground_refinement_level=-1, ground_voxel_height=6):
+    """
+    Generate a makemesh mesh based on the provided voxel size in meters, domain multipliers, and padding values.
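+
+    Returns (level_data, mesh_vertices, grid_shape_finest, partSize, actual_num_levels,
+    shift, sparsity_pattern, level_origins, x0), where x0 is the center of the
+    wheelbase, used later as the origin for the validation line plots.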
+ """ + # Number of requested refinement levels + num_levels = 5 + + # Domain multipliers for the full domain + domain_multiplier = { + "-x": 2.5, + "x": 3.5, + "-y": 1.75, + "y": 1.75, + "-z": 0.0, + "z": 4, + } + + padding_values = [ + #[25, 80, 30, 30, 30, 50], + [15, 15, 15, 15, 15, 15], + [10, 40, 10, 10, 10, 10], + [8, 20, 8, 8, 8, 8], + [8, 20, 8, 8, 8, 8], + [6, 6, 6, 6, 6, 6], + [6, 6, 6, 6, 6, 6], + [6, 6, 6, 6, 6, 6], + [6, 6, 6, 6, 6, 6], + [6, 6, 6, 6, 6, 6], + + ] + + + # Load the mesh + mesh = trimesh.load_mesh(stl_filename, process=False) + if mesh.is_empty: + raise ValueError("Loaded mesh is empty or invalid.") + + # Compute original bounds + min_bound = mesh.vertices.min(axis=0) + max_bound = mesh.vertices.max(axis=0) + partSize = max_bound - min_bound + x0 = [max_bound[0]-0.603, min_bound[1]+(0.5*partSize[1]), min_bound[2]] #Center of wheelbase for Drivaer + + + # Compute translation to put mesh into first octant of the domain + shift = np.array( + [ + domain_multiplier["-x"] * partSize[0] - min_bound[0], + domain_multiplier["-y"] * partSize[1] - min_bound[1], + domain_multiplier["-z"] * partSize[2] - min_bound[2], + ], + dtype=float, + ) + + # Apply translation and save out temp STL + mesh.apply_translation(shift) + _ = mesh.vertex_normals + mesh.export("temp.stl") + # Generate mesh using make_cuboid_mesh + # Generate mesh using generate_mesh with ground refinement + level_data, _, sparsity_pattern, level_origins = generate_mesh( + num_levels, + "temp.stl", + voxel_size, + padding_values, + domain_multiplier, + ground_refinement_level=ground_refinement_level, + ground_voxel_height=ground_voxel_height, + ) + + if trim == True: + zShift = trim_voxels + plane_origin = np.array([0, 0, mesh.bounds[0][2]+(zShift* voxel_size)]) + plane_normal = np.array([0, 0, 1]) # Upward pointing normal + # Slice the mesh using the defined plane. + # With cap=True, the open slice is automatically closed off. + mesh_above = mesh.slice_plane(plane_origin=plane_origin, + plane_normal=plane_normal, + cap=True) + mesh_above.export('temp.stl') + body_stl = 'temp.stl' + mesh = trimesh.load_mesh(body_stl, process=False) + mesh_vertices = np.asarray(mesh.vertices) / voxel_size + else: + mesh_vertices = np.asarray(mesh.vertices) / voxel_size + + + actual_num_levels = len(level_data) + grid_shape_finest = tuple([int(i * 2 ** (actual_num_levels - 1)) for i in level_data[-1][0].shape]) + print(f"Requested levels: {num_levels}, Actual levels: {actual_num_levels}") + print(f"Full shape based on finest voxel size is {grid_shape_finest}") + #os.remove("temp.stl") + + return level_data, mesh_vertices, tuple([int(a) for a in grid_shape_finest]), partSize, actual_num_levels, shift, sparsity_pattern, level_origins, x0 + +def generate_cuboid_mesh(stl_filename, voxel_size, trim, trim_voxels): + """ + Alternative cuboid mesh generation based on Apolo's method with domain multipliers per level. 
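+
+    Each row of domain_multiplier gives the (-x, +x, -y, +y, -z, +z) extent of one
+    refinement box as a multiple of the part size, ordered from the full (coarsest)
+    domain down to the finest box around the body.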
+ """ + # Domain multipliers for each refinement level + #domain_multiplier = [ + # [3.0, 4.0, 2.5, 2.5, 0.0, 4.0], # -x, x, -y, y, -z, z0.17361 + # [1.2, 1.25, 1.75, 1.75, 0.0, 1.5], # -x, x, -y, y, -z, z + # [0.8, 1.0, 1.25, 1.25, 0.0, 1.2], # -x, x, -y, y, -z, z + # [0.4, 0.4, 0.25, 0.25, 0.0, 0.25], + + #] + + domain_multiplier = [ + [2.0, 3.0, 1.5, 1.5, 0.0, 3.7], # -x, x, -y, y, -z, z + #[1.8, 1.6, 1.2, 1.2 , 0.0, 2.0], # -x, x, -y, y, -z, z + [1.4, 1.25, 1.0, 1.0, 0.0, 1.6], # -x, x, -y, y, -z, z + [0.8, 1.0, 0.6, 0.6, 0.0, 1.2], + #[0.4, 0.4, 0.25, 0.25, 0.0, 0.25], # -x, x, -y, y, -z, z + #[0.55, 0.65, 0.65, 0.65, 0.0, 0.65], + #[0.25, 0.25, 0.22, 0.22, 0.0, 0.25], + [0.25, 0.25, 0.35, 0.35, 0.0, 0.35], + + ] + + # Load the mesh + mesh = trimesh.load_mesh(stl_filename, process=False) + if mesh.is_empty: + raise ValueError("Loaded mesh is empty or invalid.") + + # Compute original bounds + min_bound = mesh.vertices.min(axis=0) + max_bound = mesh.vertices.max(axis=0) + partSize = max_bound - min_bound + x0 = [max_bound[0]-0.603, min_bound[1]+(0.5*partSize[1]), min_bound[2]] #Center of wheelbase for Drivaer + # Compute translation to put mesh into first octant of the domain + shift = np.array( + [ + domain_multiplier[0][0] * partSize[0] - min_bound[0], + domain_multiplier[0][2] * partSize[1] - min_bound[1], + domain_multiplier[0][4] * partSize[2] - min_bound[2], + ], + dtype=float, + ) + + # Apply translation and save out temp STL + mesh.apply_translation(shift) + _ = mesh.vertex_normals + mesh.export("temp.stl") + # Generate mesh using make_cuboid_mesh + level_data, sparsity_pattern, level_origins = make_cuboid_mesh( + voxel_size, + domain_multiplier, + "temp.stl", + ) + if trim == True: + zShift = trim_voxels + plane_origin = np.array([0, 0, mesh.bounds[0][2]+(zShift* voxel_size)]) + plane_normal = np.array([0, 0, 1]) # Upward pointing normal + # Slice the mesh using the defined plane. + # With cap=True, the open slice is automatically closed off. + mesh_above = mesh.slice_plane(plane_origin=plane_origin, + plane_normal=plane_normal, + cap=True) + mesh_above.export('temp.stl') + body_stl = 'temp.stl' + mesh = trimesh.load_mesh(body_stl, process=False) + mesh_vertices = np.asarray(mesh.vertices) / voxel_size + else: + mesh_vertices = np.asarray(mesh.vertices) / voxel_size + + + + + actual_num_levels = len(level_data) + grid_shape_finest = tuple([int(i * 2 ** (actual_num_levels - 1)) for i in level_data[-1][0].shape]) + print(f"Requested levels: {len(domain_multiplier)}, Actual levels: {actual_num_levels}") + print(f"Full shape based on finest voxel size is {grid_shape_finest}") + #os.remove("temp.stl") + + return level_data, mesh_vertices, tuple([int(a) for a in grid_shape_finest]), partSize, actual_num_levels, shift, sparsity_pattern, level_origins, x0 + +# Boundary Conditions Setup +# ========================= +def setup_boundary_conditions(grid, level_data, body_vertices, ulb, num_steps, compute_backend=ComputeBackend.NEON): + """ + Set up boundary conditions for the simulation. 
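+
+    The returned list keeps the body BC last and the outlet BC second to last;
+    downstream code relies on boundary_conditions[-1] and boundary_conditions[-2]
+    in that order.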
+    """
+    num_levels = len(level_data)
+    coarsest_level = num_levels - 1
+    box = grid.bounding_box_indices(shape=grid.level_to_shape(coarsest_level))
+    left_indices = grid.boundary_indices_across_levels(level_data, box_side="left", remove_edges=True)
+    right_indices = grid.boundary_indices_across_levels(level_data, box_side="right", remove_edges=True)
+    top_indices = grid.boundary_indices_across_levels(level_data, box_side="top", remove_edges=False)
+    bottom_indices = grid.boundary_indices_across_levels(level_data, box_side="bottom", remove_edges=False)
+    front_indices = grid.boundary_indices_across_levels(level_data, box_side="front", remove_edges=False)
+    back_indices = grid.boundary_indices_across_levels(level_data, box_side="back", remove_edges=False)
+
+    # Filter side indices at each level to remove overlaps: front/back against
+    # top/bottom, and top/bottom against left/right
+    filtered_front_indices = []
+    filtered_back_indices = []
+    filtered_top_indices = []
+    filtered_bottom_indices = []
+    for level in range(num_levels):
+        left_set = set(zip(*left_indices[level])) if left_indices[level] else set()
+        right_set = set(zip(*right_indices[level])) if right_indices[level] else set()
+        top_set = set(zip(*top_indices[level])) if top_indices[level] else set()
+        bottom_set = set(zip(*bottom_indices[level])) if bottom_indices[level] else set()
+        front_set = set(zip(*front_indices[level])) if front_indices[level] else set()
+        back_set = set(zip(*back_indices[level])) if back_indices[level] else set()
+        filtered_front_set = front_set - (top_set | bottom_set)
+        filtered_back_set = back_set - (top_set | bottom_set)
+        filtered_top_set = top_set - (left_set | right_set)
+        filtered_bottom_set = bottom_set - (left_set | right_set)
+        filtered_front_indices.append(
+            [list(coords) for coords in zip(*filtered_front_set)] if filtered_front_set else []
+        )
+        filtered_back_indices.append(
+            [list(coords) for coords in zip(*filtered_back_set)] if filtered_back_set else []
+        )
+        filtered_top_indices.append(
+            [list(coords) for coords in zip(*filtered_top_set)] if filtered_top_set else []
+        )
+        filtered_bottom_indices.append(
+            [list(coords) for coords in zip(*filtered_bottom_set)] if filtered_bottom_set else []
+        )
+
+    # Turbulent flow profile: uniform core with a linear taper toward the walls
+    def bc_profile_taper(taper_fraction=0.07):
+        assert compute_backend == ComputeBackend.NEON
+        _, ny, nz = grid_shape_zip
+        dtype = precision_policy.compute_precision.wp_dtype
+        H_y = dtype(ny - 1)
+        H_z = dtype(nz - 1)
+        two = dtype(2.0)
+        ulb_wp = dtype(ulb)
+        taper_frac = dtype(taper_fraction)
+        core_frac = dtype(1.0 - 2.0 * taper_fraction)
+        _u_vec = wp.vec(velocity_set.d, dtype=dtype)
+
+        @wp.func
+        def bc_profile_warp(index: wp.vec3i):
+            y = dtype(index[1])
+            z = dtype(index[2])
+            y_center = wp.abs(y - (H_y / two))
+            z_center = wp.abs(z - (H_z / two))
+            y_norm = two * y_center / H_y
+            z_norm = two * z_center / H_z
+            max_norm = wp.max(y_norm, z_norm)
+            velocity = ulb_wp
+            if max_norm > core_frac:
+                velocity = ulb_wp * (dtype(1.0) - (max_norm - core_frac) / taper_frac)
+            velocity = wp.max(dtype(0.0), velocity)
+            return wp.vec(velocity, length=1)
+
+        return bc_profile_warp
+
+    def bc_ramp():
+        assert compute_backend == ComputeBackend.NEON
+        dtype = precision_policy.compute_precision.wp_dtype
+        ramp_start_fraction = dtype(0.5)  # Initial velocity fraction (50%)
+        ramp_fraction = dtype(0.05)  # Fraction of num_steps for ramping
+        ramp_steps = int(ramp_fraction * dtype(num_steps))
+        ulb_wp = dtype(ulb)
+        _u_vec = wp.vec(velocity_set.d, dtype=dtype)
+        zero = dtype(0.0)
+
+        @wp.func
+        def ramped_inlet_profile(index: wp.vec3i, timestep: Any):
+            """
+            Time-dependent inlet velocity profile with linear ramp.
+            - index: Spatial index (wp.vec3i)
+            - timestep: Current lattice timestep
+            Returns: velocity vector at this timestep (x-component ramped, y and z zero).
+            """
+            # Linear ramp: from ramp_start_fraction to 1.0 over ramp_steps
+            if timestep < ramp_steps:
+                ramp_factor = ramp_start_fraction + (1.0 - ramp_start_fraction) * dtype(timestep / ramp_steps)
+            else:
+                ramp_factor = 1.0
+            velocity = ulb_wp * ramp_factor
+            #return wp.vec(velocity, length=1)
+            return _u_vec(velocity, zero, zero)
+
+        return ramped_inlet_profile
+
+    # Initialize boundary conditions
+    bc_inlet = HybridBC(
+        bc_method="nonequilibrium_regularized",
+        #profile=bc_ramp(),
+        prescribed_value=(ulb, 0.0, 0.0),
+        indices=left_indices,
+    )
+
+    #bc_inlet = RegularizedBC(
+    #    "velocity",
+    #    profile=bc_ramp(),
+    #    #prescribed_value=(ulb, 0.0, 0.0),
+    #    indices=left_indices,
+    #)
+
+    bc_outlet = DoNothingBC(indices=right_indices)
+
+    #bc_top = FullwayBounceBackBC(indices=top_indices)
+    bc_top = HybridBC(bc_method="nonequilibrium_regularized", prescribed_value=(ulb, 0.0, 0.0), indices=top_indices)
+
+    #bc_bottom = FullwayBounceBackBC(indices=bottom_indices)
+    bc_bottom = HybridBC(bc_method="nonequilibrium_regularized", prescribed_value=(ulb, 0.0, 0.0), indices=bottom_indices)
+
+    #bc_front = FullwayBounceBackBC(indices=filtered_front_indices)
+    bc_front = HybridBC(bc_method="nonequilibrium_regularized", prescribed_value=(ulb, 0.0, 0.0), indices=filtered_front_indices)
+
+    #bc_back = FullwayBounceBackBC(indices=filtered_back_indices)
+    bc_back = HybridBC(bc_method="nonequilibrium_regularized", prescribed_value=(ulb, 0.0, 0.0), indices=filtered_back_indices)
+
+    bc_body = HybridBC(
+        bc_method="bounceback_grads",
+        mesh_vertices=body_vertices,
+        voxelization_method=MeshVoxelizationMethod("AABB_CLOSE", close_voxels=1),
+        use_mesh_distance=True,
+    )
+
+    return [bc_top, bc_bottom, bc_front, bc_back, bc_inlet, bc_outlet, bc_body]  # Body must be last; outlet must be second to last
+    # return [bc_walls, bc_inlet, bc_outlet, bc_body]
+
+
+# Simulation Initialization
+# =========================
+def initialize_simulation(grid, boundary_conditions, omega, initializer, collision_type="KBC", mres_perf_opt=xlb.MresPerfOptimizationType.FUSION_AT_FINEST):
+    """
+    Initialize the multiresolution simulation manager.
+    """
+    sim = xlb.helper.MultiresSimulationManager(
+        omega=omega,
+        grid=grid,
+        boundary_conditions=boundary_conditions,
+        collision_type=collision_type,
+        initializer=initializer,
+        mres_perf_opt=mres_perf_opt,
+    )
+    return sim
+
+# Utility Functions
+# =================
+def print_lift_drag(sim, step, momentum_transfer, ulb, reference_area, voxel_size):
+    """
+    Calculate and print lift and drag coefficients.
+    """
+    boundary_force = momentum_transfer(sim.f_0, sim.f_1, sim.bc_mask, sim.missing_mask)
+    drag = boundary_force[0]
+    lift = boundary_force[2]
+    cd = 2.0 * drag / (ulb**2 * reference_area)
+    cl = 2.0 * lift / (ulb**2 * reference_area)
+    if np.isnan(cd) or np.isnan(cl):
+        raise ValueError(f"NaN detected in coefficients at step {step}: Cd={cd}, Cl={cl}")
+    drag_values.append([cd, cl])
+    # print(f"CD={cd:.3f}, CL={cl:.3f}, Drag Force (lattice units)={drag:.6f}")
+    return cd, cl, drag
+
+def plot_drag_lift(drag_values, output_dir, print_interval, script_name, percentile_range=(15, 85), use_log_scale=False):
+    """
+    Plot the drag coefficient (Cd) history over time and save the plot to the output directory.
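+    The y-limits are clamped to the given percentile range of the Cd history (plus
+    10% padding) so start-up transients do not dominate the axis scaling; Cl is
+    collected in drag_values but not currently drawn.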
+    """
+    drag_values_array = np.array(drag_values)
+    steps = np.arange(0, len(drag_values) * print_interval, print_interval)
+    cd_values = drag_values_array[:, 0]
+    cl_values = drag_values_array[:, 1]
+    y_min = np.percentile(cd_values, percentile_range[0])
+    y_max = np.percentile(cd_values, percentile_range[1])
+    padding = (y_max - y_min) * 0.1
+    y_min, y_max = y_min - padding, y_max + padding
+    if use_log_scale:
+        y_min = max(y_min, 1e-6)
+    plt.figure(figsize=(10, 6))
+    plt.plot(steps, cd_values, label='Drag Coefficient (Cd)', color='blue')
+    plt.xlabel('Simulation Step')
+    plt.ylabel('Coefficient')
+    plt.title(f'{script_name}: Drag Coefficients Over Time')
+    plt.legend()
+    plt.grid(True)
+    plt.ylim(y_min, y_max)
+    if use_log_scale:
+        plt.yscale('log')
+    plt.savefig(os.path.join(output_dir, 'drag_lift_plot.png'))
+    plt.close()
+
+
+def compute_voxel_statistics_and_reference_area(sim, bc_mask_exporter, level_data, actual_num_levels, sparsity_pattern, boundary_conditions, voxel_size):
+    """
+    Compute active/solid voxels, totals, lattice updates, and reference area based on simulation data.
+    """
+    # Compute macro fields
+    sim.macro(sim.f_0, sim.bc_mask, sim.rho, sim.u, streamId=0)
+    fields_data = bc_mask_exporter.get_fields_data({"bc_mask": sim.bc_mask})
+    bc_mask_data = fields_data["bc_mask_0"]
+    level_id_field = bc_mask_exporter.level_id_field
+
+    # Compute solid voxels per level (assuming 255 is the solid marker)
+    solid_voxels = []
+    for lvl in range(actual_num_levels):
+        level_mask = level_id_field == lvl
+        solid_voxels.append(np.sum(bc_mask_data[level_mask] == 255))
+
+    # Compute active voxels (total non-zero in sparsity minus solids)
+    active_voxels = [np.count_nonzero(mask) for mask in sparsity_pattern]
+    active_voxels = [max(0, active_voxels[lvl] - solid_voxels[lvl]) for lvl in range(actual_num_levels)]
+
+    # Totals
+    total_voxels = sum(active_voxels)
+    total_lattice_updates_per_step = sum(active_voxels[lvl] * (2 ** (actual_num_levels - 1 - lvl)) for lvl in range(actual_num_levels))
+
+    # Compute reference area (projected on YZ plane at finest level)
+    finest_level = 0
+    mask_finest = level_id_field == finest_level
+    bc_mask_finest = bc_mask_data[mask_finest]
+    active_indices_finest = np.argwhere(level_data[0][0])
+    bc_body_id = boundary_conditions[-1].id  # Assuming last BC is bc_body
+    solid_voxels_indices = active_indices_finest[bc_mask_finest == bc_body_id]
+    unique_jk = np.unique(solid_voxels_indices[:, 1:3], axis=0)
+    reference_area = unique_jk.shape[0]
+    reference_area_physical = reference_area * (voxel_size ** 2)
+
+    return {
+        "active_voxels": active_voxels,
+        "solid_voxels": solid_voxels,
+        "total_voxels": total_voxels,
+        "total_lattice_updates_per_step": total_lattice_updates_per_step,
+        "reference_area": reference_area,
+        "reference_area_physical": reference_area_physical,
+    }
+
+
+def plot_data(x0, output_dir, delta_x_coarse, sim, IOexporter, prefix='Drivaer_Fastback'):
+    '''
+    DrivAer Car Model
+    https://repository.lboro.ac.uk/articles/dataset/DrivAer_Experimental_Aerodynamic_Dataset/12881213
+    Profiles on the symmetry plane (y=0) covering the entire field.
+    Origin of coordinate system:
+    x=0: center of the car, y=0: symmetry plane, z=0: ground plane
+
+    Coordinates in meters
+    Velocity data in m/s
+
+    Key is the x location
+    Value 'x' is vx
+    Value 'y' is z
+    '''
+
+    def _load_sim_line(csv_path):
+        """
+        Read a CSV exported by IOexporter.to_line without pandas.
+        Returns (z, Ux).
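+
+        Column names are matched heuristically: a z-like column ('z', 's',
+        'distance', ...) with the first column as fallback, and a velocity-like
+        column ('value', 'u', 'velocity', preferring an x component) with the
+        last column as fallback.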
+ """ + # Read with header as column names + data = np.genfromtxt( + csv_path, + delimiter=',', + names=True, # use header + autostrip=True, + dtype=None, # let numpy infer dtypes + encoding='utf-8' # handle any non-ascii names + ) + if data.size == 0: + raise ValueError(f"No data in {csv_path}") + + names = data.dtype.names + lower = {n: n.lower() for n in names} + + # Find z-like column (fallback: first column) + z_candidates = [ + n for n in names + if lower[n] == 'z' + or lower[n] in ('s', 'distance', 'arc_length', 'arclength') + or 'z' == lower[n].split('_')[-1] + ] + z_name = z_candidates[0] if z_candidates else names[0] + + # Find velocity-x column (fallback: last column) + vel_candidates = [n for n in names if any(k in lower[n] for k in ('value', 'u', 'velocity'))] + # Prefer an x-component if present (common patterns after numpy sanitizes names) + vel_x_pref = [n for n in vel_candidates if any(k in lower[n] for k in ('x', '_0', '0'))] + vel_name = vel_x_pref[0] if vel_x_pref else (vel_candidates[0] if vel_candidates else names[-1]) + + z = np.asarray(data[z_name], dtype=float) + ux = np.asarray(data[vel_name], dtype=float) + return z, ux + + testData = { + '-0.781' : { 'x' : [0,38.69,38.88,38.52,38.77,38.52,38.38,38.38,38.52,38.62,38.32,38.4,38.36,37.74,37.69,38,38.14,38.27,38.57,38.7,38.78,38.73,38.53,38.04,37.68,37.84,37.42,37.74,37.83,37.91,38.19,38.03,38.1,38.02,37.82,37.78,37.75,38.45,38.23,37.2,37.12,38.45,38.24,37.66,38.23,38.22,37.56,36.48,37.27,37.36,37.92,37.74,37.78,37.56,37.05,36.83,37.29,37.08,37.2,36.96,36.5,36.6,36.28,36.51,36.48,36.3,35.47,36.33,36.68,37.33,35.64,35.99,34.22,34.96,36.35,36.36,36.36,36.62,36.61,36.25,36.62,35.95,35.82,35.89,35.76,36.49,35.92,35.93,35.29,35.53,36.28,35.64,35.43,35.34,34.74,35.46,36.21,35.57,35.5,36.77,35.58,36.84,37.26,35.79,35.1,35.51,35.06,35,35.56,36.56,37.21,35.19,35.36,35.42,35.82,35.01,35.39,34.09,35.42,35.05,34.69,34.93,34.09,33.77,34.17,33.86,35.67,34.73,34.54,33.34,34.54,33.99,34.96,35.28,34.85,35.63,35.3,35.4,35.31,36.33,34.85,35.2,34.94,34.85,34.65,34.47,33.75,32.88,33.09,34.19,33.23,33.33,33.52,33.29,32.43,31.72,31.78,33.61,34.13,33.8,32.92,32.64,32.42,29.84,29.66,28.73,29.5,27.54,28.6,26.47,25.18,25.09,23.95,22.08,0,0,0,0,0,0], 'y' : [0.322,0.32,0.319,0.317,0.315,0.314,0.312,0.31,0.308,0.307,0.305,0.303,0.302,0.3,0.298,0.297,0.295,0.293,0.291,0.29,0.288,0.286,0.285,0.283,0.281,0.279,0.278,0.276,0.274,0.273,0.271,0.269,0.268,0.266,0.264,0.262,0.261,0.259,0.257,0.256,0.254,0.252,0.251,0.249,0.247,0.245,0.244,0.242,0.24,0.239,0.237,0.235,0.234,0.232,0.23,0.228,0.227,0.225,0.223,0.222,0.22,0.218,0.216,0.215,0.213,0.211,0.21,0.208,0.206,0.205,0.203,0.201,0.199,0.198,0.196,0.194,0.193,0.191,0.189,0.188,0.186,0.184,0.182,0.181,0.179,0.177,0.176,0.174,0.172,0.17,0.169,0.167,0.165,0.164,0.162,0.16,0.159,0.157,0.155,0.153,0.152,0.15,0.148,0.147,0.145,0.143,0.142,0.14,0.138,0.136,0.135,0.133,0.131,0.13,0.128,0.126,0.125,0.123,0.121,0.119,0.118,0.116,0.114,0.113,0.111,0.109,0.107,0.106,0.104,0.102,0.101,0.099,0.097,0.096,0.094,0.092,0.09,0.089,0.087,0.085,0.084,0.082,0.08,0.079,0.077,0.075,0.073,0.072,0.07,0.068,0.067,0.065,0.063,0.061,0.06,0.058,0.056,0.055,0.053,0.051,0.05,0.048,0.046,0.044,0.043,0.041,0.039,0.038,0.036,0.034,0.033,0.031,0.029,0.027,0.026,0.024,0.022,0.021,0.019,0.017]}, + '-0.614' : { 'x' : 
[0,38.37,38.32,38.21,38.4,38.15,38.12,38.18,38.06,38.09,38.07,38.07,37.73,37.33,37.33,37.65,37.59,37.46,37.42,37.47,37.53,37.41,37.14,37.21,37.18,37.24,37.17,37.03,36.94,36.88,36.8,36.57,36.73,36.65,36.4,36.13,35.88,35.76,35.91,36.02,35.94,35.7,35.64,35.45,35.47,35.4,35.38,35.35,35.39,35.18,34.88,35.11,34.86,34.68,34.53,34.53,34.58,34.33,33.94,33.68,33.77,33.61,33.62,33.48,33.29,33.28,33.13,33.21,33.12,32.82,32.35,32.32,32.22,31.65,31.58,31.6,31.59,31.49,31.37,30.98,30.88,30.51,30.53,30.32,30.26,29.86,29.89,29.4,29.47,29.19,28.68,28.89,28.72,28.48,28.67,28.21,28.06,27.84,27.57,27.27,27.16,27.2,27.34,26.38,26.34,26.08,26.65,26,25.89,26.11,25.49,25.94,25.92,26.07,25.69,25.83,25.37,25.17,25.28,25.54,25.04,24.4,24.41,24.47,24.72,24.94,24.53,25.11,24.17,24.7,24.93,24.95,23.83,25.55,25.19,24.93,25.03,24.96,24.37,24.63,25.09,25.21,25.39,25.7,25.68,25.78,25.07,25.23,25.31,24.38,24.81,24.74,24.49,24.7,24.95,25.47,26.38,26.54,25.68,25.22,25.71,25.05,24.82,25.4,25.97,27.11,26.61,25.06,24.13,21.25,21,20.64,20.02,21.65,0,0,0,0,0,0], 'y' : [0.322,0.32,0.319,0.317,0.315,0.314,0.312,0.31,0.308,0.307,0.305,0.303,0.302,0.3,0.298,0.297,0.295,0.293,0.291,0.29,0.288,0.286,0.285,0.283,0.281,0.279,0.278,0.276,0.274,0.273,0.271,0.269,0.268,0.266,0.264,0.262,0.261,0.259,0.257,0.256,0.254,0.252,0.251,0.249,0.247,0.245,0.244,0.242,0.24,0.239,0.237,0.235,0.234,0.232,0.23,0.228,0.227,0.225,0.223,0.222,0.22,0.218,0.216,0.215,0.213,0.211,0.21,0.208,0.206,0.205,0.203,0.201,0.199,0.198,0.196,0.194,0.193,0.191,0.189,0.188,0.186,0.184,0.182,0.181,0.179,0.177,0.176,0.174,0.172,0.17,0.169,0.167,0.165,0.164,0.162,0.16,0.159,0.157,0.155,0.153,0.152,0.15,0.148,0.147,0.145,0.143,0.142,0.14,0.138,0.136,0.135,0.133,0.131,0.13,0.128,0.126,0.125,0.123,0.121,0.119,0.118,0.116,0.114,0.113,0.111,0.109,0.107,0.106,0.104,0.102,0.101,0.099,0.097,0.096,0.094,0.092,0.09,0.089,0.087,0.085,0.084,0.082,0.08,0.079,0.077,0.075,0.073,0.072,0.07,0.068,0.067,0.065,0.063,0.061,0.06,0.058,0.056,0.055,0.053,0.051,0.05,0.048,0.046,0.044,0.043,0.041,0.039,0.038,0.036,0.034,0.033,0.031,0.029,0.027,0.026,0.024,0.022,0.021,0.019,0.017]}, + '-0.521' : { 'x' : [0,40.11,39.99,39.96,40,40.26,40.07,39.93,39.86,40.14,40.18,39.82,39.77,39.87,40,39.85,39.9,39.93,39.91,40.29,40.14,40.19,40.12,40.08,40.16,40.14,39.86,39.98,39.9,40.03,40.02,40.07,40.22,40.05,40.13,39.87,39.96,39.8,39.79,39.75,40.04,40.04,40.16,40.13,40.06,40,40.03,40.14,40.18,40.24,40.39,40.38,40.11,40.18,40.12,40.1,40.25,40.46,40.25,40.37,40.28,40.15,40.39,40.67,40.78,40.76,40.93,40.95,40.71,40.79,41,41.11,41.19,41.38,41.44,41.6,41.7,41.51,40.83,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], 'y' : 
[0.322,0.32,0.319,0.317,0.315,0.314,0.312,0.31,0.308,0.307,0.305,0.303,0.302,0.3,0.298,0.297,0.295,0.293,0.291,0.29,0.288,0.286,0.285,0.283,0.281,0.279,0.278,0.276,0.274,0.273,0.271,0.269,0.268,0.266,0.264,0.262,0.261,0.259,0.257,0.256,0.254,0.252,0.251,0.249,0.247,0.245,0.244,0.242,0.24,0.239,0.237,0.235,0.234,0.232,0.23,0.228,0.227,0.225,0.223,0.222,0.22,0.218,0.216,0.215,0.213,0.211,0.21,0.208,0.206,0.205,0.203,0.201,0.199,0.198,0.196,0.194,0.193,0.191,0.189,0.188,0.186,0.184,0.182,0.181,0.179,0.177,0.176,0.174,0.172,0.17,0.169,0.167,0.165,0.164,0.162,0.16,0.159,0.157,0.155,0.153,0.152,0.15,0.148,0.147,0.145,0.143,0.142,0.14,0.138,0.136,0.135,0.133,0.131,0.13,0.128,0.126,0.125,0.123,0.121,0.119,0.118,0.116,0.114,0.113,0.111,0.109,0.107,0.106,0.104,0.102,0.101,0.099,0.097,0.096,0.094,0.092,0.09,0.089,0.087,0.085,0.084,0.082,0.08,0.079,0.077,0.075,0.073,0.072,0.07,0.068,0.067,0.065,0.063,0.061,0.06,0.058,0.056,0.055,0.053,0.051,0.05,0.048,0.046,0.044,0.043,0.041,0.039,0.038,0.036,0.034,0.033,0.031,0.029,0.027,0.026,0.024,0.022,0.021,0.019,0.017]}, + '-0.48' : { 'x' : [0,40.29,40.5,40.7,40.7,40.86,41.1,41.04,40.77,40.8,41.01,41.18,41.2,41.07,40.9,40.97,41.34,41.57,41.52,41.41,41.77,41.59,41.5,41.47,41.6,41.81,41.65,41.66,41.53,41.69,41.89,41.98,41.92,42.11,42.16,42.33,42.35,42.21,42.18,42.61,42.46,42.59,42.82,43.05,43.14,43.17,43.05,43.27,43.48,43.22,43.59,43.79,43.93,43.88,43.97,44.49,44.5,44.34,44.37,44.71,44.91,45.25,45.41,45.68,45.81,46.08,46.36,45.88,43.96,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], 'y' : [0.322,0.32,0.319,0.317,0.315,0.314,0.312,0.31,0.308,0.307,0.305,0.303,0.302,0.3,0.298,0.297,0.295,0.293,0.291,0.29,0.288,0.286,0.285,0.283,0.281,0.279,0.278,0.276,0.274,0.273,0.271,0.269,0.268,0.266,0.264,0.262,0.261,0.259,0.257,0.256,0.254,0.252,0.251,0.249,0.247,0.245,0.244,0.242,0.24,0.239,0.237,0.235,0.234,0.232,0.23,0.228,0.227,0.225,0.223,0.222,0.22,0.218,0.216,0.215,0.213,0.211,0.21,0.208,0.206,0.205,0.203,0.201,0.199,0.198,0.196,0.194,0.193,0.191,0.189,0.188,0.186,0.184,0.182,0.181,0.179,0.177,0.176,0.174,0.172,0.17,0.169,0.167,0.165,0.164,0.162,0.16,0.159,0.157,0.155,0.153,0.152,0.15,0.148,0.147,0.145,0.143,0.142,0.14,0.138,0.136,0.135,0.133,0.131,0.13,0.128,0.126,0.125,0.123,0.121,0.119,0.118,0.116,0.114,0.113,0.111,0.109,0.107,0.106,0.104,0.102,0.101,0.099,0.097,0.096,0.094,0.092,0.09,0.089,0.087,0.085,0.084,0.082,0.08,0.079,0.077,0.075,0.073,0.072,0.07,0.068,0.067,0.065,0.063,0.061,0.06,0.058,0.056,0.055,0.053,0.051,0.05,0.048,0.046,0.044,0.043,0.041,0.039,0.038,0.036,0.034,0.033,0.031,0.029,0.027,0.026,0.024,0.022,0.021,0.019,0.017]}, + '-0.435' : { 'x' : [0,41.85,41.96,41.94,41.55,41.8,42.07,41.44,41.43,41.63,41.85,41.89,41.55,42.02,42.03,42.33,42.15,41.94,41.88,42.13,42.34,42.17,42.44,42.53,42.72,42.54,42.81,42.82,42.87,43.03,42.94,43.23,43.52,43.53,43.52,43.23,43.32,43.45,43.53,43.62,43.7,43.66,43.94,43.98,44.24,44.24,44.26,44.28,44.46,44.64,44.5,44.16,44.58,44.8,44.68,44.96,45.38,45.62,45.62,45.45,43.99,39.58,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], 'y' : 
[0.322,0.32,0.319,0.317,0.315,0.314,0.312,0.31,0.308,0.307,0.305,0.303,0.302,0.3,0.298,0.297,0.295,0.293,0.291,0.29,0.288,0.286,0.285,0.283,0.281,0.279,0.278,0.276,0.274,0.273,0.271,0.269,0.268,0.266,0.264,0.262,0.261,0.259,0.257,0.256,0.254,0.252,0.251,0.249,0.247,0.245,0.244,0.242,0.24,0.239,0.237,0.235,0.234,0.232,0.23,0.228,0.227,0.225,0.223,0.222,0.22,0.218,0.216,0.215,0.213,0.211,0.21,0.208,0.206,0.205,0.203,0.201,0.199,0.198,0.196,0.194,0.193,0.191,0.189,0.188,0.186,0.184,0.182,0.181,0.179,0.177,0.176,0.174,0.172,0.17,0.169,0.167,0.165,0.164,0.162,0.16,0.159,0.157,0.155,0.153,0.152,0.15,0.148,0.147,0.145,0.143,0.142,0.14,0.138,0.136,0.135,0.133,0.131,0.13,0.128,0.126,0.125,0.123,0.121,0.119,0.118,0.116,0.114,0.113,0.111,0.109,0.107,0.106,0.104,0.102,0.101,0.099,0.097,0.096,0.094,0.092,0.09,0.089,0.087,0.085,0.084,0.082,0.08,0.079,0.077,0.075,0.073,0.072,0.07,0.068,0.067,0.065,0.063,0.061,0.06,0.058,0.056,0.055,0.053,0.051,0.05,0.048,0.046,0.044,0.043,0.041,0.039,0.038,0.036,0.034,0.033,0.031,0.029,0.027,0.026,0.024,0.022,0.021,0.019,0.017]}, + '0.251' : { 'x' : [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,6.74,34.57,38.8,41.9,44.27,45.66,46.29,46.68,46.86,46.92,46.92,46.94,47.09,47.08,47.09,47.13,47.19,47.15,47.16,47.21,47.22,47.21,47.28,47.21,47.19,47.22,37.85,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], 'y' : [0.003,0.005,0.007,0.008,0.01,0.011,0.013,0.015,0.016,0.018,0.019,0.021,0.023,0.024,0.026,0.028,0.029,0.031,0.032,0.034,0.036,0.037,0.039,0.04,0.042,0.044,0.045,0.047,0.049,0.05,0.052,0.053,0.055,0.057,0.058,0.06,0.061,0.063,0.065,0.066,0.068,0.07,0.071,0.073,0.074,0.076,0.078,0.079,0.081,0.083,0.084,0.086,0.087,0.089,0.091,0.092,0.094,0.095,0.097,0.099,0.1,0.102,0.104,0.105,0.107,0.108,0.11,0.112,0.113,0.115,0.116,0.118,0.12,0.121,0.123,0.125,0.126,0.128,0.129,0.131,0.133,0.134,0.136,0.138,0.139,0.141,0.142,0.144,0.146,0.147,0.149,0.15,0.152,0.154,0.155,0.157,0.159,0.16,0.162,0.163,0.165,0.167,0.168,0.17,0.171,0.173,0.175,0.176,0.178,0.18,0.181,0.183,0.184,0.186,0.188,0.189,0.191,0.193,0.194,0.196,0.197,0.199,0.201,0.202,0.204,0.205,0.207,0.209,0.21,0.212,0.214,0.215,0.217,0.218,0.22,0.222,0.223,0.225,0.226,0.228,0.23,0.231,0.233,0.235,0.236,0.238,0.239,0.241,0.243,0.244,0.246,0.247,0.249,0.251,0.252,0.254,0.256,0.257,0.259,0.26,0.262,0.264,0.265,0.267,0.269,0.27,0.272,0.273,0.275,0.277,0.278,0.28,0.281,0.283,0.285,0.286,0.288,0.29,0.291,0.293,0.294,0.296,0.298,0.299,0.301,0.302,0.304,0.306,0.307,0.309,0.311,0.312,0.314,0.315,0.317,0.319,0.32,0.322,0.324,0.325,0.327,0.328,0.33,0.332,0.333,0.335,0.336,0.338,0.34,0.341,0.343,0.345,0.346,0.348,0.349,0.351,0.353,0.354,0.356,0.357,0.359,0.361,0.362,0.364,0.366,0.367,0.369,0.37,0.372,0.374,0.375,0.377,0.378,0.38,0.382,0.383,0.385,0.387,0.388,0.39,0.391,0.393,0.395,0.396,0.398,0.4,0.401,0.403,0.404,0.406,0.408,0.409,0.411,0.412,0.414,0.416,0.417,0.419,0.421,0.422,0.424,0.425,0.427,0.429,0.43]}, + '0.358' : { 'x' : 
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3.94,20.73,29.28,32.38,35.13,37.59,39.75,41.55,42.9,43.76,44.35,44.66,44.83,44.96,45.03,45.08,45.12,45.17,45.23,45.25,45.25,45.29,45.31,45.32,45.35,45.37,45.4,45.4,45.42,45.42,45.43,45.43,45.42,45.41,45.42,45.42,45.41,45.42,36.4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], 'y' : [0.003,0.005,0.007,0.008,0.01,0.011,0.013,0.015,0.016,0.018,0.019,0.021,0.023,0.024,0.026,0.028,0.029,0.031,0.032,0.034,0.036,0.037,0.039,0.04,0.042,0.044,0.045,0.047,0.049,0.05,0.052,0.053,0.055,0.057,0.058,0.06,0.061,0.063,0.065,0.066,0.068,0.07,0.071,0.073,0.074,0.076,0.078,0.079,0.081,0.083,0.084,0.086,0.087,0.089,0.091,0.092,0.094,0.095,0.097,0.099,0.1,0.102,0.104,0.105,0.107,0.108,0.11,0.112,0.113,0.115,0.116,0.118,0.12,0.121,0.123,0.125,0.126,0.128,0.129,0.131,0.133,0.134,0.136,0.138,0.139,0.141,0.142,0.144,0.146,0.147,0.149,0.15,0.152,0.154,0.155,0.157,0.159,0.16,0.162,0.163,0.165,0.167,0.168,0.17,0.171,0.173,0.175,0.176,0.178,0.18,0.181,0.183,0.184,0.186,0.188,0.189,0.191,0.193,0.194,0.196,0.197,0.199,0.201,0.202,0.204,0.205,0.207,0.209,0.21,0.212,0.214,0.215,0.217,0.218,0.22,0.222,0.223,0.225,0.226,0.228,0.23,0.231,0.233,0.235,0.236,0.238,0.239,0.241,0.243,0.244,0.246,0.247,0.249,0.251,0.252,0.254,0.256,0.257,0.259,0.26,0.262,0.264,0.265,0.267,0.269,0.27,0.272,0.273,0.275,0.277,0.278,0.28,0.281,0.283,0.285,0.286,0.288,0.29,0.291,0.293,0.294,0.296,0.298,0.299,0.301,0.302,0.304,0.306,0.307,0.309,0.311,0.312,0.314,0.315,0.317,0.319,0.32,0.322,0.324,0.325,0.327,0.328,0.33,0.332,0.333,0.335,0.336,0.338,0.34,0.341,0.343,0.345,0.346,0.348,0.349,0.351,0.353,0.354,0.356,0.357,0.359,0.361,0.362,0.364,0.366,0.367,0.369,0.37,0.372,0.374,0.375,0.377,0.378,0.38,0.382,0.383,0.385,0.387,0.388,0.39,0.391,0.393,0.395,0.396,0.398,0.4,0.401,0.403,0.404,0.406,0.408,0.409,0.411,0.412,0.414,0.416,0.417,0.419,0.421,0.422,0.424,0.425,0.427,0.429,0.43]}, + '0.46' : { 'x' : [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0.62,3.27,5.68,7.33,9.09,11.06,13.29,15.72,18.33,21,23.82,26.69,29.4,31.74,33.71,35.38,36.78,37.94,38.86,39.55,40.05,40.4,40.67,40.88,41.01,41.16,41.3,41.37,41.42,41.52,41.69,41.78,41.87,41.94,42.03,42.12,42.17,42.26,42.34,42.37,42.4,42.46,42.54,42.6,42.65,42.71,42.77,42.79,42.84,42.91,42.94,42.93,42.92,42.95,42.99,43.03,43.05,43.07,43.06,43.05,35.53,42.53,42.53,42.53,42.61,42.85,42.97,42.96,42.82,42.94,42.99,42.91,42.96,42.96,42.97,42.96,42.93,43.03,43.06,43.04,42.96,42.97,42.88,36.36,6.99,0], 'y' : 
[0.003,0.005,0.007,0.008,0.01,0.011,0.013,0.015,0.016,0.018,0.019,0.021,0.023,0.024,0.026,0.028,0.029,0.031,0.032,0.034,0.036,0.037,0.039,0.04,0.042,0.044,0.045,0.047,0.049,0.05,0.052,0.053,0.055,0.057,0.058,0.06,0.061,0.063,0.065,0.066,0.068,0.07,0.071,0.073,0.074,0.076,0.078,0.079,0.081,0.083,0.084,0.086,0.087,0.089,0.091,0.092,0.094,0.095,0.097,0.099,0.1,0.102,0.104,0.105,0.107,0.108,0.11,0.112,0.113,0.115,0.116,0.118,0.12,0.121,0.123,0.125,0.126,0.128,0.129,0.131,0.133,0.134,0.136,0.138,0.139,0.141,0.142,0.144,0.146,0.147,0.149,0.15,0.152,0.154,0.155,0.157,0.159,0.16,0.162,0.163,0.165,0.167,0.168,0.17,0.171,0.173,0.175,0.176,0.178,0.18,0.181,0.183,0.184,0.186,0.188,0.189,0.191,0.193,0.194,0.196,0.197,0.199,0.201,0.202,0.204,0.205,0.207,0.209,0.21,0.212,0.214,0.215,0.217,0.218,0.22,0.222,0.223,0.225,0.226,0.228,0.23,0.231,0.233,0.235,0.236,0.238,0.239,0.241,0.243,0.244,0.246,0.247,0.249,0.251,0.252,0.254,0.256,0.257,0.259,0.26,0.262,0.264,0.265,0.267,0.269,0.27,0.272,0.273,0.275,0.277,0.278,0.28,0.281,0.283,0.285,0.286,0.288,0.29,0.291,0.293,0.294,0.296,0.298,0.299,0.301,0.302,0.304,0.306,0.307,0.309,0.311,0.312,0.314,0.315,0.317,0.319,0.32,0.322,0.324,0.325,0.327,0.328,0.33,0.332,0.333,0.335,0.336,0.338,0.34,0.341,0.343,0.345,0.346,0.348,0.349,0.351,0.353,0.354,0.356,0.357,0.359,0.361,0.362,0.364,0.366,0.367,0.369,0.37,0.372,0.374,0.375,0.377,0.378,0.38,0.382,0.383,0.385,0.387,0.388,0.39,0.391,0.393,0.395,0.396,0.398,0.4,0.401,0.403,0.404,0.406,0.408,0.409,0.411,0.412,0.414,0.416,0.417,0.419,0.421,0.422,0.424,0.425,0.427,0.429,0.43]}, + '0.56' : { 'x' : [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0.8,4.1,5.12,5.52,5.96,6.53,7.23,8.1,9.14,10.25,11.44,12.84,14.33,15.88,17.52,19.19,20.87,22.59,24.4,26.23,28.04,29.84,31.6,33.31,34.81,36.13,37.25,38.11,38.82,39.34,39.69,39.98,40.2,40.28,40.38,40.5,40.48,40.52,40.49,40.71,40.81,40.79,40.81,40.87,40.98,41.04,41,41.04,41.27,41.33,41.33,41.35,41.45,41.52,41.59,41.71,41.76,41.72,41.75,41.74,41.72,41.75,41.77,41.79,41.87,41.91,41.9,41.86,41.91,41.95,41.89,41.91,41.92,42.03,42.03,42.01,42.09,42.16,42.18,42.22,42.23,42.31,42.28,42.35,42.37,42.39,42.47,42.47,42.43,42.54,42.55,42.52,42.5,42.52,42.62,42.69,42.61,42.62,42.69,42.67,42.67,42.69,42.65,42.52,36.01,6.93,0], 'y' : 
[0.003,0.005,0.007,0.008,0.01,0.011,0.013,0.015,0.016,0.018,0.019,0.021,0.023,0.024,0.026,0.028,0.029,0.031,0.032,0.034,0.036,0.037,0.039,0.04,0.042,0.044,0.045,0.047,0.049,0.05,0.052,0.053,0.055,0.057,0.058,0.06,0.061,0.063,0.065,0.066,0.068,0.07,0.071,0.073,0.074,0.076,0.078,0.079,0.081,0.083,0.084,0.086,0.087,0.089,0.091,0.092,0.094,0.095,0.097,0.099,0.1,0.102,0.104,0.105,0.107,0.108,0.11,0.112,0.113,0.115,0.116,0.118,0.12,0.121,0.123,0.125,0.126,0.128,0.129,0.131,0.133,0.134,0.136,0.138,0.139,0.141,0.142,0.144,0.146,0.147,0.149,0.15,0.152,0.154,0.155,0.157,0.159,0.16,0.162,0.163,0.165,0.167,0.168,0.17,0.171,0.173,0.175,0.176,0.178,0.18,0.181,0.183,0.184,0.186,0.188,0.189,0.191,0.193,0.194,0.196,0.197,0.199,0.201,0.202,0.204,0.205,0.207,0.209,0.21,0.212,0.214,0.215,0.217,0.218,0.22,0.222,0.223,0.225,0.226,0.228,0.23,0.231,0.233,0.235,0.236,0.238,0.239,0.241,0.243,0.244,0.246,0.247,0.249,0.251,0.252,0.254,0.256,0.257,0.259,0.26,0.262,0.264,0.265,0.267,0.269,0.27,0.272,0.273,0.275,0.277,0.278,0.28,0.281,0.283,0.285,0.286,0.288,0.29,0.291,0.293,0.294,0.296,0.298,0.299,0.301,0.302,0.304,0.306,0.307,0.309,0.311,0.312,0.314,0.315,0.317,0.319,0.32,0.322,0.324,0.325,0.327,0.328,0.33,0.332,0.333,0.335,0.336,0.338,0.34,0.341,0.343,0.345,0.346,0.348,0.349,0.351,0.353,0.354,0.356,0.357,0.359,0.361,0.362,0.364,0.366,0.367,0.369,0.37,0.372,0.374,0.375,0.377,0.378,0.38,0.382,0.383,0.385,0.387,0.388,0.39,0.391,0.393,0.395,0.396,0.398,0.4,0.401,0.403,0.404,0.406,0.408,0.409,0.411,0.412,0.414,0.416,0.417,0.419,0.421,0.422,0.424,0.425,0.427,0.429,0.43]}, + '0.67' : { 'x' : [0,0,0,0,1.04,15.87,20.99,23.7,24.83,25.68,26.54,26.88,27.21,27.38,27.23,27.19,27.13,27.08,27.03,26.97,26.92,26.68,26.42,26.16,25.82,25.48,25.09,24.67,24.25,23.82,23.39,22.89,22.37,21.82,21.27,20.69,20.03,19.27,18.49,17.81,17.1,16.34,15.53,14.64,13.75,12.84,11.96,11.08,10.19,9.28,8.33,7.39,6.5,5.64,4.8,4.02,3.15,2.27,1.41,0.7,0.01,-0.69,-1.35,-1.97,-2.54,-3.03,-3.5,-3.93,-4.29,-4.59,-4.82,-4.98,-5.16,-5.35,-5.52,-5.67,-5.81,-5.96,-5.97,-5.94,-5.88,-5.88,-5.85,-5.77,-5.65,-5.55,-5.48,-5.4,-5.31,-5.19,-5.03,-4.88,-4.72,-4.53,-4.33,-4.12,-3.9,-3.68,-3.46,-3.23,-2.98,-2.73,-2.48,-2.21,-1.94,-1.67,-1.43,-1.18,-0.9,-0.57,-0.24,0.1,0.42,0.78,1.15,1.53,1.97,2.44,2.93,3.45,3.97,4.46,4.98,5.48,5.95,6.33,6.76,7.23,7.66,8.17,8.76,9.28,9.78,10.26,10.71,11.25,11.84,12.4,12.96,13.49,13.97,14.41,14.85,15.2,15.49,15.81,16.13,16.46,16.82,17.2,17.59,18.02,18.49,19,19.62,20.32,21.07,21.88,22.75,23.68,24.7,25.71,26.75,27.84,28.95,30.06,31.18,32.29,33.38,34.54,35.61,36.6,37.48,38.2,38.81,39.31,39.68,39.96,40.11,40.22,40.34,40.47,40.52,40.59,40.72,40.74,40.79,40.93,40.97,40.98,41.02,41.03,41.05,41.12,41.18,41.23,41.25,41.13,41.14,41.18,41.33,41.37,41.33,41.33,41.36,41.41,41.39,41.46,41.41,41.37,41.44,41.48,41.48,41.52,41.59,41.62,41.59,41.6,41.7,41.71,41.72,41.74,41.67,41.71,41.72,41.78,41.82,41.88,41.93,41.92,41.94,41.95,41.9,41.91,41.98,41.84,41.83,41.91,42,41.89,41.82,41.79,41.9,42.04,42,42.09,42.12,42.09,42.01,42,41.96,41.86,41.87,41.93,42.04,42.12,42.2,42.13,42.16,42.26,42.18,42.17,35.63,6.85,0], 'y' : 
[0.003,0.005,0.007,0.008,0.01,0.011,0.013,0.015,0.016,0.018,0.019,0.021,0.023,0.024,0.026,0.028,0.029,0.031,0.032,0.034,0.036,0.037,0.039,0.04,0.042,0.044,0.045,0.047,0.049,0.05,0.052,0.053,0.055,0.057,0.058,0.06,0.061,0.063,0.065,0.066,0.068,0.07,0.071,0.073,0.074,0.076,0.078,0.079,0.081,0.083,0.084,0.086,0.087,0.089,0.091,0.092,0.094,0.095,0.097,0.099,0.1,0.102,0.104,0.105,0.107,0.108,0.11,0.112,0.113,0.115,0.116,0.118,0.12,0.121,0.123,0.125,0.126,0.128,0.129,0.131,0.133,0.134,0.136,0.138,0.139,0.141,0.142,0.144,0.146,0.147,0.149,0.15,0.152,0.154,0.155,0.157,0.159,0.16,0.162,0.163,0.165,0.167,0.168,0.17,0.171,0.173,0.175,0.176,0.178,0.18,0.181,0.183,0.184,0.186,0.188,0.189,0.191,0.193,0.194,0.196,0.197,0.199,0.201,0.202,0.204,0.205,0.207,0.209,0.21,0.212,0.214,0.215,0.217,0.218,0.22,0.222,0.223,0.225,0.226,0.228,0.23,0.231,0.233,0.235,0.236,0.238,0.239,0.241,0.243,0.244,0.246,0.247,0.249,0.251,0.252,0.254,0.256,0.257,0.259,0.26,0.262,0.264,0.265,0.267,0.269,0.27,0.272,0.273,0.275,0.277,0.278,0.28,0.281,0.283,0.285,0.286,0.288,0.29,0.291,0.293,0.294,0.296,0.298,0.299,0.301,0.302,0.304,0.306,0.307,0.309,0.311,0.312,0.314,0.315,0.317,0.319,0.32,0.322,0.324,0.325,0.327,0.328,0.33,0.332,0.333,0.335,0.336,0.338,0.34,0.341,0.343,0.345,0.346,0.348,0.349,0.351,0.353,0.354,0.356,0.357,0.359,0.361,0.362,0.364,0.366,0.367,0.369,0.37,0.372,0.374,0.375,0.377,0.378,0.38,0.382,0.383,0.385,0.387,0.388,0.39,0.391,0.393,0.395,0.396,0.398,0.4,0.401,0.403,0.404,0.406,0.408,0.409,0.411,0.412,0.414,0.416,0.417,0.419,0.421,0.422,0.424,0.425,0.427,0.429,0.43]}, + '0.77' : { 'x' : [0,0,0,0,0,0,8.78,20.92,22.19,22.91,23.37,23.89,24.11,24.22,24.29,24.32,24.26,24.18,24.1,24.02,23.78,23.41,23.15,22.96,22.33,21.78,21.3,20.81,20.35,19.91,19.46,19,18.55,18.06,17.47,16.86,16.31,15.73,15.13,14.49,13.85,13.21,12.6,12.12,11.62,11.09,10.45,9.87,9.31,8.69,8.1,7.52,6.91,6.34,5.77,5.24,4.77,4.31,3.82,3.41,3.02,2.66,2.2,1.8,1.46,1.15,0.91,0.71,0.55,0.46,0.4,0.38,0.25,0.11,0.11,0.11,0.08,-0.01,0.01,0.04,0.04,0.19,0.42,0.71,0.94,1.18,1.43,1.68,2.03,2.46,2.89,3.33,3.78,4.18,4.59,5,5.37,5.81,6.29,6.82,7.2,7.6,8.07,8.58,9.09,9.63,10.24,10.71,11.11,11.67,12.16,12.54,12.88,13.24,13.63,14.05,14.47,14.89,15.3,15.69,16.05,16.39,16.84,17.23,17.56,17.88,18.17,18.47,18.81,19.1,19.42,19.74,20.06,20.38,20.64,20.88,21.12,21.53,21.94,22.38,22.91,23.46,23.97,24.5,25.15,25.84,26.52,27.26,28,28.79,29.53,30.26,31,31.73,32.45,33.15,33.88,34.52,35.16,35.82,36.37,36.82,37.19,37.53,37.76,37.89,38,38.11,38.22,38.28,38.4,38.57,38.63,38.75,38.87,38.97,38.99,38.97,39.11,39.27,39.45,39.51,39.63,39.74,39.79,39.62,39.59,39.62,39.72,39.8,39.88,39.99,40.07,40.18,40.16,40.17,40.19,40.19,40.2,40.18,40.26,40.29,40.34,40.4,40.44,40.43,40.56,40.55,40.48,40.44,40.44,40.46,40.43,40.51,40.57,40.57,40.51,40.58,40.67,40.72,40.74,40.82,40.89,40.89,40.87,40.82,40.78,40.8,40.82,40.87,40.94,40.97,41.08,41.12,41.21,41.1,40.99,40.93,41.02,41.1,41.21,41.24,41.21,41.15,41.2,41.3,41.23,41.03,41.09,41.12,41.18,41.13,41.13,41.24,41.11,41.06,41.1,41.12,41.11,41.25,41.26,41.12,34.77,6.69,0], 'y' : 
[0.003,0.005,0.007,0.008,0.01,0.011,0.013,0.015,0.016,0.018,0.019,0.021,0.023,0.024,0.026,0.028,0.029,0.031,0.032,0.034,0.036,0.037,0.039,0.04,0.042,0.044,0.045,0.047,0.049,0.05,0.052,0.053,0.055,0.057,0.058,0.06,0.061,0.063,0.065,0.066,0.068,0.07,0.071,0.073,0.074,0.076,0.078,0.079,0.081,0.083,0.084,0.086,0.087,0.089,0.091,0.092,0.094,0.095,0.097,0.099,0.1,0.102,0.104,0.105,0.107,0.108,0.11,0.112,0.113,0.115,0.116,0.118,0.12,0.121,0.123,0.125,0.126,0.128,0.129,0.131,0.133,0.134,0.136,0.138,0.139,0.141,0.142,0.144,0.146,0.147,0.149,0.15,0.152,0.154,0.155,0.157,0.159,0.16,0.162,0.163,0.165,0.167,0.168,0.17,0.171,0.173,0.175,0.176,0.178,0.18,0.181,0.183,0.184,0.186,0.188,0.189,0.191,0.193,0.194,0.196,0.197,0.199,0.201,0.202,0.204,0.205,0.207,0.209,0.21,0.212,0.214,0.215,0.217,0.218,0.22,0.222,0.223,0.225,0.226,0.228,0.23,0.231,0.233,0.235,0.236,0.238,0.239,0.241,0.243,0.244,0.246,0.247,0.249,0.251,0.252,0.254,0.256,0.257,0.259,0.26,0.262,0.264,0.265,0.267,0.269,0.27,0.272,0.273,0.275,0.277,0.278,0.28,0.281,0.283,0.285,0.286,0.288,0.29,0.291,0.293,0.294,0.296,0.298,0.299,0.301,0.302,0.304,0.306,0.307,0.309,0.311,0.312,0.314,0.315,0.317,0.319,0.32,0.322,0.324,0.325,0.327,0.328,0.33,0.332,0.333,0.335,0.336,0.338,0.34,0.341,0.343,0.345,0.346,0.348,0.349,0.351,0.353,0.354,0.356,0.357,0.359,0.361,0.362,0.364,0.366,0.367,0.369,0.37,0.372,0.374,0.375,0.377,0.378,0.38,0.382,0.383,0.385,0.387,0.388,0.39,0.391,0.393,0.395,0.396,0.398,0.4,0.401,0.403,0.404,0.406,0.408,0.409,0.411,0.412,0.414,0.416,0.417,0.419,0.421,0.422,0.424,0.425,0.427,0.429,0.43]}, + '0.88' : { 'x' : [0,0,0,0,0,0,6.03,14.35,15.53,16.04,16.39,16.85,17.5,18.12,18.39,18.37,18.38,18.49,18.7,18.97,19.19,19.02,18.9,18.82,18.58,18.23,17.88,17.71,17.56,17.45,17.21,16.91,16.55,16.04,15.83,15.63,15.36,15.07,14.8,14.56,14.4,14.1,13.7,13.25,13,12.92,12.79,12.59,12.36,12.17,11.9,11.66,11.58,11.46,11.37,11.34,11.26,11.19,11.21,11.32,11.44,11.54,11.55,11.63,11.77,11.79,11.89,12.08,12.26,12.48,12.66,12.63,12.83,13.08,13.32,13.55,13.8,14.12,14.32,14.61,15.07,15.38,15.66,15.75,16.05,16.19,16.32,16.55,16.96,17.49,17.7,17.95,18.29,18.72,18.82,19,19.32,19.52,19.74,19.92,20.14,20.4,20.59,20.84,21.05,21.27,21.56,21.88,22.2,22.18,22.17,22.15,22.6,22.87,23.2,23.43,23.53,23.71,23.95,24.05,24.25,24.79,25.28,25.66,25.81,26.17,26.5,26.83,27.15,27.48,27.9,28.38,28.94,29.47,29.91,30.46,30.94,31.44,31.9,32.26,32.5,32.91,33.31,33.65,33.97,34.2,34.23,34.72,35.07,35.33,35.63,35.91,36.19,36.46,36.66,36.73,36.68,36.64,36.75,36.82,37.04,37.24,37.24,37.1,37.06,37.26,37.43,37.47,37.35,37.13,37.04,36.73,36.75,37.17,37.45,37.55,37.7,37.93,37.88,37.79,37.81,38.11,38.18,38.22,38.2,38.29,38.26,38.2,38.2,38.35,38.46,38.6,38.66,38.65,38.84,38.98,39.1,39.15,39.09,38.92,38.92,39.01,39.04,39.2,39.25,39.38,39.36,39.39,39.3,39.34,39.4,39.41,39.52,39.58,39.59,39.48,39.53,39.52,39.59,39.59,39.62,39.58,39.65,39.76,39.77,39.55,39.69,39.77,39.84,39.74,39.81,40.13,40.1,40.16,40.23,40.09,40.04,40.15,40.12,39.98,39.82,39.57,39.49,39.49,39.7,39.89,40.13,40.47,40.34,40.23,40,39.89,39.88,40.03,40.31,40.4,40.36,40.19,40.04,39.88,40.24,39.8,33.53,6.45,0], 'y' : 
[0.003,0.005,0.007,0.008,0.01,0.011,0.013,0.015,0.016,0.018,0.019,0.021,0.023,0.024,0.026,0.028,0.029,0.031,0.032,0.034,0.036,0.037,0.039,0.04,0.042,0.044,0.045,0.047,0.049,0.05,0.052,0.053,0.055,0.057,0.058,0.06,0.061,0.063,0.065,0.066,0.068,0.07,0.071,0.073,0.074,0.076,0.078,0.079,0.081,0.083,0.084,0.086,0.087,0.089,0.091,0.092,0.094,0.095,0.097,0.099,0.1,0.102,0.104,0.105,0.107,0.108,0.11,0.112,0.113,0.115,0.116,0.118,0.12,0.121,0.123,0.125,0.126,0.128,0.129,0.131,0.133,0.134,0.136,0.138,0.139,0.141,0.142,0.144,0.146,0.147,0.149,0.15,0.152,0.154,0.155,0.157,0.159,0.16,0.162,0.163,0.165,0.167,0.168,0.17,0.171,0.173,0.175,0.176,0.178,0.18,0.181,0.183,0.184,0.186,0.188,0.189,0.191,0.193,0.194,0.196,0.197,0.199,0.201,0.202,0.204,0.205,0.207,0.209,0.21,0.212,0.214,0.215,0.217,0.218,0.22,0.222,0.223,0.225,0.226,0.228,0.23,0.231,0.233,0.235,0.236,0.238,0.239,0.241,0.243,0.244,0.246,0.247,0.249,0.251,0.252,0.254,0.256,0.257,0.259,0.26,0.262,0.264,0.265,0.267,0.269,0.27,0.272,0.273,0.275,0.277,0.278,0.28,0.281,0.283,0.285,0.286,0.288,0.29,0.291,0.293,0.294,0.296,0.298,0.299,0.301,0.302,0.304,0.306,0.307,0.309,0.311,0.312,0.314,0.315,0.317,0.319,0.32,0.322,0.324,0.325,0.327,0.328,0.33,0.332,0.333,0.335,0.336,0.338,0.34,0.341,0.343,0.345,0.346,0.348,0.349,0.351,0.353,0.354,0.356,0.357,0.359,0.361,0.362,0.364,0.366,0.367,0.369,0.37,0.372,0.374,0.375,0.377,0.378,0.38,0.382,0.383,0.385,0.387,0.388,0.39,0.391,0.393,0.395,0.396,0.398,0.4,0.401,0.403,0.404,0.406,0.408,0.409,0.411,0.412,0.414,0.416,0.417,0.419,0.421,0.422,0.424,0.425,0.427,0.429,0.43]}, + + } + + xData =[-0.781, -0.614, -0.521, -0.48, -0.435, 0.251, 0.358, 0.46, 0.56, 0.67, .77, 0.88] + + + for i in xData: + #Extract y dimension + refY = np.array(testData[str(i)]['y']) + #u is already converted to model units (m/s) no need to convert reference velocity + #Ref + refX = np.array(testData[str(i)]['x']) + + #From reference x0 (rear of body) find x1 for plot + x1 = x0[0] + i + + + print(f' Start x1 is {x1}, {x0[1]}, {x0[2]}') + print(f' End x1 is {x1}, {x0[1]}, {x0[2]+1.0}') + sim.macro(sim.f_0, sim.bc_mask, sim.rho, sim.u, streamId=0) + filename = os.path.join(output_dir, f"{prefix}_{str(i)}") + wp.synchronize() + IOexporter.to_line( + filename, + {"velocity": sim.u}, + start_point=(x1, x0[1], x0[2]), + end_point=(x1, x0[1], x0[2]+1.0), + resolution=250, + component=0, + radius=delta_x_coarse #needed with model units + ) + # read the CSV written by the exporter + csv_path = filename + "_velocity_0.csv" # adjust if your exporter uses another extension + print(f"CSV path is {csv_path}") + + try: + sim_z, sim_ux = _load_sim_line(csv_path) + except Exception as e: + print(f"Failed to read {csv_path}: {e}") + continue + + # plot reference vs simulation + plt.figure(figsize=(4.5, 6)) + plt.plot(refX, refY, 'o', mfc='none', label='Experimental)') + plt.plot(sim_ux, sim_z, '-', lw=2, label='Simulation') + plt.xlim(np.min(refX)*.9, np.max(refX)*1.1) + plt.ylim(np.min(refY), np.max(refY)) + plt.xlabel('Ux [m/s]') + plt.ylabel('z [m]') + plt.title(f'Velocity Plot at {i:+.3f}') + plt.grid(True, alpha=0.3) + plt.legend() + plt.tight_layout() + plt.savefig(filename + ".png", dpi=150) + plt.close() + + + +# Main Script +# =========== +# Initialize XLB + +xlb.init( + velocity_set=velocity_set, + default_backend=compute_backend, + default_precision_policy=precision_policy, +) + +# Generate mesh +if mesher_type == "makemesh": + level_data, body_vertices, grid_shape_zip, partSize, actual_num_levels, shift, sparsity_pattern, level_origins, x0 = 
generate_makemesh_mesh( + stl_filename, voxel_size, trim, trim_voxels + ) +elif mesher_type == "cuboid": + level_data, body_vertices, grid_shape_zip, partSize, actual_num_levels, shift, sparsity_pattern, level_origins, x0 = generate_cuboid_mesh( + stl_filename, voxel_size, trim, trim_voxels + ) +else: + raise ValueError(f"Invalid mesher_type: {mesher_type}. Must be 'makemesh' or 'cuboid'.") + +# Characteristic length +L = partSize[0] +L = float(L) # Cast to built-in float to avoid NumPy type propagation issues with Warp + +# Compute Re +Re = u_physical * L / kinematic_viscosity + +# Calculate lattice parameters +delta_x_coarse = voxel_size * 2 ** (actual_num_levels - 1) +delta_t = voxel_size * ulb / u_physical +nu_lattice = kinematic_viscosity * delta_t / (voxel_size ** 2) +tau = (3.0 * nu_lattice + 0.5) +#omega = 1.0 / (3.0 * nu_lattice + 0.5) +omega = [1/tau] +# Compute tau and omega for coarser levels (level 1 to actual_num_levels-1) +for level in range(1, actual_num_levels): + #tau = 2.0 * tau - 0.5 + tau = tau / 2.0 + 0.25 + omega_level = 1.0 / tau + omega.append(omega_level) + +# Print the omega array for verification +print("Omega array for all levels (finest to coarsest):", omega) + + +# Create output directory +current_dir = os.path.join(os.path.dirname(__file__)) +output_dir = os.path.join(current_dir, script_name) +if os.path.exists(output_dir): + shutil.rmtree(output_dir) +os.makedirs(output_dir) + +# Define exporter objects +field_name_cardinality_dict = {"velocity": 3, "density": 1} +h5exporter = MultiresIO( + field_name_cardinality_dict, + level_data, + scale=voxel_size, + offset=-shift, + timestep_size=delta_t, +) +bc_mask_exporter = MultiresIO({"bc_mask": 1}, level_data, scale=voxel_size, offset=-shift,) + +# Create grid +grid = multires_grid_factory( + grid_shape_zip, + velocity_set=velocity_set, + sparsity_pattern_list=sparsity_pattern, + sparsity_pattern_origins=[neon.Index_3d(*box_origin) for box_origin in level_origins], +) + +# Calculate num_steps +coarsest_level = grid.count_levels - 1 +grid_shape_x_coarsest = grid.level_to_shape(coarsest_level)[0] +num_steps = int(flow_passes * (grid_shape_x_coarsest / ulb)) + +# Calculate print and file output intervals +print_interval = max(1, int(num_steps * (print_interval_percentage / 100.0))) +crossover_step = int(num_steps * (file_output_crossover_percentage / 100.0)) +file_output_interval_pre_crossover = max(1, int(crossover_step / num_file_outputs_pre_crossover)) if num_file_outputs_pre_crossover > 0 else num_steps + 1 +file_output_interval_post_crossover = max(1, int((num_steps - crossover_step) / num_file_outputs_post_crossover)) if num_file_outputs_post_crossover > 0 else num_steps + 1 +final_print_interval = max(1, int((num_steps-crossover_step) * (print_interval_percentage / 100.0))) +# Setup boundary conditions +boundary_conditions = setup_boundary_conditions(grid, level_data, body_vertices, ulb, num_steps, compute_backend) + +# Create initializer +initializer = CustomMultiresInitializer( + bc_id=boundary_conditions[-2].id, # bc_outlet + constant_velocity_vector=(ulb, 0.0, 0.0), + velocity_set=velocity_set, + precision_policy=precision_policy, + compute_backend=compute_backend, +) + +# Initialize simulation +sim = initialize_simulation(grid, boundary_conditions, omega, initializer) + +# Compute voxel statistics and reference area +stats = compute_voxel_statistics_and_reference_area(sim, bc_mask_exporter, level_data, actual_num_levels, sparsity_pattern, boundary_conditions, voxel_size) +active_voxels = 
stats["active_voxels"] +solid_voxels = stats["solid_voxels"] +total_voxels = stats["total_voxels"] +total_lattice_updates_per_step = stats["total_lattice_updates_per_step"] +reference_area = stats["reference_area"] +reference_area_physical = stats["reference_area_physical"] + +# Save initial bc_mask +filename = os.path.join(output_dir, f"{script_name}_initial_bc_mask") +bc_mask_exporter.to_hdf5(filename, {"bc_mask": sim.bc_mask}, compression="gzip", compression_opts=1) +wp.synchronize() + + +# Setup momentum transfer +# momentum_transfer = MultiresMomentumTransfer(boundary_conditions[-1], compute_backend=compute_backend) # bc_body + +momentum_transfer = MultiresMomentumTransfer( + boundary_conditions[-1], + mres_perf_opt=xlb.MresPerfOptimizationType.FUSION_AT_FINEST, + compute_backend=compute_backend, +) + +# Print simulation info +print("\n" + "=" * 50 + "\n") +print(f"Simulation Configuration for Re = {Re}:") +# print(f"Grid shape at finest level: {grid_shape_zip}") +# print(f"Grid shape at coarsest level: {grid.level_to_shape(coarsest_level)}") +print(f"Number of flow passes: {flow_passes}") +print(f"Calculated iterations: {num_steps:,}") +# print(f"Output directory: {output_dir}") +# print(f"Print interval: {print_interval} steps (every {print_interval_percentage}% of iterations)") +# print(f"File output interval pre-crossover (0-{file_output_crossover_percentage}%): {file_output_interval_pre_crossover} steps") +# print(f"File output interval post-crossover ({file_output_crossover_percentage}-100%): {file_output_interval_post_crossover} steps") +print(f"Finest voxel size: {voxel_size} meters") +print(f"Coarsest voxel size: {delta_x_coarse} meters") +print(f"Total voxels: {sum(np.count_nonzero(mask) for mask in sparsity_pattern):,}") +print(f"Total active voxels: {total_voxels:,}") +print(f"Active voxels per level: {active_voxels}") +print(f"Solid voxels per level: {solid_voxels}") +print(f"Total lattice updates per global step: {total_lattice_updates_per_step:,}") +print(f"Actual number of refinement levels: {actual_num_levels}") +print(f"Physical inlet velocity: {u_physical:.4f} m/s") +print(f"Lattice velocity (ulb): {ulb}") +print(f"Characteristic length: {L: .4f} meters") +# print(f"Kinematic viscosity: {kinematic_viscosity} m^2/s") +print(f"Computed reference area (bc_mask): {reference_area} lattice units") +print(f"Physical reference area (bc_mask): {reference_area_physical:.6f} m^2") +print(f"Reynolds number: {Re:,.2f}") +# print(f"Lattice viscosity: {nu_lattice:.5f}") +#print(f"Relaxation parameter (omega): {omega:.5f}") +print (f"ULB Mach from inlet= {ma:.4f}") +print("\n" + "=" * 50 + "\n") + +# -------------------------- Simulation Loop -------------------------- +wp.synchronize() +start_time = time.time() +compute_time = 0.0 +steps_since_last_print = 0 +drag_values = [] +for step in range(num_steps): + step_start = time.time() + sim.step() + wp.synchronize() + compute_time += time.time() - step_start + steps_since_last_print += 1 + + if (step % print_interval == 0 and step < crossover_step) or step == num_steps - 1: + sim.macro(sim.f_0, sim.bc_mask, sim.rho, sim.u, streamId=0) + wp.synchronize() + cd, cl, drag = print_lift_drag(sim, step, momentum_transfer, ulb, reference_area, voxel_size) + if step % (10*print_interval) == 0: + filename = os.path.join(output_dir, f"{script_name}_{step:04d}") + h5exporter.to_slice_image( + filename, + {"velocity": sim.u}, + plane_point=(1, 0, 0), + plane_normal=(0, 1, 0), + grid_res=2000, + bounds=(0, 1, 0, 1), + show_axes=False, + 
show_colorbar=False, + slice_thickness=delta_x_coarse, #needed when using model units + normalize = u_physical*1.75, #eventually we could have the 1.5 read from json as we did before + ) + end_time = time.time() + elapsed = end_time - start_time + total_lattice_updates = total_lattice_updates_per_step * steps_since_last_print + MLUPS = total_lattice_updates / compute_time / 1e6 if compute_time > 0 else 0.0 + current_flow_passes = step * ulb / grid_shape_x_coarsest + remaining_steps = num_steps - step - 1 + time_remaining = 0.0 if MLUPS == 0 else (total_lattice_updates_per_step * remaining_steps) / (MLUPS * 1e6) + hours, rem = divmod(time_remaining, 3600) + minutes, seconds = divmod(rem, 60) + time_remaining_str = f"{int(hours):02d}h {int(minutes):02d}m {int(seconds):02d}s" + percent_complete = (step + 1) / num_steps * 100 + print(f"Completed step {step}/{num_steps} ({percent_complete:.2f}% complete)") + print(f" Flow Passes: {current_flow_passes:.2f}") + print(f" Time elapsed: {elapsed:.1f}s, Compute time: {compute_time:.1f}s, ETA: {time_remaining_str}") + print(f" MLUPS: {MLUPS:.1f}") + print(f" Cd= {cd:.3f}, Cl= {cl:.3f}, Drag Force (lattice units)={drag:.3f}") + start_time = time.time() + compute_time = 0.0 + steps_since_last_print = 0 + file_output_interval = file_output_interval_pre_crossover if step < crossover_step else file_output_interval_post_crossover + if step % file_output_interval == 0 or step == num_steps - 1: + sim.macro(sim.f_0, sim.bc_mask, sim.rho, sim.u, streamId=0) + filename = os.path.join(output_dir, f"{script_name}_{step:04d}") + h5exporter.to_hdf5(filename, {"velocity": sim.u, "density": sim.rho}, compression="gzip", compression_opts=1) + + wp.synchronize() + if step >= crossover_step and step % final_print_interval ==0 : + sim.macro(sim.f_0, sim.bc_mask, sim.rho, sim.u, streamId=0) + wp.synchronize() + cd, cl, drag = print_lift_drag(sim, step, momentum_transfer, ulb, reference_area, voxel_size) + if step % (10*final_print_interval) == 0: + filename = os.path.join(output_dir, f"{script_name}_{step:04d}") + h5exporter.to_slice_image( + filename, + {"velocity": sim.u}, + plane_point=(1, 0, 0), + plane_normal=(0, 1, 0), + grid_res=2000, + bounds=(0, 1, 0, 1), + show_axes=False, + show_colorbar=False, + slice_thickness=delta_x_coarse, #needed when using model units + normalize = u_physical*1.75, #eventually we could have the 1.5 read from json as we did before + ) + print(f"Completed step {step}/{num_steps} ") + print(f" Cd= {cd:.3f}, Cl= {cl:.3f}, Drag Force (lattice units)={drag:.3f}") + + if step == num_steps - 1: + plot_data(x0, output_dir, delta_x_coarse, sim, h5exporter, prefix='Drivaer_Fastback') + +# Save drag and lift data to CSV +if len(drag_values) > 0: + with open(os.path.join(output_dir, "drag_lift.csv"), 'w') as fd: + fd.write("Step,Cd,Cl\n") + for i, (cd, cl) in enumerate(drag_values): + fd.write(f"{i * print_interval},{cd},{cl}\n") + plot_drag_lift(drag_values, output_dir, print_interval, script_name) + +# Calculate and print average Cd and Cl for the last 50% +drag_values_array = np.array(drag_values) +if len(drag_values) > 0: + start_index = int(len(drag_values) * file_output_crossover_percentage/100) + last_half = drag_values_array[start_index:, :] + avg_cd = np.mean(last_half[:, 0]) + avg_cl = np.mean(last_half[:, 1]) + print(f"Average Drag Coefficient (Cd) for last {(100-file_output_crossover_percentage)}%: {avg_cd:.6f}") + print(f"Average Lift Coefficient (Cl) for last {(100-file_output_crossover_percentage)}%: {avg_cl:.6f}") + 
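# Editor's sketch (not part of the original script): report the spread of Cd
+ # over the averaging window as a rough statistical-convergence indicator.
+ print(f"Cd standard deviation over averaging window: {np.std(last_half[:, 0]):.6f}") +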
print(f"Experimental Drag Coefficient (Cd): {0.3088}") + print(f"Error Drag Coefficient (Cd): {((avg_cd-0.3088)/0.3088)*100:.2f}%") + +else: + print("No drag or lift data collected.") + diff --git a/examples/mres_json.py b/examples/mres_json.py new file mode 100644 index 00000000..24c4bc85 --- /dev/null +++ b/examples/mres_json.py @@ -0,0 +1,1256 @@ +import neon +import warp as wp +import numpy as np +import os, sys, time, trimesh +import matplotlib.pyplot as plt + +import xlb +from xlb.compute_backend import ComputeBackend +from xlb.precision_policy import PrecisionPolicy +from xlb.grid import multires_grid_factory +from xlb.operator.boundary_condition import ( + FullwayBounceBackBC, + HalfwayBounceBackBC, + RegularizedBC, + ExtrapolationOutflowBC, + DoNothingBC, + ZouHeBC, + HybridBC, +) +from xlb.operator.boundary_masker import MeshVoxelizationMethod +from xlb.utils.mesher import make_cuboid_mesh, MultiresIO +from xlb.utils.makemesh import generate_mesh +from xlb.operator.force import MultiresMomentumTransfer +from xlb.helper.initializers import CustomMultiresInitializer +from xlb import MresPerfOptimizationType +import httpx, logging, getopt, json +from json.decoder import JSONDecodeError +from uuid import uuid4 +from threading import Thread +from typing import Any +# Use 8 CPU devices if running on ACP +acp_env = os.environ.get('ACP_ENVIRONMENT', '') +if acp_env not in ('', 'local'): + os.environ["XLA_FLAGS"] = '--xla_force_host_platform_device_count=8' + +WORKER_PROTOCOL = os.environ.get('SCM_PROTOCOL', '') +WORKER_HOST = os.environ.get('SCM_HOST', '') +WORKER_PORT = os.environ.get('SCM_PORT', '') + +HEARTBEAT_SLEEP = int(float(os.environ.get('SCM_SOLVERHEARTBEAT', 1000)) / 1000) +HEARTBEAT_THREAD = None +HEARTBEAT_CANCELLED = False + +### SCM Functions ### +def running_via_scm(): + """ + Checks if the code is running via the SCM worker protocol. + + Returns: + bool: True if WORKER_PROTOCOL is set (indicating execution via SCM), False otherwise. + """ + + if WORKER_PROTOCOL: + return True + + return False + +def scm_event(endpoint, data=0, event_id=''): + """ + Sends an event to a specified SCM worker endpoint using HTTP POST and returns the response. + + Args: + endpoint (str): The endpoint path to send the event to. + data (int, optional): The data payload to send. Defaults to 0. + event_id (str, optional): An identifier for the event. Defaults to ''. + + Returns: + Any: The 'response' field from the JSON response if available, otherwise the provided event_id. + + Notes: + - If any of WORKER_PROTOCOL, WORKER_HOST, WORKER_PORT, or endpoint are not set, returns the event_id. + - If the response cannot be decoded as JSON, returns the event_id. + """ + + if not endpoint or not WORKER_PROTOCOL or not WORKER_HOST or not WORKER_PORT: + return event_id + + url = f'{WORKER_PROTOCOL}://{WORKER_HOST}:{WORKER_PORT}{endpoint}' + + headers = { + 'Content-Type': 'application/json' + } + + data = { + 'data': data, + 'id': event_id, + } + + response = httpx.post(url, headers=headers, json=data) + + try: + return response.json().get('response', event_id) + except JSONDecodeError: + return event_id + + return event_id + +def heartbeat(): + """ + Continuously sends a heartbeat signal to the compute worker endpoint to indicate the process is alive. + + The function repeatedly calls the `scm_event` function with the '/ComputeWorker/v1/heartbeat' endpoint. + If the response is 'canceled' or the global variable `HEARTBEAT_CANCELLED` is set to True, the loop breaks and the function returns. 
+ Otherwise, the function sleeps for a duration specified by the global variable `HEARTBEAT_SLEEP` before sending the next heartbeat. + + Returns: + None + """ + + while True: + response = scm_event('/ComputeWorker/v1/heartbeat') + + if response == 'canceled' or HEARTBEAT_CANCELLED: + return + + time.sleep(HEARTBEAT_SLEEP) + +def scm_init(): + """ + Performs SCM initialization by attaching to the compute worker and starting the heartbeat thread. + + This function performs the following actions: + 1. Sends an attach event to the compute worker endpoint. + 2. Creates and starts a global heartbeat thread to maintain regular communication and status checks. + + Globals: + HEARTBEAT_THREAD: Thread object responsible for running the heartbeat function. + + Side Effects: + Modifies the global HEARTBEAT_THREAD variable and starts a new thread. + """ + + global HEARTBEAT_THREAD + + scm_event('/ComputeWorker/v1/attach', 1) + + HEARTBEAT_THREAD = Thread(target=heartbeat) + HEARTBEAT_THREAD.start() + + scm_progress(0) + +def scm_progress(progress): + """ + Sends a progress update to the SCM compute worker. + + Args: + progress (int): The progress value to send, between 0 and 100. + + Returns: + None + """ + + scm_event('/ComputeWorker/v1/progress', progress) + +def scm_results_available(final_update=False): + """ + Notifies that results are available by sending an event to the '/ComputeWorker/v1/results' endpoint. + + Args: + final_update (bool, optional): Indicates whether this is the final update. Defaults to False. + + Returns: + None + """ + + scm_event('/ComputeWorker/v1/results', int(final_update)) + +def scm_cancel_heartbeat(): + """ + Cancels the ongoing heartbeat process by setting the HEARTBEAT_CANCELLED flag to True. + If a heartbeat thread is running, waits for it to finish and then resets the thread reference. + """ + + global HEARTBEAT_CANCELLED + global HEARTBEAT_THREAD + + HEARTBEAT_CANCELLED = True + if HEARTBEAT_THREAD: + HEARTBEAT_THREAD.join() + HEARTBEAT_THREAD = None + +def scm_set_error(code, message): + """ + Sets an error state by sending an error code and message to the ComputeWorker event handler. + + Args: + code (int): The error code representing the type of error. + message (str): A descriptive message explaining the error. + + Returns: + None + + Side Effects: + Triggers the '/ComputeWorker/v1/seterror' event with the provided code and message. + """ + + scm_event('/ComputeWorker/v1/seterror', code, message) + +def scm_complete(): + """ + Notifies the SCM worker that the process is complete by sending a completion event. 
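+ Reports 100% progress, posts the completion event, and then stops the
+ heartbeat thread, so no further heartbeats are sent after completion.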
+ + Returns: + None + """ + + scm_progress(100) + + scm_event('/ComputeWorker/v1/complete', 1, str(uuid4())) + + scm_cancel_heartbeat() +#################### + +wp.clear_kernel_cache() +wp.config.quiet = False + +def prep_inputs(input_file): + start_time = time.time() + f = open(input_file) + jsonfile = json.load(f) + proj_path = os.path.dirname(os.path.abspath(input_file)) + jsonfile['projPath'] = proj_path + settings = jsonfile['settings'] + voxel_size = settings['voxelSize'] + ulb = settings['ulb'] + # Extract the inlet velocity from the json dict + prescribed_velocity_phys = jsonfile['InletBC']['x'] + if running_via_scm(): + output_dir = proj_path + else: + output_dir = os.path.join(proj_path, jsonfile['outputName']) + if not os.path.exists(output_dir): + os.makedirs(output_dir) + for fx in [os.path.join(output_dir,f) for f in os.listdir(output_dir)]: + os.remove(fx) + + + with open(os.path.join(output_dir, "project.log"),'w') as fd: + fd.write("*** Studio Wind Tunnel Solver Log File ***\n\n\n") + fd.write("Date Created: "+time.asctime(time.localtime())+" \n\n") + fd.write("Processing input json ... \n\n") + logging.info("Processing input json ...") + + # Set accuracy and lattice type + if settings['doublePrecision']==True: + precision_policy = PrecisionPolicy.FP64FP64 + elif settings['doublePrecision']==-1: + precision_policy = PrecisionPolicy.FP16FP16 + else: + precision_policy = PrecisionPolicy.FP32FP32 + + compute_backend = ComputeBackend.NEON + velocity_set = xlb.velocity_set.D3Q27(precision_policy=precision_policy, compute_backend=compute_backend) + + ### Process Car for obj and scale + body_stl = os.path.join(proj_path, str(jsonfile['vehicle']['body'])) + filename, file_extension = os.path.splitext(body_stl) + + body_mesh = trimesh.load_mesh(body_stl, process=False) + if file_extension =='.obj': + body_mesh.apply_scale(0.01) + body_mesh.export(os.path.join(output_dir, filename+'.stl')) + body_mesh = trimesh.load_mesh(os.path.join(output_dir, filename+'.stl'), process=False) + + #If any wheels listed + if len(jsonfile['vehicle']['wheels']) > 0: + wheel_stls = [] + for wheel in jsonfile['vehicle']['wheels']: + wheel = os.path.join(proj_path, wheel) + wheel_stls.append(wheel) + wheel_meshes =[] + w=1 + for wheel in wheel_stls: + wheel_mesh = trimesh.load_mesh(wheel, process=False) + if file_extension =='.obj': + wheel_mesh.apply_scale(0.01) + wheel_mesh.export(os.path.join(output_dir, 'wheel'+str(w)+'.stl')) + wheel_mesh = trimesh.load_mesh(os.path.join(output_dir, 'wheel'+str(w)+'.stl')) + w+=1 + wheel_meshes.append(wheel_mesh) + + car_mesh = trimesh.util.concatenate(body_mesh + wheel_meshes) + else: + car_mesh = body_mesh + wheel_meshes=None + + # =========== + # Initialize XLB + xlb.init( + velocity_set=velocity_set, + default_backend=compute_backend, + default_precision_policy=precision_policy, + ) + + + level_data, body_vertices, wheel_vertices, grid_shape_zip, partSize, actual_num_levels, shift, sparsity_pattern, level_origins = mesh_prep( + voxel_size, car_mesh, body_mesh, wheel_meshes, output_dir, jsonfile + ) + + + # Characteristic length + L = float(partSize[0]) + #Material Setup + material = jsonfile['fluid'] + density = material['density'] + dynamic_viscosity = material['viscosity'] + kinematic_viscosity = dynamic_viscosity / density + + # Compute Re + Re = abs(prescribed_velocity_phys) * L / kinematic_viscosity + + # Calculate lattice parameters + delta_x_coarse = voxel_size * 2 ** (actual_num_levels - 1) + delta_t = voxel_size * ulb / prescribed_velocity_phys + 
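# Editor's addition (hedged sanity check, not in the original): the acoustic
+ # scaling above assumes prescribed_velocity_phys > 0 and gives a lattice Mach
+ # number of ulb/cs = ulb*sqrt(3), about 0.087 for the default ulb = 0.05,
+ # safely below the ~0.3 weakly-compressible limit usually quoted for LBM.
+ if ulb * np.sqrt(3.0) > 0.3:
+ logging.warning(f"Lattice Mach number {ulb * np.sqrt(3.0):.3f} is high; consider reducing ulb.") +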
lbm_visc = kinematic_viscosity * delta_t / (voxel_size ** 2) + omega = 1.0 / (3.0 * lbm_visc + 0.5) + + # Define exporter objects + + field_name_cardinality_dict = {"velocity": 3, "density": 1} + h5exporter = MultiresIO( + field_name_cardinality_dict, + level_data, + scale=voxel_size, + offset=-shift, + timestep_size=delta_t, + ) + bc_mask_exporter = MultiresIO({"bc_mask": 1}, level_data, scale=voxel_size, offset=-shift) + + # Create grid + grid = multires_grid_factory( + grid_shape_zip, + velocity_set=velocity_set, + sparsity_pattern_list=sparsity_pattern, + sparsity_pattern_origins=[neon.Index_3d(*box_origin) for box_origin in level_origins], + ) + # Calculate num_steps + coarsest_level = grid.count_levels - 1 + grid_shape_x_coarsest = grid.level_to_shape(coarsest_level)[0] + if jsonfile['settings']['flowPasses'] > 0: + num_steps = int(jsonfile['settings']['flowPasses'] * (grid_shape_x_coarsest / ulb)) + else: + num_steps = int(jsonfile['settings']['iterations']) + + + # Setup boundary conditions + boundary_conditions = setup_boundary_conditions(grid, level_data, body_vertices, wheel_vertices, ulb, lbm_visc, grid_shape_zip, precision_policy, jsonfile, velocity_set, compute_backend) + + # Create initializer + initializer = CustomMultiresInitializer( + bc_id=boundary_conditions[-2].id, # bc_outlet + constant_velocity_vector=(ulb, 0.0, 0.0), + velocity_set=velocity_set, + precision_policy=precision_policy, + compute_backend=compute_backend, + ) + + # Initialize simulation + sim = xlb.helper.MultiresSimulationManager( + omega=omega, + grid=grid, + boundary_conditions=boundary_conditions, + collision_type="KBC", + initializer=initializer, + mres_perf_opt=xlb.MresPerfOptimizationType.FUSION_AT_FINEST, + ) + + # Compute voxel statistics and reference area + stats = compute_voxel_statistics_and_reference_area(sim, bc_mask_exporter, level_data, actual_num_levels, sparsity_pattern, boundary_conditions, voxel_size) + active_voxels = stats["active_voxels"] + solid_voxels = stats["solid_voxels"] + total_voxels = stats["total_voxels"] + total_lattice_updates_per_step = stats["total_lattice_updates_per_step"] + reference_area = stats["reference_area"] + reference_area_physical = stats["reference_area_physical"] + + wp.synchronize() + + # Setup momentum transfer + momentum_transfer = MultiresMomentumTransfer( + boundary_conditions[-1], + mres_perf_opt=xlb.MresPerfOptimizationType.FUSION_AT_FINEST, + compute_backend=compute_backend, + ) + + with open(os.path.join(output_dir, "project.log"),'a') as fd: + fd.write('Material Properties\n') + fd.write('___________________\n') + fd.write(f'Density: {density:.4f} kg/m3\n') + fd.write(f'Visc Dyn: {dynamic_viscosity:.4e} Pa-s\n') + fd.write(f'Visc Kin: {kinematic_viscosity:.4e} m2/s\n') + fd.write(f'Visc LBM: {lbm_visc:.4e} \n\n') + fd.write('Boundary Setup\n') + fd.write('___________________\n') + fd.write(f"Walls: {jsonfile['BCtypes']['walls']}\n") + fd.write(f"Ground: {jsonfile['BCtypes']['ground']}\n") + fd.write('\nSolver Parameters\n') + fd.write('___________________\n') + fd.write(f"Number of flow passes: {jsonfile['settings']['flowPasses']}\n") + fd.write(f"Calculated iterations: {num_steps:,}\n") + fd.write(f"Finest voxel size: {voxel_size} meters\n") + fd.write(f"Coarsest voxel size: {delta_x_coarse} meters\n") + fd.write(f"Total voxels: {sum(np.count_nonzero(mask) for mask in sparsity_pattern):,}\n") + fd.write(f"Total active voxels: {total_voxels:,}\n") + fd.write(f"Active voxels per level: {active_voxels}\n") + fd.write(f"Solid voxels per level: 
{solid_voxels}\n") + fd.write(f"Total lattice updates per global step: {total_lattice_updates_per_step:,}\n") + fd.write(f"Actual number of refinement levels: {actual_num_levels}\n") + fd.write(f"Physical inlet velocity: {prescribed_velocity_phys:.4f} m/s\n") + fd.write(f"Lattice velocity (ulb): {ulb}\n") + fd.write(f"Characteristic length: {L: .4f} meters\n") + fd.write(f"Computed reference area (bc_mask): {reference_area} lattice units\n") + fd.write(f"Physical reference area (bc_mask): {reference_area_physical:.6f} m^2\n") + fd.write(f"Reynolds number: {Re:,.2f}\n") + fd.write(f'Inlet Velocity: {prescribed_velocity_phys:.1f} m/s \n') + fd.write(f'Timestep Size: {delta_t:.4e} seconds\n') + fd.write('Omega: '+str(omega)+'\n') + fd.write('ULB: '+str(settings['ulb'])+'\n\n') + fd.write('Results\n') + fd.write('___________________\n') + fd.write(f'Time to initialize: {(time.time()-start_time)/60:.2f} min\n') + + + solve( + sim, + ulb, + num_steps, + h5exporter, + output_dir, + grid_shape_zip, + grid_shape_x_coarsest, + delta_x_coarse, + shift, + momentum_transfer, + reference_area, + voxel_size, + prescribed_velocity_phys, + total_lattice_updates_per_step, + jsonfile + ) + + +# Mesh Generation Functions +# ========================= +def mesh_prep(voxel_size, car_mesh, body_mesh, wheel_meshes, output_dir, jsonfile): + + # Compute bounds on full car + min_bound = car_mesh.vertices.min(axis=0) + max_bound = car_mesh.vertices.max(axis=0) + partSize = max_bound - min_bound + + + mesher_type = jsonfile['mesher']['type'] + # Generate mesh + if mesher_type == "mres": + shift = np.array( + [ + jsonfile['mesher']['mres']['domain']["-x"] * partSize[0] - min_bound[0], + jsonfile['mesher']['mres']['domain']["-y"] * partSize[1] - min_bound[1], + jsonfile['mesher']['mres']['domain']["-z"] * partSize[2] - min_bound[2], + ], + dtype=float, + ) + #Apply shift to car mesh for meshing purpose + car_mesh.apply_translation(shift) + _ = car_mesh.vertex_normals + car_mesh.export("temp.stl") + + # Generate mesh using generate_mesh with ground refinement + level_data, _, sparsity_pattern, level_origins = generate_mesh( + jsonfile['mesher']['mres']['levels'], + "temp.stl", + jsonfile['settings']['voxelSize'], + jsonfile['mesher']['mres']['padding'], + jsonfile['mesher']['mres']['domain'], + ground_refinement_level=jsonfile['mesher']['mres']['ground_refinement_level'], + ground_voxel_height=jsonfile['mesher']['mres']['ground_voxel_height'], + ) + elif mesher_type == "cuboid": + # Compute translation to put mesh into first octant of the domain + domain_multiplier = jsonfile['mesher']['cuboid'] + shift = np.array( + [ + domain_multiplier[0][0] * partSize[0] - min_bound[0], + domain_multiplier[0][2] * partSize[1] - min_bound[1], + domain_multiplier[0][4] * partSize[2] - min_bound[2], + ], + dtype=float, + ) + #Apply shift to car mesh for meshing purpose + car_mesh.apply_translation(shift) + _ = car_mesh.vertex_normals + car_mesh.export("temp.stl") + + # Generate mesh using Cuboid Mesher on full car + level_data, sparsity_pattern, level_origins = make_cuboid_mesh( + jsonfile['settings']['voxelSize'], + domain_multiplier, + "temp.stl", + ) + else: + raise ValueError(f"Invalid mesher_type: {mesher_type}. 
Must be 'mres' or 'cuboid'.") + + # Apply translation to each part + body_mesh.apply_translation(shift) + + if wheel_meshes is not None: + wheel_vertices = [] + body_vertices = np.asarray(body_mesh.vertices) / voxel_size + for mesh in wheel_meshes: + mesh.apply_translation(shift) + if jsonfile['mesher']['trim'] == True: + zShift = jsonfile['mesher']['trim_voxels'] + plane_origin = np.array([0, 0, mesh.bounds[0][2]+(zShift* voxel_size)]) + plane_normal = np.array([0, 0, 1]) # Upward pointing normal + # Slice the mesh using the defined plane. + # With cap=True, the open slice is automatically closed off. + mesh_above = mesh.slice_plane(plane_origin=plane_origin, + plane_normal=plane_normal, + cap=True) + mesh_above.export(os.path.join(output_dir, 'temp.stl')) + wheel_stl = os.path.join(output_dir, 'temp.stl') + wheel_mesh = trimesh.load_mesh(wheel_stl, process=False) + wheel_vertices.append(np.asarray(wheel_mesh.vertices) / voxel_size) + + else: + wheel_vertices.append(np.asarray(mesh.vertices) / voxel_size) + else: + #No Wheels trim body as needed + wheel_vertices=None + if jsonfile['mesher']['trim'] == True: + zShift = jsonfile['mesher']['trim_voxels'] + plane_origin = np.array([0, 0, body_mesh.bounds[0][2]+(zShift* voxel_size)]) + plane_normal = np.array([0, 0, 1]) # Upward pointing normal + # Slice the mesh using the defined plane. + # With cap=True, the open slice is automatically closed off. + mesh_above = body_mesh.slice_plane(plane_origin=plane_origin, + plane_normal=plane_normal, + cap=True) + mesh_above.export(os.path.join(output_dir, 'temp.stl')) + body_stl = os.path.join(output_dir, 'temp.stl') + body_mesh = trimesh.load_mesh(body_stl, process=False) + body_vertices = np.asarray(body_mesh.vertices) / voxel_size + else: + body_vertices = np.asarray(body_mesh.vertices) / voxel_size + + + actual_num_levels = len(level_data) + grid_shape_finest = tuple([int(i * 2 ** (actual_num_levels - 1)) for i in level_data[-1][0].shape]) + #print(f"Requested levels: {len(domain_multiplier)}, Actual levels: {actual_num_levels}") + print(f"Full shape based on finest voxel size is {grid_shape_finest}") + # Clean all temp stls in the folder + for filename in os.listdir(output_dir): + # Check if the file ends with '.stl' and is a file (not a directory) + if filename.endswith('.stl') and os.path.isfile(os.path.join(output_dir, filename)): + file_path = os.path.join(output_dir, filename) + os.remove(file_path) + + return level_data, body_vertices, wheel_vertices, grid_shape_finest, partSize, actual_num_levels, shift, sparsity_pattern, level_origins + +# Boundary Conditions Setup +# ========================= +def setup_boundary_conditions(grid, level_data, body_vertices, wheel_vertices, ulb, lbm_visc, grid_shape_zip, precision_policy, jsonfile, velocity_set, compute_backend=ComputeBackend.NEON): + """ + Set up boundary conditions for the simulation. 
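+ The returned list is order-sensitive: the body BC must be last and the
+ outlet second to last, since downstream code indexes
+ boundary_conditions[-1] and boundary_conditions[-2].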
+ """ + num_levels = len(level_data) + coarsest_level = num_levels - 1 + box = grid.bounding_box_indices(shape=grid.level_to_shape(coarsest_level)) + left_indices = grid.boundary_indices_across_levels(level_data, box_side="left", remove_edges=True) + right_indices = grid.boundary_indices_across_levels(level_data, box_side="right", remove_edges=True) + top_indices = grid.boundary_indices_across_levels(level_data, box_side="top", remove_edges=False) + bottom_indices = grid.boundary_indices_across_levels(level_data, box_side="bottom", remove_edges=False) + front_indices = grid.boundary_indices_across_levels(level_data, box_side="front", remove_edges=False) + back_indices = grid.boundary_indices_across_levels(level_data, box_side="back", remove_edges=False) + + # Filter front and back indices to remove overlaps with top and bottom at each level + filtered_front_indices = [] + filtered_back_indices = [] + filtered_top_indices = [] + filtered_bottom_indices = [] + for level in range(num_levels): + left_set = set(zip(*left_indices[level])) if left_indices[level] else set() + right_set = set(zip(*right_indices[level])) if right_indices[level] else set() + top_set = set(zip(*top_indices[level])) if top_indices[level] else set() + bottom_set = set(zip(*bottom_indices[level])) if bottom_indices[level] else set() + front_set = set(zip(*front_indices[level])) if front_indices[level] else set() + back_set = set(zip(*back_indices[level])) if back_indices[level] else set() + filtered_front_set = front_set - (top_set | bottom_set | left_set | right_set) + filtered_back_set = back_set - (top_set | bottom_set | left_set | right_set) + filtered_top_set = top_set - (left_set | right_set) + filtered_bottom_set = bottom_set - (left_set | right_set) + filtered_front_indices.append( + [list(coords) for coords in zip(*filtered_front_set)] if filtered_front_set else [] + ) + filtered_back_indices.append( + [list(coords) for coords in zip(*filtered_back_set)] if filtered_back_set else [] + ) + filtered_top_indices.append( + [list(coords) for coords in zip(*filtered_top_set)] if filtered_top_set else [] + ) + filtered_bottom_indices.append( + [list(coords) for coords in zip(*filtered_bottom_set)] if filtered_bottom_set else [] + ) + + # Inlet is either RegularizedBC or Noneq-Reg Hybrid with uniform value (set hybrid if ground refinement is on) + + if jsonfile['mesher']['type'] == 'mres' and jsonfile['mesher']['mres']['ground_refinement_level'] > -1 : + bc_inlet = HybridBC( + bc_method="nonequilibrium_regularized", + prescribed_value=(ulb, 0.0, 0.0), + indices=left_indices, + ) + elif jsonfile['BCtypes']['inlet'] == "RegularizedBC": + bc_inlet = RegularizedBC("velocity", + #profile=bc_profile_new(), + prescribed_value=(ulb, 0.0, 0.0), + indices=left_indices, + ) + else: + bc_inlet = HybridBC( + bc_method="nonequilibrium_regularized", + prescribed_value=(ulb, 0.0, 0.0), + indices=left_indices, + ) + + bc_outlet = DoNothingBC(indices=right_indices) + + # Setup walls moving, static of fall back to FullBounce + if jsonfile['BCtypes']['walls'] == "moving": + bc_top =HybridBC(bc_method="nonequilibrium_regularized", prescribed_value=(ulb, 0.0, 0.0), indices=top_indices) + bc_front =HybridBC(bc_method="nonequilibrium_regularized", prescribed_value=(ulb, 0.0, 0.0), indices=filtered_front_indices) + bc_back =HybridBC(bc_method="nonequilibrium_regularized", prescribed_value=(ulb, 0.0, 0.0), indices=filtered_back_indices) + elif jsonfile['BCtypes']['walls'] == "static": + bc_top =HybridBC(bc_method="nonequilibrium_regularized", 
indices=top_indices)
+ bc_front = HybridBC(bc_method="nonequilibrium_regularized", indices=filtered_front_indices)
+ bc_back = HybridBC(bc_method="nonequilibrium_regularized", indices=filtered_back_indices)
+ else:
+ bc_top = FullwayBounceBackBC(indices=top_indices)
+ bc_front = FullwayBounceBackBC(indices=filtered_front_indices)
+ bc_back = FullwayBounceBackBC(indices=filtered_back_indices)
+
+ # Set up the ground as moving or static, or fall back to fullway bounce-back
+ if jsonfile['BCtypes']['ground'] == "moving":
+ bc_bottom = HybridBC(bc_method="nonequilibrium_regularized", prescribed_value=(ulb, 0.0, 0.0), indices=bottom_indices)
+ elif jsonfile['BCtypes']['ground'] == "static":
+ bc_bottom = HybridBC(bc_method="nonequilibrium_regularized", indices=bottom_indices)
+ else:
+ bc_bottom = FullwayBounceBackBC(indices=bottom_indices)
+
+ # Set up the car body with bounceback_grads or non-equilibrium regularized
+ if jsonfile['BCtypes']['car'] == "bounceback_grads":
+ bc_body = HybridBC(
+ bc_method="bounceback_grads",
+ mesh_vertices=body_vertices,
+ voxelization_method=MeshVoxelizationMethod("AABB_CLOSE", close_voxels=jsonfile['mesher']['close_voxels']),
+ use_mesh_distance=True,
+ )
+ else:
+ bc_body = HybridBC(
+ bc_method="nonequilibrium_regularized",
+ mesh_vertices=body_vertices,
+ voxelization_method=MeshVoxelizationMethod("AABB_CLOSE", close_voxels=jsonfile['mesher']['close_voxels']),
+ use_mesh_distance=True,
+ )
+
+ # Set up the wheels with bounceback_grads or non-equilibrium regularized
+ if wheel_vertices is not None:
+ wheel_bc = []
+ for wheel_verts in wheel_vertices:
+ if jsonfile['BCtypes']['wheels'] == "bounceback_grads": # TODO: add a moving (rotating) wall profile here
+ wheel_bc.append(HybridBC(
+ bc_method="bounceback_grads",
+ mesh_vertices=wheel_verts,
+ voxelization_method=MeshVoxelizationMethod("AABB_CLOSE", close_voxels=jsonfile['mesher']['close_voxels']),
+ use_mesh_distance=True,
+ )
+ )
+ else:
+ wheel_bc.append(HybridBC(
+ bc_method="nonequilibrium_regularized",
+ mesh_vertices=wheel_verts,
+ voxelization_method=MeshVoxelizationMethod("AABB_CLOSE", close_voxels=jsonfile['mesher']['close_voxels']),
+ use_mesh_distance=True,
+ )
+ )
+ return wheel_bc + [bc_top, bc_bottom, bc_front, bc_back, bc_inlet, bc_outlet, bc_body] # Body must be last. Outlet must be second to last.
+ else:
+ return [bc_top, bc_bottom, bc_front, bc_back, bc_inlet, bc_outlet, bc_body] # Body must be last. Outlet must be second to last.
+
+# Utility Functions
+# =================
+def print_lift_drag(sim, step, momentum_transfer, ulb, reference_area, voxel_size, drag_values):
+ """
+ Calculate the drag and lift coefficients, append them to drag_values, and return them.
+ """
+ boundary_force = momentum_transfer(sim.f_0, sim.f_1, sim.bc_mask, sim.missing_mask)
+ drag = boundary_force[0]
+ lift = boundary_force[2]
+ cd = 2.0 * drag / (ulb**2 * reference_area)
+ cl = 2.0 * lift / (ulb**2 * reference_area)
+ if np.isnan(cd) or np.isnan(cl):
+ raise ValueError(f"NaN detected in coefficients at step {step}: Cd={cd}, Cl={cl}")
+ drag_values.append([cd, cl])
+ # print(f"CD={cd:.3f}, CL={cl:.3f}, Drag Force (lattice units)={drag:.6f}")
+ return cd, cl, drag
+
+def plot_drag_lift(drag_values, output_dir, print_interval, script_name, percentile_range=(15, 85), use_log_scale=False):
+ """
+ Plot Cd and Cl over time and save the plot to the output directory.
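+ percentile_range clips the y-axis to the given percentiles of the Cd/Cl
+ series (plus 10% padding) so early transients do not dominate the scale. +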
+ """ + drag_values_array = np.array(drag_values) + steps = np.arange(0, len(drag_values) * print_interval, print_interval) + cd_values = drag_values_array[:, 0] + cl_values = drag_values_array[:, 1] + y_min = min(np.percentile(cd_values, percentile_range[0]), np.percentile(cl_values, percentile_range[0])) + y_max = max(np.percentile(cd_values, percentile_range[1]), np.percentile(cl_values, percentile_range[1])) + padding = (y_max - y_min) * 0.1 + y_min, y_max = y_min - padding, y_max + padding + if use_log_scale: + y_min = max(y_min, 1e-6) + plt.figure(figsize=(10, 6)) + plt.plot(steps, cd_values, label='Drag Coefficient (Cd)', color='blue') + plt.plot(steps, cl_values, label='Lift Coefficient (Cl)', color='red') + plt.xlabel('Simulation Step') + plt.ylabel('Coefficient') + plt.title(f'{script_name}: Drag and Lift Coefficients Over Time') + plt.legend() + plt.grid(True) + plt.ylim(y_min, y_max) + if use_log_scale: + plt.yscale('log') + plt.savefig(os.path.join(output_dir, 'drag_lift_plot.png')) + plt.close() + +def compute_voxel_statistics_and_reference_area(sim, bc_mask_exporter, level_data, actual_num_levels, sparsity_pattern, boundary_conditions, voxel_size): + """ + Compute active/solid voxels, totals, lattice updates, and reference area based on simulation data. + """ + # Compute macro fields + sim.macro(sim.f_0, sim.bc_mask, sim.rho, sim.u, streamId=0) + fields_data = bc_mask_exporter.get_fields_data({"bc_mask": sim.bc_mask}) + bc_mask_data = fields_data["bc_mask_0"] + level_id_field = bc_mask_exporter.level_id_field + + # Compute solid voxels per level (assuming 255 is the solid marker) + solid_voxels = [] + for lvl in range(actual_num_levels): + level_mask = level_id_field == lvl + solid_voxels.append(np.sum(bc_mask_data[level_mask] == 255)) + + # Compute active voxels (total non-zero in sparsity minus solids) + active_voxels = [np.count_nonzero(mask) for mask in sparsity_pattern] + active_voxels = [max(0, active_voxels[lvl] - solid_voxels[lvl]) for lvl in range(actual_num_levels)] + + # Totals + total_voxels = sum(active_voxels) + total_lattice_updates_per_step = sum(active_voxels[lvl] * (2 ** (actual_num_levels - 1 - lvl)) for lvl in range(actual_num_levels)) + + # Compute reference area (projected on YZ plane at finest level) + finest_level = 0 + mask_finest = level_id_field == finest_level + bc_mask_finest = bc_mask_data[mask_finest] + active_indices_finest = np.argwhere(level_data[0][0]) + bc_body_id = boundary_conditions[-1].id # Assuming last BC is bc_body + solid_voxels_indices = active_indices_finest[bc_mask_finest == bc_body_id] + unique_jk = np.unique(solid_voxels_indices[:, 1:3], axis=0) + reference_area = unique_jk.shape[0] + reference_area_physical = reference_area * (voxel_size ** 2) + + return { + "active_voxels": active_voxels, + "solid_voxels": solid_voxels, + "total_voxels": total_voxels, + "total_lattice_updates_per_step": total_lattice_updates_per_step, + "reference_area": reference_area, + "reference_area_physical": reference_area_physical + } + + +def save_slices(output_dir, grid_shape_zip, shift, h5exporter, delta_x_coarse,voxel_size, sim,jsonfile): + domainSize = np.array(grid_shape_zip) * voxel_size + outputSlices = jsonfile['outputSlices'] + # Map axis to plane normal + axis_to_normal = { + 'X': [1, 0, 0], + 'Y': [0, 1, 0], + 'Z': [0, 0, 1] + } + domain_min = -shift + domain_max = domain_min + domainSize + tic = time.time() + def compute_slice_bounds_relative_to_domain(origin, width, height, width_vec, height_vec, domain_min, domain_max, plane_normal): + 
# Build in-plane basis + n = np.array(plane_normal) / np.linalg.norm(plane_normal) + if np.allclose(n, [1, 0, 0]): + u1 = np.array([0, 1, 0]) + else: + u1 = np.array([1, 0, 0]) + u1 = u1 / np.linalg.norm(u1) + u2 = np.cross(n, u1) + u2 = u2 / np.linalg.norm(u2) + width_vec_norm = width_vec / np.linalg.norm(width_vec) + height_vec_norm = height_vec / np.linalg.norm(height_vec) + if np.dot(u1, width_vec_norm) < 0: + u1 = -u1 + if np.dot(u2, height_vec_norm) < 0: + u2 = -u2 + + # Use the lower-left corner of the slice as the reference point + ref_point = origin + + # Project domain corners onto the plane and compute in-plane coordinates + domain_corners = np.array([ + [domain_min[0], domain_min[1], domain_min[2]], + [domain_max[0], domain_min[1], domain_min[2]], + [domain_min[0], domain_max[1], domain_min[2]], + [domain_max[0], domain_max[1], domain_min[2]], + [domain_min[0], domain_min[1], domain_max[2]], + [domain_max[0], domain_min[1], domain_max[2]], + [domain_min[0], domain_max[1], domain_max[2]], + [domain_max[0], domain_max[1], domain_max[2]] + ]) + local_corners = [] + for corner in domain_corners: + # Project corner onto the plane + proj = corner - np.dot(corner - ref_point, n) * n + local_x = np.dot(proj - ref_point, u1) + local_y = np.dot(proj - ref_point, u2) + local_corners.append([local_x, local_y]) + local_corners = np.array(local_corners) + domain_u_min, domain_u_max = local_corners[:, 0].min(), local_corners[:, 0].max() + domain_v_min, domain_v_max = local_corners[:, 1].min(), local_corners[:, 1].max() + + # Project slice corners onto the plane and compute in-plane coordinates + slice_corners = [ + origin, + origin + width * width_vec, + origin + height * height_vec, + origin + width * width_vec + height * height_vec + ] + slice_local = [] + for corner in slice_corners: + proj = corner - np.dot(corner - ref_point, n) * n + local_x = np.dot(proj - ref_point, u1) + local_y = np.dot(proj - ref_point, u2) + slice_local.append([local_x, local_y]) + slice_local = np.array(slice_local) + slice_u_min, slice_u_max = slice_local[:, 0].min(), slice_local[:, 0].max() + slice_v_min, slice_v_max = slice_local[:, 1].min(), slice_local[:, 1].max() + + # Convert to fractions + u_min = (slice_u_min - domain_u_min) / (domain_u_max - domain_u_min) + u_max = (slice_u_max - domain_u_min) / (domain_u_max - domain_u_min) + v_min = (slice_v_min - domain_v_min) / (domain_v_max - domain_v_min) + v_max = (slice_v_max - domain_v_min) / (domain_v_max - domain_v_min) + + return [max(0,u_min), min(1,u_max), max(0,v_min), min(1,v_max)] + + for slice_group in outputSlices: + field_name = slice_group['field'] + axis = slice_group['axis'] + height = slice_group['height'] + width = slice_group['width'] + # Extract vectors + height_vec = np.array([ + slice_group['heightVec']['x'], + slice_group['heightVec']['y'], + slice_group['heightVec']['z'] + ]) + width_vec = np.array([ + slice_group['widthVec']['x'], + slice_group['widthVec']['y'], + slice_group['widthVec']['z'] + ]) + + # Get plane normal + plane_normal = axis_to_normal[axis] + + # Process each origin + for idx, origin_dict in enumerate(slice_group['origin']): + # The origin / plane point is the lower-left corner of the slice + plane_point = np.array([ + origin_dict['x'], + origin_dict['y'], + origin_dict['z'] + ]) + + # Calculate bounds in model units + + # Calculate the bounds + # Since we're given absolute dimensions, we need to compute + # the bounds relative to the full domain extent in the plane + # For now, we'll use bounds [0, 1, 0, 1] to capture 
the full slice + # as defined by the width and height + bounds = [0, 1, 0, 1] + bounds_x, bounds_x2, bounds_y, bounds_y2 = compute_slice_bounds_relative_to_domain(plane_point, width, height, width_vec, height_vec, domain_min, domain_max, plane_normal) + print(f'bounds {bounds_x}, {bounds_x2}, {bounds_y}, {bounds_y2}') + # Alternatively, if you want to compute bounds relative to domain: + # You would need to: + # 1. Project domain extents onto the plane + # 2. Calculate where this slice sits within those extents + # 3. Set bounds accordingly + + # Generate output filename + output_filename = os.path.join( + output_dir, + f"{axis}_slice_{idx:03d}" + ) + + print(f"Generating slice: {output_filename}") + print(f" Axis: {axis}, Normal: {plane_normal}") + print(f" Plane point: {plane_point}") + print(f" Width: {width}, Height: {height}") + wp.synchronize() + print(f"Max Velocity for slice scaling: {jsonfile['InletBC']['x'] * jsonfile['settings']['sliceFactor']}") + h5exporter.to_slice_image( + output_filename, + {"velocity": sim.u}, + plane_point=plane_point, + plane_normal=plane_normal, + grid_res=jsonfile['settings']['grid_res'], + bounds=(bounds_x, bounds_x2, bounds_y, bounds_y2), + show_axes=False, + show_colorbar=False, + cmap=jsonfile['settings']['sliceColorMap'], + normalize=jsonfile['InletBC']['x'] * jsonfile['settings']['sliceFactor'], + slice_thickness=delta_x_coarse #needed when using model units + ) + print(f"Time to save all images {time.time()-tic} seconds. ") + + +def solve( + sim, + ulb, + num_steps, + h5exporter, + output_dir, + grid_shape_zip, + grid_shape_x_coarsest, + delta_x_coarse, + shift, + momentum_transfer, + reference_area, + voxel_size, + prescribed_velocity_phys, + total_lattice_updates_per_step, + jsonfile + ): + + # -------------------------- Simulation Loop -------------------------- + wp.synchronize() + print(f"\n*******\nSolver Started\n*******\n") + start_time = time.time() + solve_start = start_time + compute_time = 0.0 + steps_since_last_print = 0 + drag_values = [] + + # Calculate print and file output intervals + print_interval = max(1, int(num_steps * (jsonfile['settings']['solutionPrintFreq'] / 100.0))) + crossover_step = int(num_steps * (jsonfile['settings']['crossover'] / 100.0)) + file_output_interval_pre_crossover = max(1, int(crossover_step / jsonfile['settings']['preCrossover_frames'])) if jsonfile['settings']['preCrossover_frames'] > 0 else num_steps + 1 + file_output_interval_post_crossover = max(1, int((num_steps - crossover_step) / jsonfile['settings']['postCrossover_frames'])) if jsonfile['settings']['postCrossover_frames'] > 0 else num_steps + 1 + final_print_interval = max(1, int((num_steps-crossover_step) * (jsonfile['settings']['crossover'] / 100.0))) + + if jsonfile['settings']['debug']: + for step in range(num_steps): + solution_time =(time.time()-solve_start)/60 + step_start = time.time() + sim.step() + wp.synchronize() + compute_time += time.time() - step_start + steps_since_last_print += 1 + percent_complete = (step + 1) / num_steps * 100 + scm_progress(np.floor(percent_complete)) + end_time = time.time() + elapsed = end_time - start_time + time_out = False + if elapsed/60 >= jsonfile['settings']['limit']: + time_out = True + if (step % print_interval == 0 and step < crossover_step) or step == num_steps - 1 or time_out: + sim.macro(sim.f_0, sim.bc_mask, sim.rho, sim.u, streamId=0) + wp.synchronize() + cd, cl, drag = print_lift_drag(sim, step, momentum_transfer, ulb, reference_area, voxel_size, drag_values) + filename = 
os.path.join(output_dir, f"{jsonfile['outputName']}_{step:04d}") + h5exporter.to_slice_image( + filename, + {"velocity": sim.u}, + plane_point=(1, 0, 0), + plane_normal=(0, 1, 0), + grid_res=jsonfile['settings']['grid_res'], + bounds=(0.25, 0.75, 0, 0.5), + show_axes=False, + show_colorbar=False, + slice_thickness=delta_x_coarse, #needed when using model units + normalize = prescribed_velocity_phys*jsonfile['settings']['sliceFactor'], + ) + + total_lattice_updates = total_lattice_updates_per_step * steps_since_last_print + MLUPS = total_lattice_updates / compute_time / 1e6 if compute_time > 0 else 0.0 + current_flow_passes = step * ulb / grid_shape_x_coarsest + remaining_steps = num_steps - step - 1 + time_remaining = 0.0 if MLUPS == 0 else (total_lattice_updates_per_step * remaining_steps) / (MLUPS * 1e6) + hours, rem = divmod(time_remaining, 3600) + minutes, seconds = divmod(rem, 60) + time_remaining_str = f"{int(hours):02d}h {int(minutes):02d}m {int(seconds):02d}s" + + print(f"Completed step {step}/{num_steps} ({percent_complete:.2f}% complete)") + print(f" Flow Passes: {current_flow_passes:.2f}") + print(f" Time elapsed: {elapsed:.1f}s, Compute time: {compute_time:.1f}s, ETA: {time_remaining_str}") + print(f" MLUPS: {MLUPS:.1f}") + print(f" Cd={cd:.3f}, Cl={cl:.3f}, Drag Force (lattice units)={drag:.3f}") + #start_time = time.time() + compute_time = 0.0 + steps_since_last_print = 0 + scm_results_available() + if time_out: + wp.synchronize() + sim.macro(sim.f_0, sim.bc_mask, sim.rho, sim.u, streamId=0) + save_slices(output_dir, grid_shape_zip, shift, h5exporter, delta_x_coarse,voxel_size, sim,jsonfile) + filename = os.path.join(output_dir, f"{jsonfile['outputName']}_{step:04d}") + h5exporter.to_hdf5(filename, {"velocity": sim.u, "density": sim.rho}, compression="gzip", compression_opts=1) + + with open(os.path.join(output_dir, "project.log"),'a') as fd: + fd.write(f"*** Solution Timed out ***\n") + fd.write(f"Actual iterations: {step}\n") + print('Time limit reached') + break + + file_output_interval = file_output_interval_pre_crossover if step < crossover_step else file_output_interval_post_crossover + if step % file_output_interval == 0 or step == num_steps - 1: + sim.macro(sim.f_0, sim.bc_mask, sim.rho, sim.u, streamId=0) + filename = os.path.join(output_dir, f"{jsonfile['outputName']}_{step:04d}") + h5exporter.to_hdf5(filename, {"velocity": sim.u, "density": sim.rho}, compression="gzip", compression_opts=1) + wp.synchronize() + if step >= crossover_step and step % final_print_interval ==0 : + sim.macro(sim.f_0, sim.bc_mask, sim.rho, sim.u, streamId=0) + wp.synchronize() + cd, cl, drag = print_lift_drag(sim, step, momentum_transfer, ulb, reference_area, voxel_size) + print(f"Completed step {step}/{num_steps} ") + print(f" Cd= {cd:.3f}, Cl= {cl:.3f}, Drag Force (lattice units)={drag:.3f}") + filename = os.path.join(output_dir, f"{jsonfile['outputName']}_{step:04d}") + h5exporter.to_slice_image( + filename, + {"velocity": sim.u}, + plane_point=(1, 0, 0), + plane_normal=(0, 1, 0), + grid_res=jsonfile['settings']['grid_res'], + bounds=(0, 1, 0, 1), + show_axes=False, + show_colorbar=False, + slice_thickness=delta_x_coarse, #needed when using model units + normalize = prescribed_velocity_phys*jsonfile['settings']['sliceFactor'], + ) + + + + + + # Save drag and lift data to CSV + if len(drag_values) > 0: + with open(os.path.join(output_dir, "drag_lift.csv"), 'w') as fd: + fd.write("Step,Cd,Cl\n") + for i, (cd, cl) in enumerate(drag_values): + fd.write(f"{i * print_interval},{cd},{cl}\n") + 
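# NOTE (editor's addition): the Step column assumes one sample every
+ # print_interval steps; in this debug path, post-crossover samples are taken
+ # every final_print_interval steps instead, so those rows are approximate. +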
plot_drag_lift(drag_values, output_dir, print_interval, jsonfile['outputName']) + + # Calculate and print average Cd and Cl for the last 50% + drag_values_array = np.array(drag_values) + + start_index = int(len(drag_values) * (jsonfile['settings']['crossover'] / 100.0)) + last_half = drag_values_array[start_index:, :] + avg_cd = np.mean(last_half[:, 0]) + avg_cl = np.mean(last_half[:, 1]) + epsilon = 1e-6 + target_cd = jsonfile['vehicle']['targets']['cd'] + epsilon + target_cl = jsonfile['vehicle']['targets']['cl'] + epsilon + print(f"Experimental Drag Coefficient (Cd): {target_cd}\n" + f"Average Drag Coefficient (Cd) for last {100-jsonfile['settings']['crossover']}%: {avg_cd:.6f}\n" + f"Average Lift Coefficient (Cl) for last {100-jsonfile['settings']['crossover']}%: {avg_cl:.6f}\n" + f"Error Drag Coefficient (Cd): {((avg_cd-target_cd)/target_cd)*100:.2f}%\n" + f"Error Lift Coefficient (Cl): {((avg_cl-target_cl)/target_cl)*100:.2f}%\n" + ) + + with open(os.path.join(output_dir, "project.log"),'a') as fd: + fd.write(f"Average Drag Coefficient (Cd) for last {100-jsonfile['settings']['crossover']}%: {avg_cd:.6f}\n") + fd.write(f"Average Lift Coefficient (Cl) for last {100-jsonfile['settings']['crossover']}%: {avg_cl:.6f}\n") + fd.write(f"Error Drag Coefficient (Cd): {((avg_cd-target_cd)/target_cd)*100:.2f}%\n") + fd.write(f"Error Lift Coefficient (Cl): {((avg_cl-target_cl)/target_cl)*100:.2f}%\n") + fd.write(f'Total Solution Time: {(time.time()-solve_start)/60:.3f} min\n') + + save_slices(output_dir, grid_shape_zip, shift, h5exporter, delta_x_coarse, voxel_size,sim,jsonfile) + with open(os.path.join(output_dir, "source.json"), 'w') as file: + json.dump(jsonfile, file, indent=4) # indent for pretty-printing + print(f"Source Json written to {os.path.join(output_dir, 'source.json')} successfully.") + + scm_results_available(True) + # Customer style run (no extra debug outputs) + # Runs setup and then only takes data from crossover to end + else: + print_interval=max(1, int((num_steps-crossover_step) * (jsonfile['settings']['solutionPrintFreq'] / 100.0))) + + for step in range(num_steps): + end_time = time.time() + elapsed = end_time - start_time + sim.step() + wp.synchronize() + percent_complete = (step + 1) / num_steps * 100 + scm_progress(np.floor(percent_complete)) + + if elapsed/60 >= jsonfile['settings']['limit']: + sim.macro(sim.f_0, sim.bc_mask, sim.rho, sim.u, streamId=0) + wp.synchronize() + cd, cl, drag = print_lift_drag(sim, step, momentum_transfer, ulb, reference_area, voxel_size, drag_values) + + with open(os.path.join(output_dir, "project.log"),'a') as fd: + fd.write(f"*** Solution Time Reached ***\n") + fd.write(f"Actual iterations: {step}\n") + print('Time limit reached') + save_slices(output_dir, grid_shape_zip, shift, h5exporter, delta_x_coarse, voxel_size,sim,jsonfile) + if jsonfile['settings']['fullData']==True: + wp.synchronize() + sim.macro(sim.f_0, sim.bc_mask, sim.rho, sim.u, streamId=0) + filename = os.path.join(output_dir, f"{jsonfile['outputName']}_{step:04d}") + h5exporter.to_hdf5(filename, {"velocity": sim.u, "density": sim.rho}, compression="gzip", compression_opts=1) + + scm_results_available() + break + + if step >= crossover_step: + if step % print_interval == 0 or step == num_steps - 1: + sim.macro(sim.f_0, sim.bc_mask, sim.rho, sim.u, streamId=0) + wp.synchronize() + cd, cl, drag = print_lift_drag(sim, step, momentum_transfer, ulb, reference_area, voxel_size, drag_values) + scm_results_available() + print(f"Completed step {step}/{num_steps}") + print(f" 
Cd={cd:.3f}, Cl={cl:.3f}, Drag Force (lattice units)={drag:.3f}") + + if (step == num_steps - 1) & (jsonfile['settings']['fullData']==True): + wp.synchronize() + sim.macro(sim.f_0, sim.bc_mask, sim.rho, sim.u, streamId=0) + filename = os.path.join(output_dir, f"{jsonfile['outputName']}_{step:04d}") + h5exporter.to_hdf5(filename, {"velocity": sim.u, "density": sim.rho}, compression="gzip", compression_opts=0) + + # Save drag and lift data to CSV + if len(drag_values) > 0: + with open(os.path.join(output_dir, "drag_lift.csv"), 'w') as fd: + fd.write("Step,Cd,Cl\n") + for i, (cd, cl) in enumerate(drag_values): + fd.write(f"{i * print_interval},{cd},{cl}\n") + plot_drag_lift(drag_values, output_dir, print_interval, jsonfile['outputName']) + + # Calculate and print average Cd and Cl for the last 50% + drag_values_array = np.array(drag_values) + + #start_index = int(len(drag_values) * (jsonfile['settings']['crossover'] / 100.0)) + #last_half = drag_values_array[start_index:, :] + avg_cd = np.mean(drag_values_array[:, 0]) + avg_cl = np.mean(drag_values_array[:, 1]) + epsilon = 1e-6 + target_cd = jsonfile['vehicle']['targets']['cd'] + epsilon + target_cl = jsonfile['vehicle']['targets']['cl'] + epsilon + print(f"Experimental Drag Coefficient (Cd): {0.307}\n" + f"Average Drag Coefficient (Cd) for last {100-jsonfile['settings']['crossover']}%: {avg_cd:.6f}\n" + f"Average Lift Coefficient (Cl) for last {100-jsonfile['settings']['crossover']}%: {avg_cl:.6f}\n" + f"Error Drag Coefficient (Cd): {((avg_cd-target_cd)/target_cd)*100:.2f}%\n" + f"Error Lift Coefficient (Cl): {((avg_cl-target_cl)/target_cl)*100:.2f}%\n" + ) + + with open(os.path.join(output_dir, "project.log"),'a') as fd: + fd.write(f"Average Drag Coefficient (Cd) for last {100-jsonfile['settings']['crossover']}%: {avg_cd:.6f}\n") + fd.write(f"Average Lift Coefficient (Cl) for last {100-jsonfile['settings']['crossover']}%: {avg_cl:.6f}\n") + fd.write(f"Error Drag Coefficient (Cd): {((avg_cd-target_cd)/target_cd)*100:.2f}%\n") + fd.write(f"Error Drag Coefficient (Cl): {((avg_cl-target_cl)/target_cl)*100:.2f}%\n") + fd.write(f'Total Solution Time: {(time.time()-solve_start)/60:.3f} min\n') + save_slices(output_dir, grid_shape_zip, shift, h5exporter, delta_x_coarse,voxel_size, sim,jsonfile) + + scm_results_available(True) + + + +def main(argv): + """ + Main entry point for the Studio Wind Tunnel Solver. + + Parses command-line arguments to obtain the input JSON file, initializes the simulation environment, + cleans up previous output files, and runs the wind tunnel simulation. Handles errors and reports + progress and completion status via SCM events. + + Args: + argv (list): List of command-line arguments. + + Returns: + int: Exit code. Returns 0 on success, 64 on argument/input errors, or 1 on simulation failure. 
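+ Invoke with -i/--ifile pointing at the project JSON file (see
+ examples/project.json for a template of the expected schema). +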
+ """ + + logging.basicConfig( + level=logging.INFO, + format='%(asctime)s %(levelname)s %(name)s: %(message)s', + handlers=[ + logging.StreamHandler(sys.stdout) + ] + ) + + input_file = '' + usage = 'windtunnel_json.py -i ' + + logging.info('Welcome to Studio Wind Tunnel Solver') + + try: + opts, _ = getopt.getopt(argv, "hi:o:", ["ifile="]) + except getopt.GetoptError: + logging.error(usage) + scm_set_error(64, 'Argument error') + return 64 + + for opt, arg in opts: + if opt == '-h': + logging.info(usage) + return 64 + + if opt in ("-i", "--ifile"): + input_file = arg + + if not input_file: + logging.error('Error: Input JSON file must be specified.\n' + usage) + scm_set_error(64, 'Input file not specified') + return 64 + + try: + if running_via_scm(): + log_file_scm = os.path.join(os.path.dirname(os.path.abspath(input_file)), 'solve.log') + scm_log_handler = logging.FileHandler(log_file_scm, mode='w') + scm_log_handler.setLevel(logging.INFO) + scm_log_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(name)s: %(message)s')) + logging.getLogger().addHandler(scm_log_handler) + logging.info('SCM Log file: {}'.format(log_file_scm)) + + logging.info('Input file: {}'.format(input_file)) + + scm_init() + + prep_inputs(input_file) + + scm_complete() + except Exception as e: + logging.error(f'Exception occured: {e}') + scm_set_error(1, f'Job failed: {e}') + scm_cancel_heartbeat() + return 1 + + return 0 + +if __name__ == "__main__": + sys.exit(main(sys.argv[1:])) diff --git a/examples/project.json b/examples/project.json new file mode 100644 index 00000000..af7e504c --- /dev/null +++ b/examples/project.json @@ -0,0 +1,412 @@ +{ + +"mesher":{ + "type":"mres", + "cuboid": [ + [2.5, 3.5, 2.5, 2.5, 0.0, 5.0], + + + [0.55, 0.65, 0.65, 0.65, 0.0, 0.65], + [0.25, 0.25, 0.22, 0.22, 0.0, 0.25] + ], + "mres":{ + "levels":2, + "domain":{ + "-x": 1.5, + "x": 2.0, + "-y": 1.2, + "y": 1.2, + "-z": 0, + "z": 2.0 + }, + "padding":[ + [15, 50, 15, 15, 15, 15], + [10, 40, 10, 10, 10, 10], + [8, 20, 8, 8, 8, 8], + [8, 20, 8, 8, 8, 8], + [6, 6, 6, 6, 6, 6], + [6, 6, 6, 6, 6, 6], + [6, 6, 6, 6, 6, 6], + [6, 6, 6, 6, 6, 6], + [6, 6, 6, 6, 6, 6], + [6, 6, 6, 6, 6, 6], + [6, 6, 6, 6, 6, 6] + ], + "ground_refinement_level":-1, + "ground_voxel_height":6 + + }, + "trim":true, + "trim_voxels":3, + "close_voxels":1 +}, +"vehicle":{ + "body": "S550_GT500_BS_noRain.stl", + "wheels":[ + "S550_GT500_BS_RR.stl", + "S550_GT500_BS_RL.stl", + "S550_GT500_BS_FR.stl", + "S550_GT500_BS_FL.stl" + ], + "targets":{ + "cd":0.30, + "cl":-0.10 + } + +}, +"outputName": "StudioWindTunnel_Results2level", +"fluid": { + "density": 1.2047, + "viscosity": 1.817e-05 + }, +"InletBC":{ + "x": 30.0, + "y": 0.0, + "z": 0.0 + }, +"settings": { + "debug":true, + "doublePrecision": false, + "iterations": 4000, + "solutionPrintFreq": 1, + "crossover":80, + "preCrossover_frames":10, + "postCrossover_frames":25, + "ulb":0.05, + "fullData": true, + "flowPasses":3, + "voxelSize": 0.0085, + "writeFields": true, + "writeSlices": true, + "writePressure": true, + "sliceFactor": 1.75, + "sliceColorMap":"turbo", + "grid_res": 1500, + "limit":10 +}, +"BCtypes": { + "inlet": "RegularizedBC", + "car": "bounceback_grads", + "wheels": "bounceback_grads", + "ground": "moving", + "walls": "moving" +}, +"outputSlices": [ + { + "field": "velocity_magnitude", + "axis": "X", + "height": 2, + "heightVec": { + "x": 0.0, + "y": 0.0, + "z": 1.0 + }, + "width": 4, + "widthVec": { + "x": 0.0, + "y": 1.0, + "z": 0.0 + }, + "loadCase": 0, + "origin": [ + { + "x": -1.5, 
+ "y": -2.0, + "z": 0.0 + },{ + "x": -1.175, + "y": -2.0, + "z": 0.0 + },{ + "x": -0.85, + "y": -2.0, + "z": 0.0 + },{ + "x": -0.525, + "y": -2.0, + "z": 0.0 + },{ + "x": -0.2, + "y": -2.0, + "z": 0.0 + },{ + "x": 0.125, + "y": -2.0, + "z": 0.0 + },{ + "x": 0.45, + "y": -2.0, + "z": 0.0 + },{ + "x": 0.775, + "y": -2.0, + "z": 0.0 + },{ + "x": 1.1, + "y": -2.0, + "z": 0.0 + },{ + "x": 1.425, + "y": -2.0, + "z": 0.0 + },{ + "x": 1.75, + "y": -2.0, + "z": 0.0 + },{ + "x": 2.075, + "y": -2.0, + "z": 0.0 + },{ + "x": 2.4, + "y": -2.0, + "z": 0.0 + },{ + "x": 2.725, + "y": -2.0, + "z": 0.0 + },{ + "x": 3.05, + "y": -2.0, + "z": 0.0 + },{ + "x": 3.375, + "y": -2.0, + "z": 0.0 + },{ + "x": 3.7, + "y": -2.0, + "z": 0.0 + },{ + "x": 4.025, + "y": -2.0, + "z": 0.0 + },{ + "x": 4.35, + "y": -2.0, + "z": 0.0 + },{ + "x": 4.675, + "y": -2.0, + "z": 0.0 + },{ + "x": 5.0, + "y": -2.0, + "z": 0.0 + } + ] + }, + { + "field": "velocity_magnitude", + "axis": "Y", + "height": 2, + "heightVec": { + "x": 0.0, + "y": 0.0, + "z": 1.0 + }, + "width": 8, + "widthVec": { + "x": 1.0, + "y": 0.0, + "z": 0.0 + }, + "loadCase": 0, + "origin": [ + { + "x": -1.50, + "y": -2.0, + "z": 0.0 + },{ + "x": -1.50, + "y": -1.8, + "z": 0.0 + },{ + "x": -1.50, + "y": -1.6, + "z": 0.0 + },{ + "x": -1.50, + "y": -1.4, + "z": 0.0 + },{ + "x": -1.50, + "y": -1.2, + "z": 0.0 + },{ + "x": -1.50, + "y": -1.0, + "z": 0.0 + },{ + "x": -1.50, + "y": -0.80, + "z": 0.0 + },{ + "x": -1.50, + "y": -0.60, + "z": 0.0 + },{ + "x": -1.50, + "y": -0.40, + "z": 0.0 + },{ + "x": -1.50, + "y": -0.20, + "z": 0.0 + },{ + "x": -1.50, + "y": 0.0, + "z": 0.0 + },{ + "x": -1.50, + "y": 0.20, + "z": 0.0 + },{ + "x": -1.50, + "y": 0.40, + "z": 0.0 + },{ + "x": -1.50, + "y": 0.60, + "z": 0.0 + },{ + "x": -1.50, + "y": 0.80, + "z": 0.0 + },{ + "x": -1.50, + "y": 1.0, + "z": 0.0 + },{ + "x": -1.50, + "y": 1.2, + "z": 0.0 + },{ + "x": -1.50, + "y": 1.4, + "z": 0.0 + },{ + "x": -1.50, + "y": 1.6, + "z": 0.0 + },{ + "x": -1.50, + "y": 1.8, + "z": 0.0 + },{ + "x": -1.50, + "y": 2.0, + "z": 0.0 + } + + ] + }, + { + "field": "velocity_magnitude", + "axis": "Z", + "height": 4, + "heightVec": { + "x": 0.0, + "y": 1.0, + "z": 0.0 + }, + "width": 8, + "widthVec": { + "x": 1, + "y": 0.0, + "z": 0.0 + }, + "loadCase": 0, + "origin": [ + { + "x": -1.50, + "y": -2.0, + "z": 0.0 + },{ + "x": -1.50, + "y": -2.0, + "z": 0.09 + },{ + "x": -1.50, + "y": -2.0, + "z": 0.18 + },{ + "x": -1.50, + "y": -2.0, + "z": 0.27 + },{ + "x": -1.50, + "y": -2.0, + "z": 0.36 + },{ + "x": -1.50, + "y": -2.0, + "z": 0.45 + },{ + "x": -1.50, + "y": -2.0, + "z": 0.54 + },{ + "x": -1.50, + "y": -2.0, + "z": 0.63 + },{ + "x": -1.50, + "y": -2.0, + "z": 0.72 + },{ + "x": -1.50, + "y": -2.0, + "z": 0.81 + },{ + "x": -1.50, + "y": -2.0, + "z": 0.9 + },{ + "x": -1.50, + "y": -2.0, + "z": 0.99 + },{ + "x": -1.50, + "y": -2.0, + "z": 1.08 + },{ + "x": -1.50, + "y": -2.0, + "z": 1.17 + },{ + "x": -1.50, + "y": -2.0, + "z": 1.26 + },{ + "x": -1.50, + "y": -2.0, + "z": 1.35 + },{ + "x": -1.50, + "y": -2.0, + "z": 1.44 + },{ + "x": -1.50, + "y": -2.0, + "z": 1.53 + },{ + "x": -1.50, + "y": -2.0, + "z": 1.62 + },{ + "x": -1.50, + "y": -2.0, + "z": 1.71 + },{ + "x": -1.50, + "y": -2.0, + "z": 1.8 + } + ] + } + + + ] + +} \ No newline at end of file diff --git a/examples/sphere.py b/examples/sphere.py new file mode 100644 index 00000000..e2a930ff --- /dev/null +++ b/examples/sphere.py @@ -0,0 +1,683 @@ +import neon +import warp as wp +import numpy as np +import time +import os +import re +import matplotlib.pyplot as 
plt
+import trimesh
+import shutil
+
+import xlb
+from xlb.compute_backend import ComputeBackend
+from xlb.precision_policy import PrecisionPolicy
+from xlb.grid import multires_grid_factory
+from xlb.operator.boundary_condition import (
+    FullwayBounceBackBC,
+    HalfwayBounceBackBC,
+    RegularizedBC,
+    ExtrapolationOutflowBC,
+    DoNothingBC,
+    ZouHeBC,
+    HybridBC,
+)
+from xlb.operator.boundary_masker import MeshVoxelizationMethod
+from xlb.utils.mesher import make_cuboid_mesh, MultiresIO
+from xlb.utils.makemesh import generate_mesh
+from xlb.operator.force import MultiresMomentumTransfer
+from xlb.helper.initializers import CustomMultiresInitializer
+from xlb import MresPerfOptimizationType
+
+wp.clear_kernel_cache()
+wp.config.quiet = True
+
+# User Configuration
+# =================
+# Physical and simulation parameters
+voxel_size = 0.15 / 300.0  # Finest voxel size in meters (0.5 mm for a 300-voxel sphere diameter)
+ulb = 0.05  # Lattice velocity
+flow_passes = 3  # Domain flow passes
+kinematic_viscosity = 1.508e-5  # Kinematic viscosity of air in m^2/s
+
+# STL filename
+stl_filename = "examples/stl/sphere.stl"
+base_script_name = "Sphere 300D"
+
+# List of Reynolds numbers to simulate
+#reynolds_numbers = [30, 50, 100, 300, 500, 1000, 3000, 10000, 30000, 100000, 200000, 500000, 1000000]
+reynolds_numbers = [10000, 1000000]
+
+# I/O settings
+print_interval_percentage = 5  # Print every 5% of iterations
+file_output_crossover_percentage = 80  # Crossover at 80% of iterations
+num_file_outputs_pre_crossover = 1  # Outputs before crossover
+num_file_outputs_post_crossover = 1  # Outputs after crossover
+
+# Other setup parameters
+compute_backend = ComputeBackend.NEON
+precision_policy = PrecisionPolicy.FP32FP32
+velocity_set = xlb.velocity_set.D3Q27(precision_policy=precision_policy, compute_backend=compute_backend)
+
+# Choose mesher type
+mesher_type = "makemesh"  # Options: "makemesh" or "cuboid"
+
+# Mesh Generation Functions
+# =========================
+def generate_makemesh_mesh(stl_filename, voxel_size, ground_refinement_level=None, ground_voxel_height=4):
+    """
+    Generate a makemesh mesh based on the provided voxel size in meters, domain multipliers, and padding values.
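+
+    Sizing note (illustrative, using this file's defaults of 8 requested levels at
+    the 0.5 mm finest voxel size): the coarsest voxel size follows from
+    voxel_size * 2 ** (num_levels - 1), so
+
+        >>> round((0.15 / 300.0) * 2 ** (8 - 1), 6)
+        0.064
+
+    i.e. 6.4 cm cells at the coarsest level, before any requested levels are dropped.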
+ """ + # Number of requested refinement levels + num_levels = 8 + + # Domain multipliers for the full domain + domainMultiplier = { + "-x": 3, + "x": 6, + "-y": 4, + "y": 4, + "-z": 4, + "z": 4, + } + + padding_values = [ + [15, 15, 15, 15, 15, 15], + [15, 15, 15, 15, 15, 15], + [8, 8, 8, 8, 8, 8], + [6, 6, 6, 6, 6, 6], + [6, 6, 6, 6, 6, 6], + [6, 6, 6, 6, 6, 6], + [6, 6, 6, 6, 6, 6], + [6, 6, 6, 6, 6, 6], + [6, 6, 6, 6, 6, 6], + [6, 6, 6, 6, 6, 6], + [6, 6, 6, 6, 6, 6], + + ] + + # Load the mesh + mesh = trimesh.load_mesh(stl_filename, process=False) + if mesh.is_empty: + raise ValueError("Loaded mesh is empty or invalid.") + + # Compute original bounds + min_bound = mesh.vertices.min(axis=0) + max_bound = mesh.vertices.max(axis=0) + partSize = max_bound - min_bound + + # Compute translation to put mesh into first octant of the domain + shift = np.array( + [ + domainMultiplier["-x"] * partSize[0] - min_bound[0], + domainMultiplier["-y"] * partSize[1] - min_bound[1], + domainMultiplier["-z"] * partSize[2] - min_bound[2], + ], + dtype=float, + ) + + # Apply translation and save out temp STL + mesh.apply_translation(shift) + _ = mesh.vertex_normals + mesh_vertices = np.asarray(mesh.vertices) / voxel_size + mesh.export("temp.stl") + + # Generate mesh using generate_mesh with ground refinement + level_data, _, sparsity_pattern, level_origins = generate_mesh( + num_levels, + "temp.stl", + voxel_size, + padding_values, + domainMultiplier, + ground_refinement_level=ground_refinement_level, + ground_voxel_height=ground_voxel_height, + ) + actual_num_levels = len(level_data) + grid_shape_finest = tuple([int(i * 2 ** (actual_num_levels - 1)) for i in level_data[-1][0].shape]) + print(f"Requested levels: {num_levels}, Actual levels: {actual_num_levels}") + print(f"Full shape based on finest voxel size is {grid_shape_finest}") + os.remove("temp.stl") + + return level_data, mesh_vertices, tuple([int(a) for a in grid_shape_finest]), partSize, actual_num_levels, shift, sparsity_pattern, level_origins + +def generate_cuboid_mesh(stl_filename, voxel_size): + """ + Alternative cuboid mesh generation based on Apolo's method with domain multipliers per level. 
+ """ + # Domain multipliers for each refinement level + domain_multiplier = [ + [3, 6, 4, 4, 4, 4], # -x, x, -y, y, -z, z + [1.5, 3, 1.5, 1.5, 1.5, 1.5], # -x, x, -y, y, -z, z + [1, 2, 1, 1, 1, 1], + [0.25, 0.5, 0.25, 0.25, 0.25, 0.25], + # [1, 2, 1, 1, 1, 1], + # [0.4, 1, 0.4, 0.4, 0.4, 0.4], + # [0.2, 0.4, 0.2, 0.2, 0.2, 0.2], + ] + + + + # Load the mesh + mesh = trimesh.load_mesh(stl_filename, process=False) + if mesh.is_empty: + raise ValueError("Loaded mesh is empty or invalid.") + + # Compute original bounds + min_bound = mesh.vertices.min(axis=0) + max_bound = mesh.vertices.max(axis=0) + partSize = max_bound - min_bound + + # Compute translation to put mesh into first octant of the domain + shift = np.array( + [ + domain_multiplier[0][0] * partSize[0] - min_bound[0], + domain_multiplier[0][2] * partSize[1] - min_bound[1], + domain_multiplier[0][4] * partSize[2] - min_bound[2], + ], + dtype=float, + ) + + # Apply translation and save out temp STL + mesh.apply_translation(shift) + _ = mesh.vertex_normals + mesh_vertices = np.asarray(mesh.vertices) / voxel_size + mesh.export("temp.stl") + + # Generate mesh using make_cuboid_mesh + level_data, sparsity_pattern, level_origins = make_cuboid_mesh( + voxel_size, + domain_multiplier, + "temp.stl", + ) + actual_num_levels = len(level_data) + grid_shape_finest = tuple([int(i * 2 ** (actual_num_levels - 1)) for i in level_data[-1][0].shape]) + print(f"Requested levels: {len(domain_multiplier)}, Actual levels: {actual_num_levels}") + print(f"Full shape based on finest voxel size is {grid_shape_finest}") + os.remove("temp.stl") + + return level_data, mesh_vertices, tuple([int(a) for a in grid_shape_finest]), partSize, actual_num_levels, shift, sparsity_pattern, level_origins + +# Boundary Conditions Setup +# ========================= +def setup_boundary_conditions(grid, level_data, body_vertices, ulb, nu_lattice, compute_backend=ComputeBackend.NEON): + """ + Set up boundary conditions for the simulation. 
+ """ + num_levels = len(level_data) + coarsest_level = num_levels - 1 + box = grid.bounding_box_indices(shape=grid.level_to_shape(coarsest_level)) + left_indices = grid.boundary_indices_across_levels(level_data, box_side="left", remove_edges=True) + right_indices = grid.boundary_indices_across_levels(level_data, box_side="right", remove_edges=True) + top_indices = grid.boundary_indices_across_levels(level_data, box_side="top", remove_edges=False) + bottom_indices = grid.boundary_indices_across_levels(level_data, box_side="bottom", remove_edges=False) + front_indices = grid.boundary_indices_across_levels(level_data, box_side="front", remove_edges=False) + back_indices = grid.boundary_indices_across_levels(level_data, box_side="back", remove_edges=False) + + # box_no_edge = grid.bounding_box_indices(shape=grid.level_to_shape(coarsest_level), remove_edges=True) + # inlet = box_no_edge["left"] + # outlet = box_no_edge["right"] + # walls = [box["bottom"][i] + box["top"][i] + box["front"][i] + box["back"][i] for i in range(velocity_set.d)] + # walls = np.unique(np.array(walls), axis=-1).tolist() + + # inlet = [[] for _ in range(num_levels - 1)] + [inlet] + # outlet = [[] for _ in range(num_levels - 1)] + [outlet] + # walls = [[] for _ in range(num_levels - 1)] + [walls] + + # Filter front and back indices to remove overlaps with top and bottom at each level + filtered_front_indices = [] + filtered_back_indices = [] + filtered_top_indices = [] + filtered_bottom_indices = [] + for level in range(num_levels): + left_set = set(zip(*left_indices[level])) if left_indices[level] else set() + right_set = set(zip(*right_indices[level])) if right_indices[level] else set() + top_set = set(zip(*top_indices[level])) if top_indices[level] else set() + bottom_set = set(zip(*bottom_indices[level])) if bottom_indices[level] else set() + front_set = set(zip(*front_indices[level])) if front_indices[level] else set() + back_set = set(zip(*back_indices[level])) if back_indices[level] else set() + filtered_front_set = front_set - (top_set | bottom_set ) + filtered_back_set = back_set - (top_set | bottom_set ) + filtered_top_set = top_set - (left_set | right_set) + filtered_bottom_set = bottom_set - (left_set | right_set) + filtered_front_indices.append( + [list(coords) for coords in zip(*filtered_front_set)] if filtered_front_set else [] + ) + filtered_back_indices.append( + [list(coords) for coords in zip(*filtered_back_set)] if filtered_back_set else [] + ) + filtered_top_indices.append( + [list(coords) for coords in zip(*filtered_top_set)] if filtered_top_set else [] + ) + filtered_bottom_indices.append( + [list(coords) for coords in zip(*filtered_bottom_set)] if filtered_bottom_set else [] + ) + + # Turbulent Flow Profile + def bc_profile(taper_fraction=0.05): + assert compute_backend == ComputeBackend.NEON + _, ny, nz = grid_shape_zip + dtype = precision_policy.compute_precision.wp_dtype + H_y = dtype(ny // 2 ** (num_levels - 1) - 1) + H_z = dtype(nz // 2 ** (num_levels - 1) - 1) + two = dtype(2.0) + ulb_wp = dtype(ulb) + taper_frac = dtype(taper_fraction) + core_frac = dtype(1.0 - 2.0 * taper_fraction) + + @wp.func + def bc_profile_warp(index: wp.vec3i): + y = dtype(index[1]) + z = dtype(index[2]) + y_center = wp.abs(y - (H_y / two)) + z_center = wp.abs(z - (H_z / two)) + y_norm = two * y_center / H_y + z_norm = two * z_center / H_z + max_norm = wp.max(y_norm, z_norm) + velocity = ulb_wp + if max_norm > core_frac: + velocity = ulb_wp * (dtype(1.0) - (max_norm - core_frac) / taper_frac) + velocity = 
wp.max(dtype(0.0), velocity)
+            return wp.vec(velocity, length=1)
+
+        return bc_profile_warp
+
+    bc_inlet = RegularizedBC(
+        "velocity",
+        #profile=bc_profile(),  # optional tapered inlet profile defined above
+        prescribed_value=(ulb, 0.0, 0.0),
+        indices=left_indices,
+    )
+
+    bc_outlet = DoNothingBC(indices=right_indices)
+
+    #bc_top = FullwayBounceBackBC(indices=top_indices)
+    bc_top = HybridBC(bc_method="nonequilibrium_regularized", prescribed_value=(ulb, 0.0, 0.0), indices=top_indices)
+    #bc_bottom = FullwayBounceBackBC(indices=bottom_indices)
+    bc_bottom = HybridBC(bc_method="nonequilibrium_regularized", prescribed_value=(ulb, 0.0, 0.0), indices=bottom_indices)
+    #bc_front = FullwayBounceBackBC(indices=filtered_front_indices)
+    bc_front = HybridBC(bc_method="nonequilibrium_regularized", prescribed_value=(ulb, 0.0, 0.0), indices=filtered_front_indices)
+    #bc_back = FullwayBounceBackBC(indices=filtered_back_indices)
+    bc_back = HybridBC(bc_method="nonequilibrium_regularized", prescribed_value=(ulb, 0.0, 0.0), indices=filtered_back_indices)
+
+    bc_body = HybridBC(
+        bc_method="bounceback_grads",
+        mesh_vertices=body_vertices,
+        voxelization_method=MeshVoxelizationMethod("AABB_CLOSE", close_voxels=3),
+        use_mesh_distance=True,
+    )
+
+    # Body must be last; outlet must be second to last (downstream code relies on this ordering).
+    return [bc_top, bc_bottom, bc_front, bc_back, bc_inlet, bc_outlet, bc_body]
+    # return [bc_walls, bc_inlet, bc_outlet, bc_body]
+
+
+# Simulation Initialization
+# =========================
+def initialize_simulation(grid, boundary_conditions, omega, initializer, collision_type="KBC", mres_perf_opt=xlb.MresPerfOptimizationType.FUSION_AT_FINEST):
+    """
+    Initialize the multiresolution simulation manager.
+    """
+    sim = xlb.helper.MultiresSimulationManager(
+        omega=omega,
+        grid=grid,
+        boundary_conditions=boundary_conditions,
+        collision_type=collision_type,
+        initializer=initializer,
+        mres_perf_opt=mres_perf_opt,
+    )
+    return sim
+
+# Utility Functions
+# =================
+def print_lift_drag(sim, step, momentum_transfer, ulb, reference_area, voxel_size):
+    """
+    Calculate and print lift and drag coefficients.
+    """
+    boundary_force = momentum_transfer(sim.f_0, sim.f_1, sim.bc_mask, sim.missing_mask)
+    drag = boundary_force[0]
+    lift = boundary_force[2]
+    cd = 2.0 * drag / (ulb**2 * reference_area)
+    cl = 2.0 * lift / (ulb**2 * reference_area)
+    if np.isnan(cd) or np.isnan(cl):
+        raise ValueError(f"NaN detected in coefficients at step {step}: Cd={cd}, Cl={cl}")
+    # Appends to the module-level drag_values list created in the main loop below.
+    drag_values.append([cd, cl])
+    print(f"CD={cd:.3f}, CL={cl:.3f}, Drag Force (lattice units)={drag:.6f}")
+
+def plot_drag_lift(drag_values, output_dir, print_interval, script_name, percentile_range=(15, 85), use_log_scale=False):
+    """
+    Plot CD and CL over time and save the plot to the output directory.
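+
+    Illustrative call, matching the names used in the main loop below:
+
+        plot_drag_lift(drag_values, output_dir, print_interval, script_name,
+                       percentile_range=(15, 85), use_log_scale=False)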
+ """ + drag_values_array = np.array(drag_values) + steps = np.arange(0, len(drag_values) * print_interval, print_interval) + cd_values = drag_values_array[:, 0] + cl_values = drag_values_array[:, 1] + y_min = min(np.percentile(cd_values, percentile_range[0]), np.percentile(cl_values, percentile_range[0])) + y_max = max(np.percentile(cd_values, percentile_range[1]), np.percentile(cl_values, percentile_range[1])) + padding = (y_max - y_min) * 0.1 + y_min, y_max = y_min - padding, y_max + padding + if use_log_scale: + y_min = max(y_min, 1e-6) + plt.figure(figsize=(10, 6)) + plt.plot(steps, cd_values, label='Drag Coefficient (Cd)', color='blue') + plt.plot(steps, cl_values, label='Lift Coefficient (Cl)', color='red') + plt.xlabel('Simulation Step') + plt.ylabel('Coefficient') + plt.title(f'{script_name}: Drag and Lift Coefficients Over Time') + plt.legend() + plt.grid(True) + plt.ylim(y_min, y_max) + if use_log_scale: + plt.yscale('log') + plt.savefig(os.path.join(output_dir, 'drag_lift_plot.png')) + plt.close() + +def compute_voxel_statistics_and_reference_area(sim, bc_mask_exporter, level_data, actual_num_levels, sparsity_pattern, boundary_conditions, voxel_size): + """ + Compute active/solid voxels, totals, lattice updates, and reference area based on simulation data. + """ + # Compute macro fields + sim.macro(sim.f_0, sim.bc_mask, sim.rho, sim.u, streamId=0) + fields_data = bc_mask_exporter.get_fields_data({"bc_mask": sim.bc_mask}) + bc_mask_data = fields_data["bc_mask_0"] + level_id_field = bc_mask_exporter.level_id_field + + # Compute solid voxels per level (assuming 255 is the solid marker) + solid_voxels = [] + for lvl in range(actual_num_levels): + level_mask = level_id_field == lvl + solid_voxels.append(np.sum(bc_mask_data[level_mask] == 255)) + + # Compute active voxels (total non-zero in sparsity minus solids) + active_voxels = [np.count_nonzero(mask) for mask in sparsity_pattern] + active_voxels = [max(0, active_voxels[lvl] - solid_voxels[lvl]) for lvl in range(actual_num_levels)] + + # Totals + total_voxels = sum(active_voxels) + total_lattice_updates_per_step = sum(active_voxels[lvl] * (2 ** (actual_num_levels - 1 - lvl)) for lvl in range(actual_num_levels)) + + # Compute reference area (projected on YZ plane at finest level) + finest_level = 0 + mask_finest = level_id_field == finest_level + bc_mask_finest = bc_mask_data[mask_finest] + active_indices_finest = np.argwhere(level_data[0][0]) + bc_body_id = boundary_conditions[-1].id # Assuming last BC is bc_body + solid_voxels_indices = active_indices_finest[bc_mask_finest == bc_body_id] + unique_jk = np.unique(solid_voxels_indices[:, 1:3], axis=0) + reference_area = unique_jk.shape[0] + reference_area_physical = reference_area * (voxel_size ** 2) + + return { + "active_voxels": active_voxels, + "solid_voxels": solid_voxels, + "total_voxels": total_voxels, + "total_lattice_updates_per_step": total_lattice_updates_per_step, + "reference_area": reference_area, + "reference_area_physical": reference_area_physical + } + +# Experimental data +experimental_re = [ + 19.42, 32.29, 58.43, 88.00, 144.28, 272.54, 422.44, 664.10, 1153.20, 1788.30, + 3019.50, 5172.00, 8615.00, 13949.00, 20987.00, 32653.00, 52119.00, 8.32e4, + 1.37e5, 1.95e5, 2.36e5, 2.65e5, 2.84e5, 3.00e5, 3.17e5, 3.45e5, 3.86e5, + 4.54e5, 5.12e5, 6.25e5, 7.74e5, 9.05e5, 9.86e5 +] +experimental_cd = [ + 2.6658, 1.9542, 1.4126, 1.1271, 0.8867, 0.6878, 0.5889, 0.50424, 0.45045, + 0.41389, 0.39122, 0.38037, 0.39687, 0.41409, 0.43947, 0.45079, 0.45724, + 0.45088, 0.43223, 
0.41434, 0.39601, 0.36492, 0.2528, 0.15643, 0.11629, + 0.09957, 0.09278, 0.08992, 0.09279, 0.10536, 0.12135, 0.13208, 0.13976 +] + +# Dictionary to store simulated average Cd for each Re +simulated_cds = {} + +# Main Script +# =========== +# Loop over each Reynolds number +for Re in reynolds_numbers: + # Initialize XLB + wp.clear_kernel_cache() + xlb.init( + velocity_set=velocity_set, + default_backend=compute_backend, + default_precision_policy=precision_policy, + ) + + # Generate mesh (done once, as it's the same for all Re) + if mesher_type == "makemesh": + level_data, body_vertices, grid_shape_zip, partSize, actual_num_levels, shift, sparsity_pattern, level_origins = generate_makemesh_mesh( + stl_filename, voxel_size + ) + elif mesher_type == "cuboid": + level_data, body_vertices, grid_shape_zip, partSize, actual_num_levels, shift, sparsity_pattern, level_origins = generate_cuboid_mesh( + stl_filename, voxel_size + ) + else: + raise ValueError(f"Invalid mesher_type: {mesher_type}. Must be 'makemesh' or 'cuboid'.") + + # Characteristic length + L = partSize[0] + L = float(L) # Cast to built-in float to avoid NumPy type propagation issues with Warp + + # Compute u_physical for this Re + u_physical = Re * kinematic_viscosity / L + + # Set script name based on Re + if Re >= 1000000: + re_str = f"Re{int(Re / 1000000)}M" + elif Re >= 1000: + re_str = f"Re{int(Re / 1000)}K" + else: + re_str = f"Re{Re}" + script_name = f"{base_script_name} {re_str}" + + # Calculate lattice parameters + delta_x_coarse = voxel_size * 2 ** (actual_num_levels - 1) + delta_t = voxel_size * ulb / u_physical + nu_lattice = kinematic_viscosity * delta_t / (voxel_size ** 2) + + omega = 1.0 / (3.0 * nu_lattice + 0.5) + + # Create output directory + current_dir = os.path.join(os.path.dirname(__file__)) + output_dir = os.path.join(current_dir, script_name) + if os.path.exists(output_dir): + shutil.rmtree(output_dir) + os.makedirs(output_dir) + + # Define exporter objects + field_name_cardinality_dict = {"velocity": 3, "density": 1} + h5exporter = MultiresIO( + field_name_cardinality_dict, + level_data, + scale=voxel_size, + offset=-shift, + timestep_size=delta_t, + ) + bc_mask_exporter = MultiresIO({"bc_mask": 1}, level_data) + + # Create grid + grid = multires_grid_factory( + grid_shape_zip, + velocity_set=velocity_set, + sparsity_pattern_list=sparsity_pattern, + sparsity_pattern_origins=[neon.Index_3d(*box_origin) for box_origin in level_origins], + ) + + # Calculate num_steps + coarsest_level = grid.count_levels - 1 + grid_shape_x_coarsest = grid.level_to_shape(coarsest_level)[0] + num_steps = int(flow_passes * (grid_shape_x_coarsest / ulb)) + + # Calculate print and file output intervals + print_interval = max(1, int(num_steps * (print_interval_percentage / 100.0))) + crossover_step = int(num_steps * (file_output_crossover_percentage / 100.0)) + file_output_interval_pre_crossover = max(1, int(crossover_step / num_file_outputs_pre_crossover)) if num_file_outputs_pre_crossover > 0 else num_steps + 1 + file_output_interval_post_crossover = max(1, int((num_steps - crossover_step) / num_file_outputs_post_crossover)) if num_file_outputs_post_crossover > 0 else num_steps + 1 + + # Setup boundary conditions + boundary_conditions = setup_boundary_conditions(grid, level_data, body_vertices, ulb, nu_lattice, compute_backend) + + # Create initializer + initializer = CustomMultiresInitializer( + bc_id=boundary_conditions[-2].id, # bc_outlet + constant_velocity_vector=(ulb, 0.0, 0.0), + velocity_set=velocity_set, + 
precision_policy=precision_policy, + compute_backend=compute_backend, + ) + + + # Initialize simulation + sim = initialize_simulation(grid, boundary_conditions, omega, initializer) + + # Compute voxel statistics and reference area + stats = compute_voxel_statistics_and_reference_area(sim, bc_mask_exporter, level_data, actual_num_levels, sparsity_pattern, boundary_conditions, voxel_size) + active_voxels = stats["active_voxels"] + solid_voxels = stats["solid_voxels"] + total_voxels = stats["total_voxels"] + total_lattice_updates_per_step = stats["total_lattice_updates_per_step"] + reference_area = stats["reference_area"] + reference_area_physical = stats["reference_area_physical"] + + # Save initial bc_mask + #bc_mask_exporter.to_hdf5(filename, {"bc_mask": sim.bc_mask}, compression="gzip", compression_opts=0) + + wp.synchronize() + momentum_transfer = MultiresMomentumTransfer( + boundary_conditions[-1], + mres_perf_opt=xlb.MresPerfOptimizationType.FUSION_AT_FINEST, + compute_backend=compute_backend, + ) + + # Print simulation info + print("\n" + "=" * 50 + "\n") + print(f"Simulation Configuration for Re = {Re}:") + # print(f"Grid shape at finest level: {grid_shape_zip}") + # print(f"Grid shape at coarsest level: {grid.level_to_shape(coarsest_level)}") + print(f"Number of flow passes: {flow_passes}") + print(f"Calculated iterations: {num_steps:,}") + # print(f"Output directory: {output_dir}") + # print(f"Print interval: {print_interval} steps (every {print_interval_percentage}% of iterations)") + # print(f"File output interval pre-crossover (0-{file_output_crossover_percentage}%): {file_output_interval_pre_crossover} steps") + # print(f"File output interval post-crossover ({file_output_crossover_percentage}-100%): {file_output_interval_post_crossover} steps") + print(f"Finest voxel size: {voxel_size} meters") + print(f"Coarsest voxel size: {delta_x_coarse} meters") + print(f"Total voxels: {sum(np.count_nonzero(mask) for mask in sparsity_pattern):,}") + print(f"Total active voxels: {total_voxels:,}") + print(f"Active voxels per level: {active_voxels}") + print(f"Solid voxels per level: {solid_voxels}") + print(f"Total lattice updates per global step: {total_lattice_updates_per_step:,}") + print(f"Actual number of refinement levels: {actual_num_levels}") + print(f"Physical inlet velocity: {u_physical:.4f} m/s") + print(f"Lattice velocity (ulb): {ulb}") + print(f"Characteristic length: {L: .4f} meters") + # print(f"Kinematic viscosity: {kinematic_viscosity} m^2/s") + print(f"Computed reference area (bc_mask): {reference_area} lattice units") + print(f"Physical reference area (bc_mask): {reference_area_physical:.6f} m^2") + print(f"Reynolds number: {Re:,.2f}") + # print(f"Lattice viscosity: {nu_lattice:.5f}") + print(f"Relaxation parameter (omega): {omega:.5f}") + print("\n" + "=" * 50 + "\n") + + # -------------------------- Simulation Loop -------------------------- + wp.synchronize() + start_time = time.time() + compute_time = 0.0 + steps_since_last_print = 0 + drag_values = [] + + for step in range(num_steps): + step_start = time.time() + sim.step() + wp.synchronize() + compute_time += time.time() - step_start + steps_since_last_print += 1 + if step % print_interval == 0 or step == num_steps - 1: + sim.macro(sim.f_0, sim.bc_mask, sim.rho, sim.u, streamId=0) + wp.synchronize() + print_lift_drag(sim, step, momentum_transfer, ulb, reference_area, voxel_size) + filename = os.path.join(output_dir, f"{script_name}_{step:04d}") + h5exporter.to_slice_image( + filename, + {"velocity": sim.u}, + 
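+                # Note: the exporter multiplies plane_point elementwise by plane_normal
+                # before slicing, so with normal (0, 1, 0) only the y-component of
+                # plane_point is actually used here.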
plane_point=(1, 0, 0), + plane_normal=(0, 1, 0), + grid_res=2500, + bounds=(0.1, .9, 0.1, .9), + show_axes=False, + show_colorbar=False, + slice_thickness=delta_x_coarse, #needed when using model units + normalize = u_physical*1.5, #eventually we could have the 1.5 read from json as we did before + ) + end_time = time.time() + elapsed = end_time - start_time + total_lattice_updates = total_lattice_updates_per_step * steps_since_last_print + MLUPS = total_lattice_updates / compute_time / 1e6 if compute_time > 0 else 0.0 + current_flow_passes = step * ulb / grid_shape_x_coarsest + remaining_steps = num_steps - step - 1 + time_remaining = 0.0 if MLUPS == 0 else (total_lattice_updates_per_step * remaining_steps) / (MLUPS * 1e6) + hours, rem = divmod(time_remaining, 3600) + minutes, seconds = divmod(rem, 60) + time_remaining_str = f"{int(hours):02d}h {int(minutes):02d}m {int(seconds):02d}s\n" + print(f" \n" + f"Completed step {step}/{num_steps} ({remaining_steps} remaining). \n" + f"Flow Passes: {current_flow_passes:.2f}. \n" + f"Time elapsed for last {steps_since_last_print} steps: {elapsed:.6f} seconds. \n" + f"Compute time: {compute_time:.6f} seconds. \n" + f"MLUPS: {MLUPS:.2f}. \n" + f"Estimated time remaining: {time_remaining_str}") + start_time = time.time() + compute_time = 0.0 + steps_since_last_print = 0 + file_output_interval = file_output_interval_pre_crossover if step < crossover_step else file_output_interval_post_crossover + if step % file_output_interval == 0 or step == num_steps - 1: + sim.macro(sim.f_0, sim.bc_mask, sim.rho, sim.u, streamId=0) + filename = os.path.join(output_dir, f"{script_name}_{step:04d}") + try: + h5exporter.to_hdf5(filename, {"velocity": sim.u, "density": sim.rho}, compression="gzip", compression_opts=0) + xmf_filename = f"{filename}.xmf" + hdf5_basename = f"{script_name}_{step:04d}.h5" + except Exception as e: + print(f"Error during file output at step {step}: {e}") + wp.synchronize() + + # Save drag and lift data to CSV + if len(drag_values) > 0: + with open(os.path.join(output_dir, "drag_lift.csv"), 'w') as fd: + fd.write("Step,Cd,Cl\n") + for i, (cd, cl) in enumerate(drag_values): + fd.write(f"{i * print_interval},{cd},{cl}\n") + plot_drag_lift(drag_values, output_dir, print_interval, script_name) + + # Calculate and print average Cd and Cl for the last 50% + drag_values_array = np.array(drag_values) + if len(drag_values) > 0: + start_index = len(drag_values) // 2 + last_half = drag_values_array[start_index:, :] + avg_cd = np.mean(last_half[:, 0]) + avg_cl = np.mean(last_half[:, 1]) + print(f"Average Drag Coefficient (Cd) for last 50%: {avg_cd:.6f}") + print(f"Average Lift Coefficient (Cl) for last 50%: {avg_cl:.6f}") + # Store the average Cd for this Re + simulated_cds[Re] = avg_cd + else: + print("No drag or lift data collected.") + + # Create or update the comparison plot + plt.figure(figsize=(10, 6)) + plt.semilogx(experimental_re, experimental_cd, label='Experimental', color='blue') + sim_re = sorted(simulated_cds.keys()) + sim_cd = [simulated_cds[r] for r in sim_re] + plt.semilogx(sim_re, sim_cd, label='Simulated', marker='x', linestyle='--', color='red') + plt.xlabel('Reynolds Number (Re)') + plt.ylabel('Drag Coefficient (Cd)') + plt.title(f"{base_script_name} Re vs Cd: Experimental vs Simulated") + plt.legend() + plt.grid(True) + comparison_plot_path = os.path.join(current_dir, f"{base_script_name.replace(' ', '_')}_comparison_plot.png") + plt.savefig(comparison_plot_path) + plt.close() + print(f"Updated comparison plot saved to 
{comparison_plot_path}") \ No newline at end of file diff --git a/examples/windsor.py b/examples/windsor.py new file mode 100644 index 00000000..42672152 --- /dev/null +++ b/examples/windsor.py @@ -0,0 +1,1878 @@ +import neon +import warp as wp +import numpy as np +import time +import os +import re +import matplotlib.pyplot as plt +import trimesh +import shutil + +import xlb +from xlb.compute_backend import ComputeBackend +from xlb.precision_policy import PrecisionPolicy +from xlb.grid import multires_grid_factory +from xlb.operator.boundary_condition import ( + FullwayBounceBackBC, + HalfwayBounceBackBC, + RegularizedBC, + ExtrapolationOutflowBC, + DoNothingBC, + ZouHeBC, + HybridBC, +) +from xlb.operator.boundary_masker import MeshVoxelizationMethod +from xlb.utils.mesher import make_cuboid_mesh, MultiresIO +from xlb.utils.makemesh import generate_mesh +from xlb.operator.force import MultiresMomentumTransfer +from xlb.helper.initializers import CustomMultiresInitializer +from xlb import MresPerfOptimizationType +from typing import Any + + + + +def adjust_bbox(cuboid_max, cuboid_min, voxel_size_up): + """ + Adjust the bounding box to the nearest points of one level finer grid that encloses the desired region. + + Args: + cuboid_min (np.ndarray): Desired minimum coordinates of the bounding box. + cuboid_max (np.ndarray): Desired maximum coordinates of the bounding box. + voxel_size_up (float): Voxel size of one level higher (finer) grid. + + Returns: + tuple: (adjusted_min, adjusted_max) snapped to grid points of one level higher. + """ + + adjusted_min = np.round(cuboid_min / voxel_size_up) * voxel_size_up + adjusted_max = np.round(cuboid_max / voxel_size_up) * voxel_size_up + return adjusted_min, adjusted_max + +def prepare_sparsity_pattern(level_data): + """ + Prepare the sparsity pattern for the multiresolution grid based on the level data. + """ + sparsity_pattern = [] + level_origins = [] + for lvl in range(len(level_data)): + level_mask = level_data[lvl][0] + level_mask = np.ascontiguousarray(level_mask, dtype=np.int32) + sparsity_pattern.append(level_mask) + level_origins.append(level_data[lvl][2]) + return sparsity_pattern, level_origins + +def make_cuboid_mesh(voxel_size, cuboids, stl_filename): + """ + Create a strongly-balanced multi-level cuboid mesh with a sequence of bounding boxes. + Outputs mask arrays that are set to True only in regions not covered by finer levels. + + Args: + voxel_size (float): Voxel size of the finest grid . + cuboids (list): List of multipliers defining each level's domain. + stl_name (str): Path to the STL file. + + Returns: + list: Level data with mask arrays, voxel sizes, origins, and levels. 
+ """ + # Load the mesh and get its bounding box + mesh = trimesh.load_mesh(stl_filename, process=False) + assert not mesh.is_empty, ValueError("Loaded mesh is empty or invalid.") + + mesh_vertices = mesh.vertices + min_bound = mesh_vertices.min(axis=0) + max_bound = mesh_vertices.max(axis=0) + partSize = max_bound - min_bound + + level_data = [] + adjusted_bboxes = [] + max_voxel_size = voxel_size * pow(2, (len(cuboids) - 1)) + # Step 1: Generate all levels and store their data + for level in range(len(cuboids)): + # Compute desired bounding box for this level + cuboid_min = np.array( + [ + min_bound[0] - cuboids[level][0] * partSize[0], + min_bound[1] - cuboids[level][2] * partSize[1], + min_bound[2] - cuboids[level][4] * partSize[2], + ], + dtype=float, + ) + + cuboid_max = np.array( + [ + max_bound[0] + cuboids[level][1] * partSize[0], + max_bound[1] + cuboids[level][3] * partSize[1], + max_bound[2] + cuboids[level][5] * partSize[2], + ], + dtype=float, + ) + + # Set voxel size for this level + voxel_size_level = max_voxel_size / pow(2, level) + + # Adjust bounding box to align with one level up (finer grid) + if level > 0: + voxel_level_up = max_voxel_size / pow(2, level - 1) + else: + voxel_level_up = voxel_size_level + adjusted_min, adjusted_max = adjust_bbox(cuboid_max, cuboid_min, voxel_level_up) + + xmin, ymin, zmin = adjusted_min + xmax, ymax, zmax = adjusted_max + + # Compute number of voxels based on level-specific voxel size + nx = int(np.round((xmax - xmin) / voxel_size_level)) + ny = int(np.round((ymax - ymin) / voxel_size_level)) + nz = int(np.round((zmax - zmin) / voxel_size_level)) + print(f"Domain {nx}, {ny}, {nz} Origin {adjusted_min} Voxel Size {voxel_size_level} Voxel Level Up {voxel_level_up}") + + voxel_matrix = np.ones((nx, ny, nz), dtype=bool) + + origin = adjusted_min + level_data.append((voxel_matrix, voxel_size_level, origin, level)) + adjusted_bboxes.append((adjusted_min, adjusted_max)) + + # Step 2: Adjust coarser levels to exclude regions covered by finer levels + for k in range(len(level_data) - 1): # Exclude the finest level + # Current level's data + voxel_matrix_k = level_data[k][0] + origin_k = level_data[k][2] + voxel_size_k = level_data[k][1] + nx, ny, nz = voxel_matrix_k.shape + + # Next finer level's bounding box + adjusted_min_k1, adjusted_max_k1 = adjusted_bboxes[k + 1] + + # Compute index ranges in level k that overlap with level k+1's bounding box + # Use epsilon (1e-10) to handle floating-point precision + i_start = max(0, int(np.ceil((adjusted_min_k1[0] - origin_k[0] - 1e-10) / voxel_size_k))) + i_end = min(nx, int(np.floor((adjusted_max_k1[0] - origin_k[0] + 1e-10) / voxel_size_k))) + j_start = max(0, int(np.ceil((adjusted_min_k1[1] - origin_k[1] - 1e-10) / voxel_size_k))) + j_end = min(ny, int(np.floor((adjusted_max_k1[1] - origin_k[1] + 1e-10) / voxel_size_k))) + k_start = max(0, int(np.ceil((adjusted_min_k1[2] - origin_k[2] - 1e-10) / voxel_size_k))) + k_end = min(nz, int(np.floor((adjusted_max_k1[2] - origin_k[2] + 1e-10) / voxel_size_k))) + + # Set overlapping region to zero + voxel_matrix_k[i_start:i_end, j_start:j_end, k_start:k_end] = 0 + + # Step 3 Convert to Indices from STL units + num_levels = len(level_data) + level_data = [(dr, int(v / voxel_size), np.round(dOrigin / v).astype(int), num_levels - 1 - l) for dr, v, dOrigin, l in level_data] + + level_data = list(reversed(level_data)) + sparsity_pattern, level_origins = prepare_sparsity_pattern(level_data) + + return level_data, sparsity_pattern, level_origins + + +class 
MultiresIO(object): + def __init__(self, field_name_cardinality_dict, levels_data, scale=1, offset=(0.0, 0.0, 0.0), store_precision=None, timestep_size=1): + """ + Initialize the MultiresIO object. + + Parameters + ---------- + field_name_cardinality_dict : dict + A dictionary mapping field names to their cardinalities. + Example: {'velocity_x': 1, 'velocity_y': 1, 'velocity': 3, 'density': 1} + levels_data : list of tuples + Each tuple contains (data, voxel_size, origin, level). + scale : float or tuple, optional + Scale factor for the coordinates. Typically smallest voxel size + offset : tuple, optional + Offset to be applied to the coordinates. + store_precision : str, optional + The precision policy for storing data. + timestep_size: float + Scale factor to convert velocities to model units. Typically smallest timestep size + """ + # Process the multires geometry and extract coordinates and connectivity in the coordinate system of the finest level + coordinates, connectivity, level_id_field, total_cells = self.process_geometry(levels_data, scale) + + # Ensure that coordinates and connectivity are not empty + assert coordinates.size != 0, "Error: No valid data to process. Check the input levels_data." + + # Merge duplicate points + coordinates, connectivity = self._merge_duplicates(coordinates, connectivity, levels_data) + + # Apply scale and offset + coordinates = self._transform_coordinates(coordinates, scale, offset) + + # Assign to self + self.field_name_cardinality_dict = field_name_cardinality_dict + self.levels_data = levels_data + self.coordinates = coordinates + self.connectivity = connectivity + self.level_id_field = level_id_field + self.total_cells = total_cells + self.centroids = np.mean(coordinates[connectivity], axis=1) + + #For convertin velocities to model units + if scale != 1 and timestep_size !=1: + self.conversion = scale / timestep_size + else: + self.conversion = 1 + + # Set the default precision policy if not provided + from xlb import DefaultConfig + + if store_precision is None: + self.store_precision = DefaultConfig.default_precision_policy.store_precision + self.store_dtype = DefaultConfig.default_precision_policy.store_precision.wp_dtype + + # Prepare and allocate the inputs for the NEON container + self.field_warp_dict, self.origin_list = self._prepare_container_inputs() + + # Construct the NEON container for exporting multi-resolution data + self.container = self._construct_neon_container() + + def process_geometry(self, levels_data, scale): + num_voxels_per_level = [np.sum(data) for data, _, _, _ in levels_data] + num_points_per_level = [8 * nv for nv in num_voxels_per_level] + point_id_offsets = np.cumsum([0] + num_points_per_level[:-1]) + + all_corners = [] + all_connectivity = [] + level_id_field = [] + total_cells = 0 + + for level_idx, (data, voxel_size, origin, level) in enumerate(levels_data): + origin = origin * voxel_size + corners_list, conn_list, _ = self._process_level(data, voxel_size, origin, level, point_id_offsets[level_idx]) + + if corners_list: + print(f"\tProcessing level {level}: Voxel size {voxel_size * scale}, Origin {origin}, Shape {data.shape}") + all_corners.extend(corners_list) + all_connectivity.extend(conn_list) + num_cells = sum(c.shape[0] for c in conn_list) + level_id_field.extend([level] * num_cells) + total_cells += num_cells + else: + print(f"\tSkipping level {level} (no unique data)") + + # Stacking coordinates and connectivity + coordinates = np.concatenate(all_corners, axis=0).astype(np.float32) + connectivity = 
np.concatenate(all_connectivity, axis=0).astype(np.int32) + level_id_field = np.array(level_id_field, dtype=np.uint8) + + return coordinates, connectivity, level_id_field, total_cells + + def _process_level(self, data, voxel_size, origin, level, point_id_offset): + """ + Given a voxel grid, returns all corners and connectivity in NumPy for this resolution level. + """ + true_indices = np.argwhere(data) + if true_indices.size == 0: + return [], [], level + + max_voxels_per_chunk = 268_435_450 + chunks = np.array_split(true_indices, max(1, (len(true_indices) + max_voxels_per_chunk - 1) // max_voxels_per_chunk)) + + all_corners = [] + all_connectivity = [] + pid_offset = point_id_offset + + for chunk in chunks: + if chunk.size == 0: + continue + corners, connectivity = self._process_voxel_chunk(chunk, np.asarray(origin, dtype=np.float32), voxel_size, pid_offset) + all_corners.append(corners) + all_connectivity.append(connectivity) + pid_offset += len(chunk) * 8 + + return all_corners, all_connectivity, level + + def _process_voxel_chunk(self, true_indices, origin, voxel_size, point_id_offset): + """ + Given a set of voxel indices, returns 8 corners and connectivity for each cube using NumPy. + """ + true_indices = np.asarray(true_indices, dtype=np.float32) + mins = origin + true_indices * voxel_size + offsets = np.array( + [ + [0, 0, 0], + [1, 0, 0], + [1, 1, 0], + [0, 1, 0], + [0, 0, 1], + [1, 0, 1], + [1, 1, 1], + [0, 1, 1], + ], + dtype=np.float32, + ) + + corners = (mins[:, None, :] + offsets[None, :, :] * voxel_size).reshape(-1, 3).astype(np.float32) + base_ids = point_id_offset + np.arange(len(true_indices), dtype=np.int32) * 8 + connectivity = (base_ids[:, None] + np.arange(8, dtype=np.int32)).astype(np.int32) + + return corners, connectivity + + def optimal_chunk_shape(self, shape, dtype, target_mb=4, min_chunks=64, max_chunks=4096, max_chunk_mb=64): + """ + Choose a row-major HDF5 chunk shape for compression: + - target_mb: desired uncompressed bytes per chunk (e.g., 4 for gzip, 8–16 for lzf/lz4) + - keeps total number of chunks roughly in [min_chunks, max_chunks] + - caps chunk size at max_chunk_mb to limit per-chunk memory + + Returns a tuple suitable for h5py.create_dataset(..., chunks=...). 
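+
+    Illustrative arithmetic (assuming 10M float32 xyz points): row_bytes = 3 * 4 = 12,
+    so the 4 MiB target alone would give ~349k rows, but the min_chunks=64 window
+    clamps the result to n0 // 64 rows:
+
+        >>> MultiresIO.optimal_chunk_shape(None, (10_000_000, 3), np.float32)
+        (156250, 3)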
+ """ + + n0 = int(shape[0]) if len(shape) else 1 + itemsize = np.dtype(dtype).itemsize + row_elems = int(np.prod(shape[1:], dtype=np.int64)) if len(shape) > 1 else 1 + row_bytes = max(1, row_elems * itemsize) + + # initial rows by target size + target_bytes = int(target_mb * 1024 * 1024) + rows = max(1, target_bytes // row_bytes) + + # clamp by desired chunk-count window + lower_rows = max(1, (n0 + max_chunks - 1) // max_chunks) # ensures <= max_chunks + upper_rows = max(1, n0 // max_chunks if min_chunks == 0 else n0 // min_chunks) # ensures >= min_chunks + rows = min(max(rows, lower_rows), max(1, upper_rows)) + + # cap by max bytes per chunk + max_bytes = int(max_chunk_mb * 1024 * 1024) + rows = min(rows, max(1, max_bytes // row_bytes)) + + return (min(n0, rows),) + tuple(shape[1:]) + + def save_xdmf(self, h5_filename, xmf_filename, total_cells, num_points, fields={}): + # Generate an XDMF file to accompany the HDF5 file + print(f"\tGenerating XDMF file: {xmf_filename}") + hdf5_rel_path = h5_filename.split("/")[-1] + with open(xmf_filename, "w") as xmf: + xmf.write(f''' + + + + + + + {hdf5_rel_path}:/Mesh/Connectivity + + + + + {hdf5_rel_path}:/Mesh/Points + + + + + {hdf5_rel_path}:/Mesh/Level + + + ''') + for field_name in fields.keys(): + xmf.write(f''' + + + {hdf5_rel_path}:/Fields/{field_name} + + + ''') + xmf.write(""" + + + + """) + print("\tXDMF file written successfully") + return + + def save_hdf5_file(self, filename, coordinates, connectivity, level_id_field, fields_data, compression="gzip", compression_opts=0): + """Write the processed mesh data to an HDF5 file. + Parameters + ---------- + filename : str + The name of the output HDF5 file. + coordinates : numpy.ndarray + An array of all coordinates. + connectivity : numpy.ndarray + An array of all connectivity data. + level_id_field : numpy.ndarray + An array of all level data. + fields_data : dict + A dictionary of all field data. + compression : str, optional + The compression method to use for the HDF5 file. + compression_opts : int, optional + The compression options to use for the HDF5 file. 
+ """ + import h5py + + pts_chunks = self.optimal_chunk_shape(coordinates.shape, np.float32, target_mb=4) + conn_chunks = self.optimal_chunk_shape(connectivity.shape, np.int32, target_mb=4) + lvl_chunks = self.optimal_chunk_shape(level_id_field.shape, np.uint8, target_mb=4) + fld_chunks = self.optimal_chunk_shape((self.total_cells,), np.float32, target_mb=4) + + with h5py.File(filename + ".h5", "w") as f: + f.create_dataset("/Mesh/Points", data=coordinates, compression=compression, compression_opts=compression_opts, chunks=pts_chunks, shuffle=True) + f.create_dataset("/Mesh/Connectivity", data=connectivity, compression=compression, compression_opts=compression_opts, chunks=conn_chunks, shuffle=True) + f.create_dataset("/Mesh/Level", data=level_id_field, compression=compression, compression_opts=compression_opts, chunks=lvl_chunks, shuffle=True) + fg = f.create_group("/Fields") + for fname, fdata in fields_data.items(): + #Convert lbm velocity to model velocity + if "velocity" in fname.lower(): + fdata = fdata * self.conversion + fg.create_dataset(fname, data=fdata.astype(np.float32), compression=compression, compression_opts=compression_opts, chunks=fld_chunks, shuffle=True) + + def _merge_duplicates(self, coordinates, connectivity, levels_data): + # Merging duplicate points + tolerance = 0.01 + chunk_size = 10_000_000 # Adjust based on GPU memory + num_points = coordinates.shape[0] + unique_points = [] + mapping = np.zeros(num_points, dtype=np.int32) + unique_idx = 0 + + # Get the grid shape of computational box at the finest level from the levels_data + num_levels = len(levels_data) + grid_shape_finest = np.array(levels_data[-1][0].shape) * 2 ** (num_levels - 1) + + for start in range(0, num_points, chunk_size): + end = min(start + chunk_size, num_points) + coords_chunk = coordinates[start:end] + + # Simple hashing: grid coordinates as tuple keys + grid_coords = np.round(coords_chunk / tolerance).astype(np.int64) + hash_keys = grid_coords[:, 0] + grid_coords[:, 1] * grid_shape_finest[0] + grid_coords[:, 2] * grid_shape_finest[0] * grid_shape_finest[1] + unique_hash, inverse = np.unique(hash_keys, return_inverse=True) + unique_hash, unique_indices, inverse = np.unique(hash_keys, return_index=True, return_inverse=True) + unique_chunk = coords_chunk[unique_indices] + + unique_points.append(unique_chunk) + mapping[start:end] = inverse + unique_idx + unique_idx += len(unique_hash) + + coordinates = np.concatenate(unique_points) + connectivity = mapping[connectivity] + return coordinates, connectivity + + def _transform_coordinates(self, coordinates, scale, offset): + scale = np.array([scale] * 3 if isinstance(scale, (int, float)) else scale, dtype=np.float32) + offset = np.array(offset, dtype=np.float32) + return coordinates * scale + offset + + def _prepare_container_inputs(self): + # load necessary modules + from xlb.compute_backend import ComputeBackend + from xlb.grid import grid_factory + + # Get the number of levels from the levels_data + num_levels = len(self.levels_data) + + # Prepare lists to hold warp fields and origins allocated for each level + field_warp_dict = {} + origin_list = [] + for field_name, cardinality in self.field_name_cardinality_dict.items(): + field_warp_dict[field_name] = [] + for level in range(num_levels): + # get the shape of the grid at this level + box_shape = self.levels_data[level][0].shape + + # Use the warp backend to create dense fields to be written in multi-res NEON fields + grid_dense = grid_factory(box_shape, compute_backend=ComputeBackend.WARP) + 
field_warp_dict[field_name].append(grid_dense.create_field(cardinality=cardinality, dtype=self.store_precision))
+                origin_list.append(wp.vec3i(*([int(x) for x in self.levels_data[level][2]])))
+
+        return field_warp_dict, origin_list
+
+    def _construct_neon_container(self):
+        """
+        Constructs a NEON container for exporting multi-resolution data to HDF5.
+        This container will be used to transfer multi-resolution NEON fields into stacked warp fields.
+        """
+
+        @neon.Container.factory(name="HDF5MultiresExporter")
+        def container(
+            field_neon: Any,
+            field_warp: Any,
+            origin: Any,
+            level: Any,
+        ):
+            def launcher(loader: neon.Loader):
+                loader.set_mres_grid(field_neon.get_grid(), level)
+                field_neon_hdl = loader.get_mres_read_handle(field_neon)
+                refinement = 2**level
+
+                @wp.func
+                def kernel(index: Any):
+                    cIdx = wp.neon_global_idx(field_neon_hdl, index)
+                    # Get local indices by dividing the global indices (associated with the finest level) by 2^level
+                    # Subtract the origin to get the local indices in the warp field
+                    lx = wp.neon_get_x(cIdx) // refinement - origin[0]
+                    ly = wp.neon_get_y(cIdx) // refinement - origin[1]
+                    lz = wp.neon_get_z(cIdx) // refinement - origin[2]
+
+                    # write the values to the warp field
+                    cardinality = field_warp.shape[0]
+                    for card in range(cardinality):
+                        field_warp[card, lx, ly, lz] = self.store_dtype(wp.neon_read(field_neon_hdl, index, card))
+
+                loader.declare_kernel(kernel)
+
+            return launcher
+
+        return container
+
+    def get_fields_data(self, field_neon_dict):
+        """
+        Extracts and prepares the fields data from the NEON fields for export.
+        """
+        # Check if the field_neon_dict is empty
+        if not field_neon_dict:
+            return {}
+
+        # Ensure that this operator is called on multires grids
+        grid_mres = next(iter(field_neon_dict.values())).get_grid()
+        assert grid_mres.name == "mGrid", f"Operation {self.__class__.__name__} is only applicable to multi-resolution cases"
+
+        for field_name in field_neon_dict.keys():
+            assert field_name in self.field_name_cardinality_dict.keys(), (
+                f"Field {field_name} is not provided in the instantiation of the MultiresIO class!"
+            )
+
+        # number of levels
+        num_levels = grid_mres.num_levels
+        assert num_levels == len(self.levels_data), "Error: Inconsistent number of levels!"
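+
+        # From here on, each level is copied into its dense staging field, masked by that
+        # level's sparsity pattern, and the per-level arrays are concatenated in level
+        # order, matching the ordering of self.level_id_field built in process_geometry.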
+ + # Prepare the fields dictionary to be written by transfering multi-res NEON fields into stacked warp fields and then numpy arrays + fields_data = {} + for field_name, cardinality in self.field_name_cardinality_dict.items(): + if field_name not in field_neon_dict: + continue + for card in range(cardinality): + fields_data[f"{field_name}_{card}"] = [] + + # Iterate over each field and level to fill the dictionary with numpy fields + for field_name, cardinality in self.field_name_cardinality_dict.items(): + if field_name not in field_neon_dict: + continue + for level in range(num_levels): + # Create the container and run it to fill the warp fields + c = self.container(field_neon_dict[field_name], self.field_warp_dict[field_name][level], self.origin_list[level], level) + c.run(0, container_runtime=neon.Container.ContainerRuntime.neon) + + # Ensure all operations are complete before converting to JAX and Numpy arrays + wp.synchronize() + + # Convert the warp fields to numpy arrays and use level's mask to filter the data + mask = self.levels_data[level][0] + field_np = self.field_warp_dict[field_name][level].numpy() + for card in range(cardinality): + field_np_card = field_np[card][mask] + fields_data[f"{field_name}_{card}"].append(field_np_card) + + # Concatenate all field data + for field_name in fields_data.keys(): + fields_data[field_name] = np.concatenate(fields_data[field_name]) + assert fields_data[field_name].size == self.total_cells, f"Error: Field {field_name} size mismatch!" + + return fields_data + + def to_hdf5(self, output_filename, field_neon_dict, compression="gzip", compression_opts=0, store_precision=None): + """ + Export the multi-resolution mesh data to an HDF5 file. + Parameters + ---------- + output_filename : str + The name of the output HDF5 file (without extension). + field_neon_dict : a dictionary of neon mGrid Fields + Eg. The NEON fields containing velocity and density data as { "velocity": velocity_neon, "density": density_neon} + compression : str, optional + The compression method to use for the HDF5 file. + compression_opts : int, optional + The compression options to use for the HDF5 file. + store_precision : str, optional + The precision policy for storing data in the HDF5 file. + """ + import time + + # Get the fields data from the NEON fields + fields_data = self.get_fields_data(field_neon_dict) + + # Save XDMF file + self.save_xdmf(output_filename + ".h5", output_filename + ".xmf", self.total_cells, len(self.coordinates), fields_data) + + # Writing HDF5 file + print("\tWriting HDF5 file") + tic_write = time.perf_counter() + self.save_hdf5_file(output_filename, self.coordinates, self.connectivity, self.level_id_field, fields_data, compression, compression_opts) + toc_write = time.perf_counter() + print(f"\tHDF5 file written in {toc_write - tic_write:0.1f} seconds") + + def to_slice_image( + self, + output_filename, + field_neon_dict, + plane_point, + plane_normal, + slice_thickness=1.0, + bounds=[0, 1, 0, 1], + grid_res=512, + cmap=None, + component=None, + show_axes=True, + show_colorbar=True, + normalize=1.0, + output=None, + **kwargs, + ): + """ + Export an arbitrary-plane slice from unstructured point data to PNG. + + Parameters + ---------- + output_filename : str + Output PNG filename (without extension). + field_neon_dict : dict + A dictionary of NEON fields containing the data to be plotted. + Example: {"velocity": velocity_neon, "density": density_neon} + plane_point : array_like + A point [x, y, z] on the plane. 
+ plane_normal : array_like + Plane normal vector [nx, ny, nz]. + slice_thickness : float + How thick (in units of the coordinate system) the slice should be. + grid_resolution : tuple + Resolution of output image (pixels in plane u, v directions). + grid_size : tuple + Physical size of slice grid (width, height). + cmap : str + Matplotlib colormap. + normalize : float + Factor to scale and normalize data to ensure consistent images + output: str + None = png output, 'array' = no PNG and returns array of results, 'both' = png and returns array + """ + + # Get the fields data from the NEON fields + assert len(field_neon_dict.keys()) == 1, "Error: This function is designed to plot a single field at a time." + fields_data = self.get_fields_data(field_neon_dict) + + # Check if the component is within the valid range + if component is None: + print("\tCreating slice image of the field magnitude!") + cell_data = list(fields_data.values()) + squared = [comp**2 for comp in cell_data] + cell_data = np.sqrt(sum(squared)) + field_name = list(fields_data.keys())[0].split("_")[0] + "_magnitude" + else: + assert component < max(self.field_name_cardinality_dict.values()), ( + f"Error: Component {component} is out of range for the provided fields." + ) + print(f"\tCreating slice image for component {component} of the input field!") + field_name = list(fields_data.keys())[component] + cell_data = fields_data[field_name] + if "velocity" in field_name.lower(): + cell_data = cell_data * self.conversion + if normalize != 1.0: + cell_data = np.clip(cell_data / normalize,0,1) + else: + cell_data = cell_data + # Plot each field in the dictionary + result = self._to_slice_image_single_field( + f"{output_filename}_{field_name}", + cell_data, + plane_point, + plane_normal, + slice_thickness=slice_thickness, + bounds=bounds, + grid_res=grid_res, + cmap=cmap, + show_axes=show_axes, + show_colorbar=show_colorbar, + output=output, + **kwargs, + ) + if output == 'array': + return result + elif output == 'both': + print(f"\tSlice image for field {field_name} saved as {output_filename}.png") + return result + else: + print(f"\tSlice image for field {field_name} saved as {output_filename}.png") + + def _to_slice_image_single_field( + self, + output_filename, + field_data, + plane_point, + plane_normal, + slice_thickness, + bounds, + grid_res, + cmap, + show_axes, + show_colorbar, + output, + **kwargs, + ): + """ + Helper function to create a slice image for a single field. 
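+
+    Interpolation detail: cells whose centroids lie within slice_thickness / 2 of the
+    plane are projected onto it, then resampled onto a regular grid using inverse
+    distance weighting over (up to) the 4 nearest neighbours found via a cKDTree.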
+ """ + from matplotlib import cm + import numpy as np + import matplotlib.pyplot as plt + from scipy.spatial import cKDTree + + + # field data are associated with the cells centers + cell_values = field_data + + # get the normalized plane normal + plane_normal = np.asarray(np.abs(plane_normal)) + n = plane_normal / np.linalg.norm(plane_normal) + + # Compute signed distances of each cell center to the plane + plane_point *= plane_normal + sdf = np.dot(self.centroids - plane_point, n) + + # Filter: cells with centroid near plane + mask = np.abs(sdf) <= slice_thickness / 2 + if not np.any(mask): + raise ValueError("No cells intersect the plane within thickness.") + + # Project centroids to plane + centroids_slice = self.centroids[mask] + sdf_slice = sdf[mask] + proj = centroids_slice - np.outer(sdf_slice, n) + + values = cell_values[mask] + + # Build in-plane basis + if np.allclose(n, [1, 0, 0]): + u1 = np.array([0, 1, 0]) + else: + u1 = np.array([1, 0, 0]) + u2 = np.abs(np.cross(n, u1)) + + local_x = np.dot(proj - plane_point, u1) + local_y = np.dot(proj - plane_point, u2) + + # Define extent of the plot + xmin, xmax, ymin, ymax = local_x.min(), local_x.max(), local_y.min(), local_y.max() + Lx = xmax - xmin + Ly = ymax - ymin + extent = np.array([xmin + bounds[0] * Lx, xmin + bounds[1] * Lx, ymin + bounds[2] * Ly, ymin + bounds[3] * Ly]) + mask_bounds = (extent[0] <= local_x) & (local_x <= extent[1]) & (extent[2] <= local_y) & (local_y <= extent[3]) + + if cmap is None: + cmap = cm.nipy_spectral + + # Adjust vertical resolution based on bounds + bounded_x_min = local_x[mask_bounds].min() + bounded_x_max = local_x[mask_bounds].max() + bounded_y_min = local_y[mask_bounds].min() + bounded_y_max = local_y[mask_bounds].max() + width_x = bounded_x_max - bounded_x_min + height_y = bounded_y_max - bounded_y_min + aspect_ratio = height_y / width_x + grid_resY = max(1, int(np.round(grid_res*aspect_ratio))) + # Create grid + grid_x = np.linspace(bounded_x_min, bounded_x_max, grid_res) + grid_y = np.linspace(bounded_y_min, bounded_y_max, grid_resY) + xv, yv = np.meshgrid(grid_x, grid_y, indexing="xy") + + # Fast KDTree-based interpolation + points = np.column_stack((local_x[mask_bounds], local_y[mask_bounds])) + tree = cKDTree(points) + + # Query points + query_points = np.column_stack((xv.ravel(), yv.ravel())) + + # Find k nearest neighbors for smoother interpolation + k = min(4, len(points)) # Use 4 neighbors or less if not enough points + distances, indices = tree.query(query_points, k=k, workers=-1) #-1 uses all cores + + # Inverse distance weighting + epsilon = 1e-10 + weights = 1.0 / (distances + epsilon) + weights /= weights.sum(axis=1, keepdims=True) + + # Interpolate values + neighbor_values = values[mask_bounds][indices] + grid_field = (neighbor_values * weights).sum(axis=1).reshape(grid_resY, grid_res) + + if output == 'array': + return grid_field + # Plot + if show_colorbar or show_axes: + dpi = 300 + plt.imshow( + grid_field, + extent=[bounded_x_min, bounded_x_max, bounded_y_min, bounded_y_max], + cmap=cmap, + origin="lower", + aspect="equal", + **kwargs, + ) + if show_colorbar: + plt.colorbar() + if not show_axes: + plt.axis('off') + plt.savefig(output_filename + ".png", dpi=dpi, bbox_inches="tight", pad_inches=0) + plt.close() + else: + plt.imsave(output_filename + ".png", grid_field, cmap=cmap, origin="lower") + + if output == 'both': + return grid_field + + def _to_slice_image_single_field2( + self, + output_filename, + field_data, + plane_point, + plane_normal, + slice_thickness, + 
bounds, + grid_res, + cmap, + show_axes, + show_colorbar, + **kwargs, + ): + """ + Helper function to create a slice image for a single field. + """ + from matplotlib import cm + import numpy as np + import matplotlib.pyplot as plt + from scipy.interpolate import griddata + from scipy.spatial import cKDTree + tic = time.time() + # field data are associated with the cells centers + cell_values = field_data + + # get the normalized plane normal + plane_normal = np.asarray(np.abs(plane_normal)) + n = plane_normal / np.linalg.norm(plane_normal) + + # Compute centroids (K = 8 for hexahedral cells) + #cell_points = self.coordinates[self.connectivity] + #centroids = np.mean(cell_points, axis=1) + centroids = self.centroids + + # Compute signed distances of each cell center to the plane + plane_point *= plane_normal + sdf = np.dot(centroids - plane_point, n) + + # Filter: cells with centroid near plane + mask = np.abs(sdf) <= slice_thickness / 2 + if not np.any(mask): + raise ValueError("No cells intersect the plane within thickness.") + + # Project centroids to plane + centroids_slice = centroids[mask] + sdf_slice = sdf[mask] + proj = centroids_slice - np.outer(sdf_slice, n) + + values = cell_values[mask] + + # Build in-plane basis + if np.allclose(n, [1, 0, 0]): + u1 = np.array([0, 1, 0]) + else: + u1 = np.array([1, 0, 0]) + u2 = np.abs(np.cross(n, u1)) + + local_x = np.dot(proj - plane_point, u1) + local_y = np.dot(proj - plane_point, u2) + + # Define extent of the plot + xmin, xmax, ymin, ymax = local_x.min(), local_x.max(), local_y.min(), local_y.max() + Lx = xmax - xmin + Ly = ymax - ymin + extent = np.array([xmin + bounds[0] * Lx, xmin + bounds[1] * Lx, ymin + bounds[2] * Ly, ymin + bounds[3] * Ly]) + mask_bounds = (extent[0] <= local_x) & (local_x <= extent[1]) & (extent[2] <= local_y) & (local_y <= extent[3]) + + if cmap is None: + cmap = cm.nipy_spectral + + #Adjust vertical resolution based on bounds + # Compute bounded ranges + bounded_x_min = local_x[mask_bounds].min() + bounded_x_max = local_x[mask_bounds].max() + bounded_y_min = local_y[mask_bounds].min() + bounded_y_max = local_y[mask_bounds].max() + width_x = bounded_x_max - bounded_x_min + height_y = bounded_y_max - bounded_y_min + aspect_ratio = height_y / width_x + grid_resY = max(1, int(np.round(grid_res*aspect_ratio))) + print(f" ******Time to slice griddata {time.time()-tic}") + # Rasterize: scatter cell centers to 2D grid + grid_x = np.linspace(local_x[mask_bounds].min(), local_x[mask_bounds].max(), grid_res) + grid_y = np.linspace(local_y[mask_bounds].min(), local_y[mask_bounds].max(), grid_resY) + xv, yv = np.meshgrid(grid_x, grid_y, indexing="xy") + + # Linear interpolation for each grid point + grid_field = griddata(points=(local_x, local_y), values=values, xi=(xv, yv), method="linear", fill_value=np.nan) + print(f" *****Time to after griddata {time.time()-tic}") + # Plot + if show_colorbar or show_axes: + # Plot + dpi = 300 + plt.imshow( + grid_field, + extent=[xmin, xmax, ymin, ymax], + cmap=cmap, + origin="lower", + aspect="equal", + **kwargs, + ) + if show_colorbar: + plt.colorbar() + if not show_axes: + plt.axis('off') + plt.savefig(output_filename + ".png", dpi=300, bbox_inches="tight", pad_inches=0) + plt.close() + else: + plt.imsave(output_filename + ".png", grid_field, cmap=cmap, origin="lower") + + def to_line(self, + output_filename, + field_neon_dict, + start_point, + end_point, + resolution, + component=None, + radius=1.0, + **kwargs,): + """ + Extract field data along a line between start_point and 
end_point and save to a CSV file. + + This function performs two main steps: + 1. Extracts field data from field_neon_dict, handling components or computing magnitude. + 2. Interpolates the field values along a line defined by start_point and end_point, + then saves the results (coordinates and field values) to a CSV file. + + Parameters + ---------- + output_filename : str + The name of the output CSV file (without extension). Example: "velocity_profile". + field_neon_dict : dict + A dictionary containing the field data to extract, with a single key-value pair. + The key is the field name (e.g., "velocity"), and the value is the NEON data object + containing the field values. Example: {"velocity": velocity_neon}. + start_point : array_like + The starting point of the line in 3D space (e.g., [x0, y0, z0]). + Units must match the coordinate system used in the class (voxel units if untransformed, + or model units if scale/offset are applied). + end_point : array_like + The ending point of the line in 3D space (e.g., [x1, y1, z1]). + Units must match the coordinate system used in the class. + resolution : int + The number of points along the line where the field will be interpolated. + Example: 100 for 100 evenly spaced points. + component : int, optional + The specific component of the field to extract (e.g., 0 for x-component, 1 for y-component). + If None, the magnitude of the field is computed. Default is None. + radius : int + The specified distance (in units of the coordinate system) to prefilter and query for line plot + + Returns + ------- + None + The function writes the output to a CSV file and prints a confirmation message. + + Notes + ----- + - The output CSV file will contain columns: 'x', 'y', 'z', and the value of the field name (e.g., 'velocity_x' or 'velocity_magnitude'). + """ + + + # Get the fields data from the NEON fields + assert len(field_neon_dict.keys()) == 1, "Error: This function is designed to plot a single field at a time." + fields_data = self.get_fields_data(field_neon_dict) + + # Check if the component is within the valid range + if component is None: + print("\tCreating csv plot of the field magnitude!") + cell_data = list(fields_data.values()) + squared = [comp**2 for comp in cell_data] + cell_data = np.sqrt(sum(squared)) + field_name = list(fields_data.keys())[0].split("_")[0] + "_magnitude" + + else: + assert component < max(self.field_name_cardinality_dict.values()), ( + f"Error: Component {component} is out of range for the provided fields." + ) + print(f"\tCreating csv plot for component {component} of the input field!") + field_name = list(fields_data.keys())[component] + cell_data = fields_data[field_name] + + if "velocity" in field_name.lower(): + cell_data = cell_data * self.conversion + + # Plot each field in the dictionary + self._to_line_field( + f"{output_filename}_{field_name}", + cell_data, + start_point, + end_point, + resolution, + radius=radius, + **kwargs, + ) + print(f"\tLine Plot for field {field_name} saved as {output_filename}.csv") + + def _to_line_field( + self, + output_filename, + cell_data, + start_point, + end_point, + resolution, + radius, + **kwargs, + ): + """ + Helper function to create a line plot for a single field. 
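+
+        The sampling is purely geometric: each cell centroid is projected
+        onto the line, kept only if its line parameter t lies in [0, L]
+        and its perpendicular distance is within `radius`, then the
+        surviving values are sorted by t and linearly interpolated onto
+        `resolution` evenly spaced stations before being written to CSV.
+
+        Example (a minimal sketch; the coordinates are hypothetical and
+        must be in the same units as the exporter's coordinate system):
+
+            self._to_line_field("ux_wake", cell_data,
+                                start_point=(1.0, 0.5, 0.0),
+                                end_point=(1.0, 0.5, 1.0),
+                                resolution=250, radius=0.01)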
+ """ + import numpy as np + + #cell_points = self.coordinates[self.connectivity] # Shape: (M, K, 3), where M is num cells, K is nodes per cell + #centroids = np.mean(cell_points, axis=1) # Shape: (M, 3) + centroids = self.centroids + p0 = np.array(start_point, dtype=np.float32) + p1 = np.array(end_point, dtype=np.float32) + + # direction and parameter t for each centroid + d = (p1 - p0) + L = np.linalg.norm(d) + d_unit = d / L + v = centroids - p0 + t = v.dot(d_unit) + closest = p0 + np.outer(t, d_unit) + perp_dist = np.linalg.norm(centroids-closest, axis=1) + + # optionally mask to [0,L] or a small perp-radius + mask = (t >= 0) & (t <= L) & (perp_dist <= radius) + t, data = t[mask], cell_data[mask] + + # sort by t + idx = np.argsort(t) + t_sorted = t[idx] + data_sorted = data[idx] + + # target samples + t_line = np.linspace(0, L, resolution) + + # 1D linear interpolation + vals_line = np.interp(t_line, t_sorted, data_sorted, left=np.nan, right=np.nan) + + # reconstruct (x,y,z) + line_xyz = p0[None,:] + t_line[:,None]*d_unit[None,:] + + # vectorized CSV dump + out = np.hstack([line_xyz, vals_line[:,None]]) + np.savetxt( + output_filename + '.csv', + out, + delimiter=',', + header='x,y,z,value', + comments='' + ) + +wp.clear_kernel_cache() +wp.config.quiet = True + +# User Configuration +# ================= +# Physical and simulation parameters +voxel_size = 0.001 # Finest voxel size in meters +ulb = 0.05 # Lattice velocity +u_physical = 38.0 # Physical inlet velocity in m/s (user input) +flow_passes = 3 # Domain flow passes +kinematic_viscosity = 1.508e-3 # Kinematic viscosity of air in m^2/s 1.508e-5 + +trim = True +trim_voxels = 5 +# STL filename +stl_filename = "examples/stl/windsor_nw.stl" +script_name = "Windsor_1mm_mm2" + +# I/O settings +print_interval_percentage = 1 # Print every 1% of iterations +file_output_crossover_percentage = 80 # Crossover at 50% of iterations +num_file_outputs_pre_crossover = 2 # Outputs before crossover +num_file_outputs_post_crossover = 8 # Outputs after crossover + +# Other setup parameters +compute_backend = ComputeBackend.NEON +precision_policy = PrecisionPolicy.FP32FP32 +velocity_set = xlb.velocity_set.D3Q27(precision_policy=precision_policy, compute_backend=compute_backend) + +# Choose mesher type +mesher_type = "makemesh" # Options: "makemesh" or "cuboid" + +# Mesh Generation Functions +# ========================= +def generate_makemesh_mesh(stl_filename, voxel_size, trim, trim_voxels, ground_refinement_level=-1, ground_voxel_height=6): + """ + Generate a makemesh mesh based on the provided voxel size in meters, domain multipliers, and padding values. 
+ """ + # Number of requested refinement levels + num_levels = 6 + + # Domain multipliers for the full domain + domain_multiplier = { + "-x": 2.4, + "x": 3.5, + "-y": 1.7, + "y": 1.7, + "-z": 0.0, + "z": 4, + } + + padding_values = [ + [10, 20, 10, 10, 10, 10], + [20, 20, 20, 20, 20, 20], + [10, 20, 10, 10, 10, 10], + [10, 20, 10, 10, 10, 10], + [10, 20, 10, 10, 10, 10], + [10, 20, 10, 10, 10, 10], + [6, 6, 6, 6, 6, 6], + [6, 6, 6, 6, 6, 6], + [6, 6, 6, 6, 6, 6], + [6, 6, 6, 6, 6, 6], + + ] + + + # Load the mesh + mesh = trimesh.load_mesh(stl_filename, process=False) + if mesh.is_empty: + raise ValueError("Loaded mesh is empty or invalid.") + + # Compute original bounds + min_bound = mesh.vertices.min(axis=0) + max_bound = mesh.vertices.max(axis=0) + partSize = max_bound - min_bound + x0 = [max_bound[0]-0.603, min_bound[1]+(0.5*partSize[1]), min_bound[2]] #Center of wheelbase for Drivaer + + + # Compute translation to put mesh into first octant of the domain + shift = np.array( + [ + domain_multiplier["-x"] * partSize[0] - min_bound[0], + domain_multiplier["-y"] * partSize[1] - min_bound[1], + domain_multiplier["-z"] * partSize[2] - min_bound[2], + ], + dtype=float, + ) + + # Apply translation and save out temp STL + mesh.apply_translation(shift) + _ = mesh.vertex_normals + mesh.export("temp.stl") + # Generate mesh using make_cuboid_mesh + # Generate mesh using generate_mesh with ground refinement + level_data, _, sparsity_pattern, level_origins = generate_mesh( + num_levels, + "temp.stl", + voxel_size, + padding_values, + domain_multiplier, + ground_refinement_level=ground_refinement_level, + ground_voxel_height=ground_voxel_height, + ) + + if trim == True: + zShift = trim_voxels + plane_origin = np.array([0, 0, mesh.bounds[0][2]+(zShift* voxel_size)]) + plane_normal = np.array([0, 0, 1]) # Upward pointing normal + # Slice the mesh using the defined plane. + # With cap=True, the open slice is automatically closed off. + mesh_above = mesh.slice_plane(plane_origin=plane_origin, + plane_normal=plane_normal, + cap=True) + mesh_above.export('temp.stl') + body_stl = 'temp.stl' + mesh = trimesh.load_mesh(body_stl, process=False) + mesh_vertices = np.asarray(mesh.vertices) / voxel_size + else: + mesh_vertices = np.asarray(mesh.vertices) / voxel_size + + + actual_num_levels = len(level_data) + grid_shape_finest = tuple([int(i * 2 ** (actual_num_levels - 1)) for i in level_data[-1][0].shape]) + print(f"Requested levels: {num_levels}, Actual levels: {actual_num_levels}") + print(f"Full shape based on finest voxel size is {grid_shape_finest}") + #os.remove("temp.stl") + + return level_data, mesh_vertices, tuple([int(a) for a in grid_shape_finest]), partSize, actual_num_levels, shift, sparsity_pattern, level_origins, x0 + +def generate_cuboid_mesh(stl_filename, voxel_size, trim, trim_voxels): + """ + Alternative cuboid mesh generation based on Apolo's method with domain multipliers per level. 
+ """ + # Domain multipliers for each refinement level + #domain_multiplier = [ + # [3.0, 4.0, 2.5, 2.5, 0.0, 4.0], # -x, x, -y, y, -z, z0.17361 + # [1.2, 1.25, 1.75, 1.75, 0.0, 1.5], # -x, x, -y, y, -z, z + # [0.8, 1.0, 1.25, 1.25, 0.0, 1.2], # -x, x, -y, y, -z, z + # [0.4, 0.4, 0.25, 0.25, 0.0, 0.25], + + #] + + domain_multiplier = [ + [2.0, 3.5, 1.5, 1.5, 0.0, 3.7], # -x, x, -y, y, -z, z + # [1.8, 1.6, 1.2, 1.2 , 0.0, 2.0], # -x, x, -y, y, -z, z + [1.4, 1.25, 1.0, 1.0, 0.0, 1.6], # -x, x, -y, y, -z, z + #[0.8, 1.0, 0.6, 0.6, 0.0, 1.2], + #[0.4, 0.4, 0.25, 0.25, 0.0, 0.25], # -x, x, -y, y, -z, z + [0.55, 0.65, 0.65, 0.65, 0.0, 0.65], + [0.25, 0.25, 0.22, 0.22, 0.0, 0.25], + + ] + + # Load the mesh + mesh = trimesh.load_mesh(stl_filename, process=False) + if mesh.is_empty: + raise ValueError("Loaded mesh is empty or invalid.") + + # Compute original bounds + min_bound = mesh.vertices.min(axis=0) + max_bound = mesh.vertices.max(axis=0) + partSize = max_bound - min_bound + x0 = [max_bound[0]-0.603, min_bound[1]+(0.5*partSize[1]), min_bound[2]] #Center of wheelbase for Drivaer + # Compute translation to put mesh into first octant of the domain + shift = np.array( + [ + domain_multiplier[0][0] * partSize[0] - min_bound[0], + domain_multiplier[0][2] * partSize[1] - min_bound[1], + domain_multiplier[0][4] * partSize[2] - min_bound[2], + ], + dtype=float, + ) + + # Apply translation and save out temp STL + mesh.apply_translation(shift) + _ = mesh.vertex_normals + mesh.export("temp.stl") + # Generate mesh using make_cuboid_mesh + level_data, sparsity_pattern, level_origins = make_cuboid_mesh( + voxel_size, + domain_multiplier, + "temp.stl", + ) + if trim == True: + zShift = trim_voxels + plane_origin = np.array([0, 0, mesh.bounds[0][2]+(zShift* voxel_size)]) + plane_normal = np.array([0, 0, 1]) # Upward pointing normal + # Slice the mesh using the defined plane. + # With cap=True, the open slice is automatically closed off. + mesh_above = mesh.slice_plane(plane_origin=plane_origin, + plane_normal=plane_normal, + cap=True) + mesh_above.export('temp.stl') + body_stl = 'temp.stl' + mesh = trimesh.load_mesh(body_stl, process=False) + mesh_vertices = np.asarray(mesh.vertices) / voxel_size + else: + mesh_vertices = np.asarray(mesh.vertices) / voxel_size + + + + + actual_num_levels = len(level_data) + grid_shape_finest = tuple([int(i * 2 ** (actual_num_levels - 1)) for i in level_data[-1][0].shape]) + print(f"Requested levels: {len(domain_multiplier)}, Actual levels: {actual_num_levels}") + print(f"Full shape based on finest voxel size is {grid_shape_finest}") + #os.remove("temp.stl") + + return level_data, mesh_vertices, tuple([int(a) for a in grid_shape_finest]), partSize, actual_num_levels, shift, sparsity_pattern, level_origins, x0 + +# Boundary Conditions Setup +# ========================= +def setup_boundary_conditions(grid, level_data, body_vertices, ulb, compute_backend=ComputeBackend.NEON): + """ + Set up boundary conditions for the simulation. 
+ """ + num_levels = len(level_data) + coarsest_level = num_levels - 1 + box = grid.bounding_box_indices(shape=grid.level_to_shape(coarsest_level)) + left_indices = grid.boundary_indices_across_levels(level_data, box_side="left", remove_edges=True) + right_indices = grid.boundary_indices_across_levels(level_data, box_side="right", remove_edges=True) + top_indices = grid.boundary_indices_across_levels(level_data, box_side="top", remove_edges=False) + bottom_indices = grid.boundary_indices_across_levels(level_data, box_side="bottom", remove_edges=False) + front_indices = grid.boundary_indices_across_levels(level_data, box_side="front", remove_edges=False) + back_indices = grid.boundary_indices_across_levels(level_data, box_side="back", remove_edges=False) + + # Filter front and back indices to remove overlaps with top and bottom at each level + filtered_front_indices = [] + filtered_back_indices = [] + filtered_top_indices = [] + filtered_bottom_indices = [] + for level in range(num_levels): + left_set = set(zip(*left_indices[level])) if left_indices[level] else set() + right_set = set(zip(*right_indices[level])) if right_indices[level] else set() + top_set = set(zip(*top_indices[level])) if top_indices[level] else set() + bottom_set = set(zip(*bottom_indices[level])) if bottom_indices[level] else set() + front_set = set(zip(*front_indices[level])) if front_indices[level] else set() + back_set = set(zip(*back_indices[level])) if back_indices[level] else set() + filtered_front_set = front_set - (top_set | bottom_set | left_set | right_set) + filtered_back_set = back_set - (top_set | bottom_set | left_set | right_set) + filtered_top_set = top_set - (left_set | right_set) + filtered_bottom_set = bottom_set - (left_set | right_set) + filtered_front_indices.append( + [list(coords) for coords in zip(*filtered_front_set)] if filtered_front_set else [] + ) + filtered_back_indices.append( + [list(coords) for coords in zip(*filtered_back_set)] if filtered_back_set else [] + ) + filtered_top_indices.append( + [list(coords) for coords in zip(*filtered_top_set)] if filtered_top_set else [] + ) + filtered_bottom_indices.append( + [list(coords) for coords in zip(*filtered_bottom_set)] if filtered_bottom_set else [] + ) + + # Turbulent Flow Profile + def bc_profile_taper(taper_fraction=0.07): + assert compute_backend == ComputeBackend.NEON + _, ny, nz = grid_shape_zip + dtype = precision_policy.compute_precision.wp_dtype + H_y = dtype(ny-1) + H_z = dtype(nz-1) + two = dtype(2.0) + ulb_wp = dtype(ulb) + taper_frac = dtype(taper_fraction) + core_frac = dtype(1.0 - 2.0 * taper_fraction) + _u_vec = wp.vec(velocity_set.d,dtype=dtype) + + @wp.func + def bc_profile_warp(index: wp.vec3i): + y = dtype(index[1]) + z = dtype(index[2]) + y_center = wp.abs(y - (H_y / two)) + z_center = wp.abs(z - (H_z / two)) + y_norm = two * y_center / H_y + z_norm = two * z_center / H_z + max_norm = wp.max(y_norm, z_norm) + velocity = ulb_wp + if max_norm > core_frac: + velocity = ulb_wp * (dtype(1.0) - (max_norm - core_frac) / taper_frac) + velocity = wp.max(dtype(0.0), velocity) + return wp.vec(velocity, length=1) + + return bc_profile_warp + + # Initialize boundary conditions + + + #bc_inlet = HybridBC( + # bc_method="nonequilibrium_regularized", + # prescribed_value=(ulb, 0.0, 0.0), + # indices=left_indices + #) + + bc_inlet = RegularizedBC( + "velocity", + #profile=bc_profile_taper(), + prescribed_value=(ulb, 0.0, 0.0), + indices=left_indices, + ) + + bc_outlet = DoNothingBC(indices=right_indices) + + #bc_top = 
FullwayBounceBackBC(indices=top_indices) + bc_top = HybridBC(bc_method="nonequilibrium_regularized",prescribed_value=(ulb, 0.0, 0.0),indices=top_indices) + + bc_bottom = HybridBC(bc_method="nonequilibrium_regularized",prescribed_value=(ulb, 0.0, 0.0),indices=bottom_indices) + #bc_bottom = FullwayBounceBackBC(indices=bottom_indices) + #bc_front = FullwayBounceBackBC(indices=filtered_front_indices) + bc_front = HybridBC(bc_method="nonequilibrium_regularized",prescribed_value=(ulb, 0.0, 0.0),indices=filtered_front_indices) + #bc_back = FullwayBounceBackBC(indices=filtered_back_indices) + bc_back = HybridBC(bc_method="nonequilibrium_regularized",prescribed_value=(ulb, 0.0, 0.0),indices=filtered_back_indices) + + bc_body = HybridBC( + bc_method="bounceback_grads", + mesh_vertices=body_vertices, + voxelization_method=MeshVoxelizationMethod("AABB_CLOSE", close_voxels=3), + use_mesh_distance=True, + ) + + return [bc_top, bc_bottom, bc_front, bc_back, bc_inlet, bc_outlet, bc_body] # Body must be last. Outlet must be second to last + # return [bc_walls, bc_inlet, bc_outlet, bc_body] + +# Simulation Initialization +# ========================= +def initialize_simulation(grid, boundary_conditions, omega, initializer, collision_type="KBC", mres_perf_opt=xlb.MresPerfOptimizationType.FUSION_AT_FINEST): + """ + Initialize the multiresolution simulation manager. + """ + sim = xlb.helper.MultiresSimulationManager( + omega=omega, + grid=grid, + boundary_conditions=boundary_conditions, + collision_type=collision_type, + initializer=initializer, + mres_perf_opt=mres_perf_opt, + ) + return sim + +# Utility Functions +# ================= +def print_lift_drag(sim, step, momentum_transfer, ulb, reference_area, voxel_size): + """ + Calculate and print lift and drag coefficients. + """ + boundary_force = momentum_transfer(sim.f_0, sim.f_1, sim.bc_mask, sim.missing_mask) + drag = boundary_force[0] + lift = boundary_force[2] + cd = 2.0 * drag / (ulb**2 * reference_area) + cl = 2.0 * lift / (ulb**2 * reference_area) + if np.isnan(cd) or np.isnan(cl): + raise ValueError(f"NaN detected in coefficients at step {step}: Cd={cd}, Cl={cl}") + drag_values.append([cd, cl]) + # print(f"CD={cd:.3f}, CL={cl:.3f}, Drag Force (lattice units)={drag:.6f}") + return cd, cl, drag + +def plot_drag_lift(drag_values, output_dir, print_interval, script_name, percentile_range=(15, 85), use_log_scale=False): + """ + Plot CD and CL over time and save the plot to the output directory. 
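+
+    The y-axis limits are taken from the given percentile_range of the
+    combined Cd/Cl samples (plus 10% padding) so that start-up transients do
+    not dominate the plot scale; the figure is written to drag_lift_plot.png.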
+ """ + drag_values_array = np.array(drag_values) + steps = np.arange(0, len(drag_values) * print_interval, print_interval) + cd_values = drag_values_array[:, 0] + cl_values = drag_values_array[:, 1] + y_min = min(np.percentile(cd_values, percentile_range[0]), np.percentile(cl_values, percentile_range[0])) + y_max = max(np.percentile(cd_values, percentile_range[1]), np.percentile(cl_values, percentile_range[1])) + padding = (y_max - y_min) * 0.1 + y_min, y_max = y_min - padding, y_max + padding + if use_log_scale: + y_min = max(y_min, 1e-6) + plt.figure(figsize=(10, 6)) + plt.plot(steps, cd_values, label='Drag Coefficient (Cd)', color='blue') + plt.plot(steps, cl_values, label='Lift Coefficient (Cl)', color='red') + plt.xlabel('Simulation Step') + plt.ylabel('Coefficient') + plt.title(f'{script_name}: Drag and Lift Coefficients Over Time') + plt.legend() + plt.grid(True) + plt.ylim(y_min, y_max) + if use_log_scale: + plt.yscale('log') + plt.savefig(os.path.join(output_dir, 'drag_lift_plot.png')) + plt.close() + +def compute_voxel_statistics_and_reference_area(sim, bc_mask_exporter, level_data, actual_num_levels, sparsity_pattern, boundary_conditions, voxel_size): + """ + Compute active/solid voxels, totals, lattice updates, and reference area based on simulation data. + """ + # Compute macro fields + sim.macro(sim.f_0, sim.bc_mask, sim.rho, sim.u, streamId=0) + fields_data = bc_mask_exporter.get_fields_data({"bc_mask": sim.bc_mask}) + bc_mask_data = fields_data["bc_mask_0"] + level_id_field = bc_mask_exporter.level_id_field + + # Compute solid voxels per level (assuming 255 is the solid marker) + solid_voxels = [] + for lvl in range(actual_num_levels): + level_mask = level_id_field == lvl + solid_voxels.append(np.sum(bc_mask_data[level_mask] == 255)) + + # Compute active voxels (total non-zero in sparsity minus solids) + active_voxels = [np.count_nonzero(mask) for mask in sparsity_pattern] + active_voxels = [max(0, active_voxels[lvl] - solid_voxels[lvl]) for lvl in range(actual_num_levels)] + + # Totals + total_voxels = sum(active_voxels) + total_lattice_updates_per_step = sum(active_voxels[lvl] * (2 ** (actual_num_levels - 1 - lvl)) for lvl in range(actual_num_levels)) + + # Compute reference area (projected on YZ plane at finest level) + finest_level = 0 + mask_finest = level_id_field == finest_level + bc_mask_finest = bc_mask_data[mask_finest] + active_indices_finest = np.argwhere(level_data[0][0]) + bc_body_id = boundary_conditions[-1].id # Assuming last BC is bc_body + solid_voxels_indices = active_indices_finest[bc_mask_finest == bc_body_id] + unique_jk = np.unique(solid_voxels_indices[:, 1:3], axis=0) + reference_area = unique_jk.shape[0] + reference_area_physical = reference_area * (voxel_size ** 2) + + return { + "active_voxels": active_voxels, + "solid_voxels": solid_voxels, + "total_voxels": total_voxels, + "total_lattice_updates_per_step": total_lattice_updates_per_step, + "reference_area": reference_area, + "reference_area_physical": reference_area_physical + } + + +def plot_data(x0, output_dir, delta_x_coarse, sim, IOexporter, prefix='SAE'): + ''' + Windsor Car Model No Wheels #https://repository.lboro.ac.uk/articles/dataset/Windsor_Body_Experimental_Aerodynamic_Dataset/13161284 + Profiles on symmetry plane (y=0) covering entire field + Origin of coordinate system: + x=0: center of the wheelbase, y=0: symmetry plane, z=0: ground plane + + Coordaintes in meters + Velocity data in m/s + + Key is Xlocation + Value X is vx + Value Y is z + ''' + + def _load_sim_line(csv_path): + 
""" + Read a CSV exported by IOexporter.to_line without pandas. + Returns (z, Ux). + """ + # Read with header as column names + data = np.genfromtxt( + csv_path, + delimiter=',', + names=True, # use header + autostrip=True, + dtype=None, # let numpy infer dtypes + encoding='utf-8' # handle any non-ascii names + ) + if data.size == 0: + raise ValueError(f"No data in {csv_path}") + + names = data.dtype.names + lower = {n: n.lower() for n in names} + + # Find z-like column (fallback: first column) + z_candidates = [ + n for n in names + if lower[n] == 'z' + or lower[n] in ('s', 'distance', 'arc_length', 'arclength') + or 'z' == lower[n].split('_')[-1] + ] + z_name = z_candidates[0] if z_candidates else names[0] + + # Find velocity-x column (fallback: last column) + vel_candidates = [n for n in names if any(k in lower[n] for k in ('vel', 'u', 'velocity'))] + # Prefer an x-component if present (common patterns after numpy sanitizes names) + vel_x_pref = [n for n in vel_candidates if any(k in lower[n] for k in ('x', '_0', '0'))] + vel_name = vel_x_pref[0] if vel_x_pref else (vel_candidates[0] if vel_candidates else names[-1]) + + z = np.asarray(data[z_name], dtype=float) + ux = np.asarray(data[vel_name], dtype=float) + return z, ux + + testData = { + '0.493' : { 'x' : [0,0,40.14,40.2,40.04,39.82,39.42,38.77,38.11,37.87,36.57,31.78,23.25,12.62,4.32,0.27,-0.33,-0.35,-0.22,-0.13,-0.13,-0.08,-0.07,-0.14,-0.27,-0.4,-0.5,-0.54,-0.56,-0.57,-0.59,-0.63,-0.67,-0.69,-0.71,-0.76,-0.83,-0.88,-0.92,-0.95,-0.97,-0.98,-0.99,-1,-1.04,-1.1,-1.15,-1.17,-1.18,-1.17,-1.14,-1.11,-1.08,-1.06,-1.05,-1.05,-1.06,-1.06,-1.05,-1.06,-1.1,-1.14,-1.15,-1.14,-1.14,-1.15,-1.17,-1.19,-1.21,-1.23,-1.23,-1.22,-1.22,-1.22,-1.22,-1.2,-1.19,-1.18,-1.17,-1.17,-1.18,-1.2,-1.25,-1.27,-1.25,-1.24,-1.24,-1.28,-1.32,-1.33,-1.32,-1.28,-1.24,-1.22,-1.24,-1.29,-1.32,-1.33,-1.32,-1.29,-1.27,-1.25,-1.25,-1.24,-1.22,-1.2,-1.18,-1.18,-1.19,-1.2,-1.2,-1.18,-1.13,-1.08,-1.05,-1.04,-1.01,-0.98,-0.95,-0.93,-0.96,-1.01,-1.04,-1.04,-1.01,-0.99,-0.98,-0.98,-0.96,-0.92,-0.9,-0.89,-0.88,-0.87,-0.84,-0.82,-0.82,-0.82,-0.8,-0.77,-0.73,-0.66,-0.58,-0.51,-0.47,-0.48,-0.5,-0.49,-0.44,-0.37,-0.33,-0.33,-0.34,-0.34,-0.33,-0.27,-0.18,-0.11,-0.08,-0.09,-0.07,0,0.08,0.15,0.2,0.19,-0.05,-0.42,-0.53,-0.09,1.11,4.28,11.24,21.22,30.38,35.41,37.35,38.01,38.85,0,0,0,0,0,0], 'y' : [0.029,0.03,0.032,0.034,0.036,0.038,0.04,0.041,0.043,0.045,0.047,0.049,0.051,0.052,0.054,0.056,0.058,0.06,0.062,0.063,0.065,0.067,0.069,0.071,0.073,0.074,0.076,0.078,0.08,0.082,0.084,0.085,0.087,0.089,0.091,0.093,0.095,0.096,0.098,0.1,0.102,0.104,0.106,0.107,0.109,0.111,0.113,0.115,0.117,0.118,0.12,0.122,0.124,0.126,0.128,0.129,0.131,0.133,0.135,0.137,0.139,0.14,0.142,0.144,0.146,0.148,0.15,0.151,0.153,0.155,0.157,0.159,0.161,0.162,0.164,0.166,0.168,0.17,0.172,0.173,0.175,0.177,0.179,0.181,0.183,0.184,0.186,0.188,0.19,0.192,0.194,0.195,0.197,0.199,0.201,0.203,0.204,0.206,0.208,0.21,0.212,0.214,0.215,0.217,0.219,0.221,0.223,0.225,0.226,0.228,0.23,0.232,0.234,0.236,0.237,0.239,0.241,0.243,0.245,0.247,0.248,0.25,0.252,0.254,0.256,0.258,0.259,0.261,0.263,0.265,0.267,0.269,0.27,0.272,0.274,0.276,0.278,0.28,0.281,0.283,0.285,0.287,0.289,0.291,0.292,0.294,0.296,0.298,0.3,0.302,0.303,0.305,0.307,0.309,0.311,0.313,0.314,0.316,0.318,0.32,0.322,0.324,0.325,0.327,0.329,0.331,0.333,0.335,0.336,0.338,0.34,0.342,0.344,0.346,0.347,0.349,0.351,0.353,0.355,0.357,0.358,0.36,0.362,0.364,0.366]}, + '0.555' : { 'x' : 
[0,0,40.66,40.72,40.36,40.09,39.53,38.62,37.18,35.03,31.95,28.03,23.56,18.84,14.57,10.9,7.8,5.31,3.38,1.95,0.89,0.17,-0.41,-0.88,-1.24,-1.56,-1.84,-2.09,-2.32,-2.54,-2.77,-2.99,-3.21,-3.41,-3.59,-3.77,-3.94,-4.11,-4.27,-4.43,-4.6,-4.76,-4.88,-4.96,-5.04,-5.12,-5.23,-5.35,-5.47,-5.59,-5.7,-5.79,-5.86,-5.91,-5.96,-6.02,-6.07,-6.13,-6.19,-6.26,-6.32,-6.37,-6.41,-6.43,-6.43,-6.44,-6.45,-6.47,-6.49,-6.49,-6.47,-6.45,-6.44,-6.45,-6.48,-6.52,-6.55,-6.57,-6.57,-6.57,-6.58,-6.59,-6.6,-6.6,-6.59,-6.58,-6.58,-6.57,-6.56,-6.54,-6.52,-6.52,-6.52,-6.51,-6.48,-6.44,-6.39,-6.35,-6.31,-6.28,-6.27,-6.29,-6.31,-6.31,-6.3,-6.27,-6.26,-6.24,-6.23,-6.19,-6.12,-6.06,-6,-5.96,-5.93,-5.91,-5.9,-5.88,-5.84,-5.76,-5.65,-5.53,-5.45,-5.41,-5.39,-5.34,-5.29,-5.23,-5.17,-5.1,-5,-4.9,-4.8,-4.7,-4.61,-4.53,-4.47,-4.42,-4.36,-4.29,-4.21,-4.14,-4.07,-3.99,-3.89,-3.77,-3.64,-3.51,-3.38,-3.23,-3.05,-2.85,-2.64,-2.43,-2.21,-1.98,-1.73,-1.47,-1.19,-0.89,-0.56,-0.18,0.34,1.01,1.9,3.24,5.09,7.5,10.58,14.34,18.53,22.96,27.38,31.19,34.15,36.36,38.03,39.39,40.58,0,0,0,0,0,0], 'y' : [0.029,0.03,0.032,0.034,0.036,0.038,0.04,0.041,0.043,0.045,0.047,0.049,0.051,0.052,0.054,0.056,0.058,0.06,0.062,0.063,0.065,0.067,0.069,0.071,0.073,0.074,0.076,0.078,0.08,0.082,0.084,0.085,0.087,0.089,0.091,0.093,0.095,0.096,0.098,0.1,0.102,0.104,0.106,0.107,0.109,0.111,0.113,0.115,0.117,0.118,0.12,0.122,0.124,0.126,0.128,0.129,0.131,0.133,0.135,0.137,0.139,0.14,0.142,0.144,0.146,0.148,0.15,0.151,0.153,0.155,0.157,0.159,0.161,0.162,0.164,0.166,0.168,0.17,0.172,0.173,0.175,0.177,0.179,0.181,0.183,0.184,0.186,0.188,0.19,0.192,0.194,0.195,0.197,0.199,0.201,0.203,0.204,0.206,0.208,0.21,0.212,0.214,0.215,0.217,0.219,0.221,0.223,0.225,0.226,0.228,0.23,0.232,0.234,0.236,0.237,0.239,0.241,0.243,0.245,0.247,0.248,0.25,0.252,0.254,0.256,0.258,0.259,0.261,0.263,0.265,0.267,0.269,0.27,0.272,0.274,0.276,0.278,0.28,0.281,0.283,0.285,0.287,0.289,0.291,0.292,0.294,0.296,0.298,0.3,0.302,0.303,0.305,0.307,0.309,0.311,0.313,0.314,0.316,0.318,0.32,0.322,0.324,0.325,0.327,0.329,0.331,0.333,0.335,0.336,0.338,0.34,0.342,0.344,0.346,0.347,0.349,0.351,0.353,0.355,0.357,0.358,0.36,0.362,0.364,0.366]}, + '0.652' : { 'x' : [0,0,39.38,39.1,38.23,37.18,35.85,34.27,32.51,30.65,28.69,26.64,24.53,22.31,20.11,17.95,15.9,14.02,12.25,10.55,8.93,7.39,5.93,4.57,3.32,2.22,1.26,0.38,-0.44,-1.19,-1.86,-2.44,-2.94,-3.4,-3.83,-4.26,-4.72,-5.17,-5.58,-5.95,-6.29,-6.59,-6.87,-7.12,-7.33,-7.52,-7.69,-7.86,-8.03,-8.2,-8.37,-8.52,-8.66,-8.8,-8.97,-9.16,-9.34,-9.5,-9.63,-9.75,-9.88,-10.05,-10.23,-10.4,-10.54,-10.63,-10.7,-10.76,-10.83,-10.92,-11.02,-11.11,-11.2,-11.27,-11.31,-11.33,-11.33,-11.33,-11.32,-11.31,-11.33,-11.37,-11.41,-11.45,-11.46,-11.46,-11.44,-11.38,-11.3,-11.24,-11.2,-11.18,-11.16,-11.11,-11.03,-10.94,-10.87,-10.82,-10.77,-10.72,-10.66,-10.58,-10.49,-10.39,-10.29,-10.18,-10.08,-9.97,-9.87,-9.78,-9.71,-9.65,-9.57,-9.45,-9.3,-9.14,-9,-8.87,-8.76,-8.65,-8.53,-8.37,-8.2,-8.03,-7.88,-7.74,-7.62,-7.53,-7.44,-7.33,-7.22,-7.1,-6.97,-6.81,-6.64,-6.44,-6.25,-6.08,-5.91,-5.75,-5.58,-5.41,-5.26,-5.11,-4.91,-4.65,-4.34,-3.98,-3.58,-3.13,-2.6,-2.01,-1.38,-0.71,0.04,0.88,1.86,2.99,4.24,5.61,7.14,8.89,10.82,12.85,14.96,17.18,19.54,21.97,24.42,26.87,29.25,31.55,33.75,35.79,37.63,39.26,40.63,41.75,42.69,0,0,0,0,0,0], 'y' : 
[0.029,0.03,0.032,0.034,0.036,0.038,0.04,0.041,0.043,0.045,0.047,0.049,0.051,0.052,0.054,0.056,0.058,0.06,0.062,0.063,0.065,0.067,0.069,0.071,0.073,0.074,0.076,0.078,0.08,0.082,0.084,0.085,0.087,0.089,0.091,0.093,0.095,0.096,0.098,0.1,0.102,0.104,0.106,0.107,0.109,0.111,0.113,0.115,0.117,0.118,0.12,0.122,0.124,0.126,0.128,0.129,0.131,0.133,0.135,0.137,0.139,0.14,0.142,0.144,0.146,0.148,0.15,0.151,0.153,0.155,0.157,0.159,0.161,0.162,0.164,0.166,0.168,0.17,0.172,0.173,0.175,0.177,0.179,0.181,0.183,0.184,0.186,0.188,0.19,0.192,0.194,0.195,0.197,0.199,0.201,0.203,0.204,0.206,0.208,0.21,0.212,0.214,0.215,0.217,0.219,0.221,0.223,0.225,0.226,0.228,0.23,0.232,0.234,0.236,0.237,0.239,0.241,0.243,0.245,0.247,0.248,0.25,0.252,0.254,0.256,0.258,0.259,0.261,0.263,0.265,0.267,0.269,0.27,0.272,0.274,0.276,0.278,0.28,0.281,0.283,0.285,0.287,0.289,0.291,0.292,0.294,0.296,0.298,0.3,0.302,0.303,0.305,0.307,0.309,0.311,0.313,0.314,0.316,0.318,0.32,0.322,0.324,0.325,0.327,0.329,0.331,0.333,0.335,0.336,0.338,0.34,0.342,0.344,0.346,0.347,0.349,0.351,0.353,0.355,0.357,0.358,0.36,0.362,0.364,0.366]}, + '0.77' : { 'x' : [0,0,35.95,35.66,34.88,34.05,33.14,32.19,31.11,29.92,28.66,27.42,26.17,24.91,23.66,22.29,20.84,19.43,18.08,16.75,15.47,14.21,12.93,11.66,10.48,9.39,8.3,7.19,6.07,4.98,3.93,2.91,1.92,0.97,0.05,-0.84,-1.7,-2.51,-3.25,-3.88,-4.39,-4.84,-5.29,-5.77,-6.31,-6.87,-7.36,-7.75,-8.08,-8.46,-8.88,-9.3,-9.68,-10,-10.28,-10.53,-10.77,-11.03,-11.28,-11.52,-11.75,-11.98,-12.2,-12.4,-12.55,-12.65,-12.76,-12.93,-13.13,-13.33,-13.49,-13.59,-13.65,-13.7,-13.76,-13.87,-13.99,-14.1,-14.19,-14.24,-14.27,-14.26,-14.24,-14.22,-14.22,-14.23,-14.22,-14.19,-14.15,-14.11,-14.06,-14.01,-13.98,-13.94,-13.86,-13.7,-13.49,-13.32,-13.21,-13.13,-13,-12.83,-12.64,-12.45,-12.28,-12.13,-12.02,-11.91,-11.77,-11.62,-11.48,-11.3,-11.05,-10.72,-10.35,-10,-9.69,-9.4,-9.12,-8.82,-8.53,-8.25,-7.98,-7.71,-7.42,-7.07,-6.7,-6.34,-5.99,-5.67,-5.33,-4.97,-4.53,-4.02,-3.46,-2.85,-2.2,-1.56,-0.95,-0.33,0.31,1,1.75,2.58,3.49,4.5,5.56,6.63,7.72,8.82,9.94,11.14,12.41,13.72,15.06,16.46,17.91,19.35,20.75,22.12,23.5,24.93,26.38,27.87,29.37,30.87,32.37,33.81,35.18,36.48,37.72,38.87,39.9,40.81,41.61,42.28,42.87,43.38,43.81,0,0,0,0,0,0], 'y' : [0.029,0.03,0.032,0.034,0.036,0.038,0.04,0.041,0.043,0.045,0.047,0.049,0.051,0.052,0.054,0.056,0.058,0.06,0.062,0.063,0.065,0.067,0.069,0.071,0.073,0.074,0.076,0.078,0.08,0.082,0.084,0.085,0.087,0.089,0.091,0.093,0.095,0.096,0.098,0.1,0.102,0.104,0.106,0.107,0.109,0.111,0.113,0.115,0.117,0.118,0.12,0.122,0.124,0.126,0.128,0.129,0.131,0.133,0.135,0.137,0.139,0.14,0.142,0.144,0.146,0.148,0.15,0.151,0.153,0.155,0.157,0.159,0.161,0.162,0.164,0.166,0.168,0.17,0.172,0.173,0.175,0.177,0.179,0.181,0.183,0.184,0.186,0.188,0.19,0.192,0.194,0.195,0.197,0.199,0.201,0.203,0.204,0.206,0.208,0.21,0.212,0.214,0.215,0.217,0.219,0.221,0.223,0.225,0.226,0.228,0.23,0.232,0.234,0.236,0.237,0.239,0.241,0.243,0.245,0.247,0.248,0.25,0.252,0.254,0.256,0.258,0.259,0.261,0.263,0.265,0.267,0.269,0.27,0.272,0.274,0.276,0.278,0.28,0.281,0.283,0.285,0.287,0.289,0.291,0.292,0.294,0.296,0.298,0.3,0.302,0.303,0.305,0.307,0.309,0.311,0.313,0.314,0.316,0.318,0.32,0.322,0.324,0.325,0.327,0.329,0.331,0.333,0.335,0.336,0.338,0.34,0.342,0.344,0.346,0.347,0.349,0.351,0.353,0.355,0.357,0.358,0.36,0.362,0.364,0.366]}, + '0.867' : { 'x' : 
[0,0,0,0,0,14.13,29.36,28.91,28.42,27.84,27.2,26.39,25.54,24.62,23.71,22.73,21.75,20.79,19.82,18.88,17.93,16.99,16,15.03,14.05,13.1,12.2,11.35,10.55,9.76,8.98,8.27,7.58,6.91,6.24,5.58,4.92,4.28,3.62,2.94,2.3,1.7,1.13,0.59,0.07,-0.46,-0.99,-1.49,-1.96,-2.41,-2.82,-3.2,-3.54,-3.82,-4.08,-4.33,-4.59,-4.89,-5.2,-5.51,-5.79,-6.06,-6.33,-6.55,-6.72,-6.85,-6.96,-7.06,-7.17,-7.28,-7.4,-7.54,-7.68,-7.78,-7.86,-7.92,-7.99,-8.07,-8.14,-8.19,-8.23,-8.26,-8.27,-8.24,-8.18,-8.07,-7.94,-7.8,-7.68,-7.57,-7.46,-7.33,-7.18,-7.02,-6.87,-6.71,-6.52,-6.31,-6.1,-5.91,-5.72,-5.53,-5.34,-5.14,-4.94,-4.75,-4.55,-4.32,-4.06,-3.79,-3.52,-3.22,-2.94,-2.69,-2.45,-2.19,-1.88,-1.52,-1.12,-0.73,-0.35,0.05,0.48,0.94,1.42,1.89,2.33,2.75,3.19,3.71,4.29,4.93,5.61,6.28,6.93,7.62,8.37,9.18,10.01,10.84,11.66,12.52,13.41,14.3,15.2,16.15,17.17,18.2,19.18,20.1,21.04,22.02,23.02,24.04,25.09,26.18,27.26,28.34,29.41,30.48,31.47,32.37,33.21,34.02,34.83,35.62,36.4,37.11,37.73,38.28,38.77,39.21,39.62,40.01,40.37,40.68,40.96,41.23,41.49,0,0,0,0,0,0], 'y' : [0.029,0.03,0.032,0.034,0.036,0.038,0.04,0.041,0.043,0.045,0.047,0.049,0.051,0.052,0.054,0.056,0.058,0.06,0.062,0.063,0.065,0.067,0.069,0.071,0.073,0.074,0.076,0.078,0.08,0.082,0.084,0.085,0.087,0.089,0.091,0.093,0.095,0.096,0.098,0.1,0.102,0.104,0.106,0.107,0.109,0.111,0.113,0.115,0.117,0.118,0.12,0.122,0.124,0.126,0.128,0.129,0.131,0.133,0.135,0.137,0.139,0.14,0.142,0.144,0.146,0.148,0.15,0.151,0.153,0.155,0.157,0.159,0.161,0.162,0.164,0.166,0.168,0.17,0.172,0.173,0.175,0.177,0.179,0.181,0.183,0.184,0.186,0.188,0.19,0.192,0.194,0.195,0.197,0.199,0.201,0.203,0.204,0.206,0.208,0.21,0.212,0.214,0.215,0.217,0.219,0.221,0.223,0.225,0.226,0.228,0.23,0.232,0.234,0.236,0.237,0.239,0.241,0.243,0.245,0.247,0.248,0.25,0.252,0.254,0.256,0.258,0.259,0.261,0.263,0.265,0.267,0.269,0.27,0.272,0.274,0.276,0.278,0.28,0.281,0.283,0.285,0.287,0.289,0.291,0.292,0.294,0.296,0.298,0.3,0.302,0.303,0.305,0.307,0.309,0.311,0.313,0.314,0.316,0.318,0.32,0.322,0.324,0.325,0.327,0.329,0.331,0.333,0.335,0.336,0.338,0.34,0.342,0.344,0.346,0.347,0.349,0.351,0.353,0.355,0.357,0.358,0.36,0.362,0.364,0.366]}, + '0.96' : { 'x' : [0,0,0,0,0,12.11,25.44,25.31,25.11,24.81,24.53,24.1,23.7,23.24,22.74,22.22,21.71,21.19,20.6,20.05,19.54,19,18.39,17.79,17.19,16.65,16.11,15.55,14.98,14.39,13.79,13.22,12.69,12.2,11.74,11.28,10.85,10.43,9.98,9.5,9.09,8.72,8.27,7.79,7.32,6.88,6.51,6.19,5.92,5.66,5.41,5.16,4.93,4.7,4.45,4.16,3.85,3.55,3.32,3.19,3.11,3,2.86,2.73,2.62,2.51,2.41,2.3,2.21,2.14,2.08,2.04,2.02,2,1.99,2.02,2.11,2.25,2.39,2.5,2.58,2.64,2.67,2.7,2.76,2.86,3.01,3.17,3.33,3.48,3.63,3.78,3.96,4.18,4.42,4.7,4.99,5.29,5.55,5.77,5.96,6.18,6.45,6.72,6.96,7.22,7.52,7.86,8.19,8.51,8.84,9.19,9.57,9.95,10.34,10.71,11.03,11.29,11.56,11.87,12.21,12.6,13.06,13.56,14.05,14.5,14.94,15.41,15.86,16.33,16.85,17.41,17.97,18.55,19.1,19.59,20.15,20.74,21.43,22.12,22.77,23.36,23.92,24.52,25.2,25.96,26.75,27.53,28.27,28.94,29.54,30.11,30.69,31.25,31.79,32.34,32.88,33.4,33.95,34.47,34.94,35.41,35.82,36.2,36.54,36.86,37.18,37.52,37.87,38.2,38.49,38.71,38.91,39.1,39.28,39.46,39.65,39.83,39.99,0,0,0,0,0,0], 'y' : 
[0.029,0.03,0.032,0.034,0.036,0.038,0.04,0.041,0.043,0.045,0.047,0.049,0.051,0.052,0.054,0.056,0.058,0.06,0.062,0.063,0.065,0.067,0.069,0.071,0.073,0.074,0.076,0.078,0.08,0.082,0.084,0.085,0.087,0.089,0.091,0.093,0.095,0.096,0.098,0.1,0.102,0.104,0.106,0.107,0.109,0.111,0.113,0.115,0.117,0.118,0.12,0.122,0.124,0.126,0.128,0.129,0.131,0.133,0.135,0.137,0.139,0.14,0.142,0.144,0.146,0.148,0.15,0.151,0.153,0.155,0.157,0.159,0.161,0.162,0.164,0.166,0.168,0.17,0.172,0.173,0.175,0.177,0.179,0.181,0.183,0.184,0.186,0.188,0.19,0.192,0.194,0.195,0.197,0.199,0.201,0.203,0.204,0.206,0.208,0.21,0.212,0.214,0.215,0.217,0.219,0.221,0.223,0.225,0.226,0.228,0.23,0.232,0.234,0.236,0.237,0.239,0.241,0.243,0.245,0.247,0.248,0.25,0.252,0.254,0.256,0.258,0.259,0.261,0.263,0.265,0.267,0.269,0.27,0.272,0.274,0.276,0.278,0.28,0.281,0.283,0.285,0.287,0.289,0.291,0.292,0.294,0.296,0.298,0.3,0.302,0.303,0.305,0.307,0.309,0.311,0.313,0.314,0.316,0.318,0.32,0.322,0.324,0.325,0.327,0.329,0.331,0.333,0.335,0.336,0.338,0.34,0.342,0.344,0.346,0.347,0.349,0.351,0.353,0.355,0.357,0.358,0.36,0.362,0.364,0.366]}, + '1.063' : { 'x' : [0,0,0,0,0,10.35,21.96,22.07,22.15,22.17,22.15,22.05,21.96,21.84,21.66,21.48,21.4,21.23,20.96,20.69,20.36,20.09,19.84,19.61,19.4,19.11,18.81,18.43,18.05,17.68,17.38,17.16,16.96,16.72,16.43,16.13,15.83,15.52,15.24,15.05,14.86,14.64,14.39,14.11,13.83,13.56,13.42,13.3,13.19,13.08,12.89,12.67,12.44,12.26,12.12,12.08,12.03,11.99,11.93,11.83,11.74,11.68,11.67,11.68,11.65,11.62,11.61,11.6,11.55,11.46,11.43,11.49,11.57,11.66,11.76,11.9,12.07,12.22,12.42,12.65,12.83,12.98,13.14,13.29,13.46,13.66,13.91,14.18,14.46,14.71,14.86,14.95,15.04,15.18,15.45,15.8,16.15,16.49,16.79,17.04,17.23,17.38,17.58,17.83,18.11,18.4,18.67,18.95,19.21,19.44,19.67,19.88,20.09,20.34,20.59,20.83,21.06,21.33,21.64,22,22.39,22.78,23.15,23.51,23.83,24.13,24.46,24.84,25.21,25.58,25.94,26.31,26.69,27.06,27.41,27.79,28.18,28.55,28.93,29.29,29.66,30.01,30.32,30.62,30.95,31.33,31.71,32.05,32.4,32.71,33.02,33.32,33.64,33.98,34.31,34.61,34.9,35.17,35.44,35.7,35.96,36.25,36.54,36.83,37.1,37.35,37.58,37.79,37.99,38.19,38.38,38.54,38.67,38.81,38.95,39.09,39.2,39.31,39.41,0,0,0,0,0,0], 'y' : [0.029,0.03,0.032,0.034,0.036,0.038,0.04,0.041,0.043,0.045,0.047,0.049,0.051,0.052,0.054,0.056,0.058,0.06,0.062,0.063,0.065,0.067,0.069,0.071,0.073,0.074,0.076,0.078,0.08,0.082,0.084,0.085,0.087,0.089,0.091,0.093,0.095,0.096,0.098,0.1,0.102,0.104,0.106,0.107,0.109,0.111,0.113,0.115,0.117,0.118,0.12,0.122,0.124,0.126,0.128,0.129,0.131,0.133,0.135,0.137,0.139,0.14,0.142,0.144,0.146,0.148,0.15,0.151,0.153,0.155,0.157,0.159,0.161,0.162,0.164,0.166,0.168,0.17,0.172,0.173,0.175,0.177,0.179,0.181,0.183,0.184,0.186,0.188,0.19,0.192,0.194,0.195,0.197,0.199,0.201,0.203,0.204,0.206,0.208,0.21,0.212,0.214,0.215,0.217,0.219,0.221,0.223,0.225,0.226,0.228,0.23,0.232,0.234,0.236,0.237,0.239,0.241,0.243,0.245,0.247,0.248,0.25,0.252,0.254,0.256,0.258,0.259,0.261,0.263,0.265,0.267,0.269,0.27,0.272,0.274,0.276,0.278,0.28,0.281,0.283,0.285,0.287,0.289,0.291,0.292,0.294,0.296,0.298,0.3,0.302,0.303,0.305,0.307,0.309,0.311,0.313,0.314,0.316,0.318,0.32,0.322,0.324,0.325,0.327,0.329,0.331,0.333,0.335,0.336,0.338,0.34,0.342,0.344,0.346,0.347,0.349,0.351,0.353,0.355,0.357,0.358,0.36,0.362,0.364,0.366]}, + '1.164' : { 'x' : 
[0,0,0,0,0,10.12,21.19,21.28,21.33,21.33,21.29,21.25,21.26,21.16,21.16,21.05,21.07,21.02,21.01,20.88,20.82,20.7,20.59,20.53,20.31,20.16,20.12,20.05,19.92,19.78,19.64,19.5,19.34,19.15,18.97,18.8,18.67,18.5,18.24,18.01,17.86,17.76,17.66,17.65,17.64,17.58,17.46,17.39,17.38,17.41,17.4,17.39,17.33,17.32,17.25,17.17,17.16,17.26,17.37,17.45,17.51,17.6,17.63,17.63,17.61,17.75,17.86,17.94,18.02,18.08,18.24,18.45,18.62,18.81,19.03,19.17,19.29,19.41,19.58,19.76,20.04,20.24,20.41,20.58,20.72,20.87,21.04,21.25,21.49,21.7,21.89,22.09,22.27,22.45,22.66,22.88,23.09,23.3,23.52,23.76,24.01,24.24,24.43,24.56,24.68,24.84,25.02,25.2,25.39,25.57,25.73,25.93,26.17,26.45,26.75,26.99,27.19,27.45,27.65,27.87,28.04,28.2,28.38,28.56,28.79,29.03,29.32,29.62,29.86,30.09,30.31,30.49,30.7,30.93,31.19,31.45,31.72,31.98,32.26,32.52,32.77,33.01,33.25,33.49,33.76,33.96,34.13,34.34,34.55,34.8,35.06,35.3,35.49,35.67,35.84,36,36.19,36.36,36.54,36.73,36.9,37.07,37.27,37.46,37.64,37.82,37.99,38.13,38.21,38.29,38.39,38.49,38.61,38.74,38.85,38.95,39.03,39.11,39.17,0,0,0,0,0,0], 'y' : [0.029,0.03,0.032,0.034,0.036,0.038,0.04,0.041,0.043,0.045,0.047,0.049,0.051,0.052,0.054,0.056,0.058,0.06,0.062,0.063,0.065,0.067,0.069,0.071,0.073,0.074,0.076,0.078,0.08,0.082,0.084,0.085,0.087,0.089,0.091,0.093,0.095,0.096,0.098,0.1,0.102,0.104,0.106,0.107,0.109,0.111,0.113,0.115,0.117,0.118,0.12,0.122,0.124,0.126,0.128,0.129,0.131,0.133,0.135,0.137,0.139,0.14,0.142,0.144,0.146,0.148,0.15,0.151,0.153,0.155,0.157,0.159,0.161,0.162,0.164,0.166,0.168,0.17,0.172,0.173,0.175,0.177,0.179,0.181,0.183,0.184,0.186,0.188,0.19,0.192,0.194,0.195,0.197,0.199,0.201,0.203,0.204,0.206,0.208,0.21,0.212,0.214,0.215,0.217,0.219,0.221,0.223,0.225,0.226,0.228,0.23,0.232,0.234,0.236,0.237,0.239,0.241,0.243,0.245,0.247,0.248,0.25,0.252,0.254,0.256,0.258,0.259,0.261,0.263,0.265,0.267,0.269,0.27,0.272,0.274,0.276,0.278,0.28,0.281,0.283,0.285,0.287,0.289,0.291,0.292,0.294,0.296,0.298,0.3,0.302,0.303,0.305,0.307,0.309,0.311,0.313,0.314,0.316,0.318,0.32,0.322,0.324,0.325,0.327,0.329,0.331,0.333,0.335,0.336,0.338,0.34,0.342,0.344,0.346,0.347,0.349,0.351,0.353,0.355,0.357,0.358,0.36,0.362,0.364,0.366]}, + '1.18' : { 'x' : [0,0,0,0,0,0,0,9.37,17.78,21.44,21.38,21.48,21.48,21.4,21.29,21.31,21.1,20.93,20.88,20.71,20.66,20.61,20.45,20.32,20.06,19.92,19.92,19.77,19.77,19.67,19.48,19.49,19.26,19.21,19.06,19.03,18.9,18.72,18.62,18.52,18.39,18.32,18.19,18.19,18.06,17.89,17.73,17.7,17.68,17.81,17.83,17.88,17.9,17.78,17.82,17.82,17.78,17.91,17.96,18.18,18.29,18.4,18.29,18.22,18.2,18.35,18.5,18.52,18.76,18.88,19.01,18.98,19.11,19.15,19.25,19.42,19.69,19.91,20.13,20.39,20.6,20.97,21.28,21.51,21.7,21.8,21.9,22.18,22.45,22.67,22.88,22.99,23.11,23.29,23.35,23.5,23.83,24.05,24.21,24.35,24.66,24.95,25.08,25.3,25.43,25.61,25.77,25.81,26.01,26.21,26.41,26.67,26.96,27.09,27.26,27.44,27.55,27.75,28.09,28.26,28.54,28.73,28.85,29.09,29.37,29.64,29.87,30.08,30.21,30.43,30.62,30.84,31.11,31.34,31.61,31.74,31.92,32.18,32.35,32.64,32.88,33.1,33.44,33.73,34.03,34.17,34.28,34.53,34.61,34.95,35.15,35.33,35.55,35.77,35.95,36.06,36.22,36.4,36.64,36.74,36.89,36.94,37.08,37.3,37.32,37.59,37.73,37.71,37.9,38.13,38.19,38.35,38.55,38.61,38.62,38.73,38.81,38.93,39.04,0,0,0,0,0,0], 'y' : 
[0.029,0.03,0.032,0.034,0.036,0.038,0.04,0.041,0.043,0.045,0.047,0.049,0.051,0.052,0.054,0.056,0.058,0.06,0.062,0.063,0.065,0.067,0.069,0.071,0.073,0.074,0.076,0.078,0.08,0.082,0.084,0.085,0.087,0.089,0.091,0.093,0.095,0.096,0.098,0.1,0.102,0.104,0.106,0.107,0.109,0.111,0.113,0.115,0.117,0.118,0.12,0.122,0.124,0.126,0.128,0.129,0.131,0.133,0.135,0.137,0.139,0.14,0.142,0.144,0.146,0.148,0.15,0.151,0.153,0.155,0.157,0.159,0.161,0.162,0.164,0.166,0.168,0.17,0.172,0.173,0.175,0.177,0.179,0.181,0.183,0.184,0.186,0.188,0.19,0.192,0.194,0.195,0.197,0.199,0.201,0.203,0.204,0.206,0.208,0.21,0.212,0.214,0.215,0.217,0.219,0.221,0.223,0.225,0.226,0.228,0.23,0.232,0.234,0.236,0.237,0.239,0.241,0.243,0.245,0.247,0.248,0.25,0.252,0.254,0.256,0.258,0.259,0.261,0.263,0.265,0.267,0.269,0.27,0.272,0.274,0.276,0.278,0.28,0.281,0.283,0.285,0.287,0.289,0.291,0.292,0.294,0.296,0.298,0.3,0.302,0.303,0.305,0.307,0.309,0.311,0.313,0.314,0.316,0.318,0.32,0.322,0.324,0.325,0.327,0.329,0.331,0.333,0.335,0.336,0.338,0.34,0.342,0.344,0.346,0.347,0.349,0.351,0.353,0.355,0.357,0.358,0.36,0.362,0.364,0.366]}, + + } + + xData =[0.493, 0.555, 0.652, 0.77, 0.867, 0.96, 1.063, 1.164, 1.18] + + + for i in xData: + #Extract y dimension and convert from mm to meter + refY = np.array(testData[str(i)]['y']) + #u is already converted to model units (m/s) no need to convert reference velocity + #Ref uses neg X for flow direction we'll reverse for posX + refX = np.array(testData[str(i)]['x']) + + #From reference x0 (rear of body) find x1 for plot + x1 = x0[0] + i + + print(f' x1 is {x1}') + sim.macro(sim.f_0, sim.bc_mask, sim.rho, sim.u, streamId=0) + filename = os.path.join(output_dir, f"{prefix}_{str(i)}") + wp.synchronize() + IOexporter.to_line( + filename, + {"velocity": sim.u}, + start_point=(x1, x0[1], x0[2]), + end_point=(x1, x0[1], x0[2]+1.0), + resolution=250, + component=0, + radius=delta_x_coarse #needed with model units + ) + # read the CSV written by the exporter + csv_path = filename + "_velocity_0.csv" # adjust if your exporter uses another extension + print(f"CSV path is {csv_path}") + + try: + sim_z, sim_ux = _load_sim_line(csv_path) + except Exception as e: + print(f"Failed to read {csv_path}: {e}") + continue + + # plot reference vs simulation + plt.figure(figsize=(4.5, 6)) + plt.plot(refX, refY, 'o', mfc='none', label='Experimental') + plt.plot(sim_ux, sim_z, '-', lw=2, label='Simulation') + plt.xlim(np.min(refX)*.9, np.max(refX)*1.1) + plt.ylim(np.min(refY), np.max(refY)) + plt.xlabel('Ux [m/s]') + plt.ylabel('z [m]') + plt.title(f'Velocity Plot at {i:+.3f}') + plt.grid(True, alpha=0.3) + plt.legend() + plt.tight_layout() + plt.savefig(filename + ".png", dpi=150) + plt.close() + + + +# Main Script +# =========== +# Initialize XLB +xlb.init( + velocity_set=velocity_set, + default_backend=compute_backend, + default_precision_policy=precision_policy, +) + +# Generate mesh +if mesher_type == "makemesh": + level_data, body_vertices, grid_shape_zip, partSize, actual_num_levels, shift, sparsity_pattern, level_origins, x0 = generate_makemesh_mesh( + stl_filename, voxel_size, trim, trim_voxels + ) +elif mesher_type == "cuboid": + level_data, body_vertices, grid_shape_zip, partSize, actual_num_levels, shift, sparsity_pattern, level_origins, x0 = generate_cuboid_mesh( + stl_filename, voxel_size, trim, trim_voxels + ) +else: + raise ValueError(f"Invalid mesher_type: {mesher_type}. 
Must be 'makemesh' or 'cuboid'.") + +# Characteristic length +L = partSize[0] +L = float(L) # Cast to built-in float to avoid NumPy type propagation issues with Warp + +# Compute Re +Re = u_physical * L / kinematic_viscosity + +# Calculate lattice parameters +delta_x_coarse = voxel_size * 2 ** (actual_num_levels - 1) +delta_t = voxel_size * ulb / u_physical +nu_lattice = kinematic_viscosity * delta_t / (voxel_size ** 2) +omega = 1.0 / (3.0 * nu_lattice + 0.5) + +# Create output directory +current_dir = os.path.join(os.path.dirname(__file__)) +output_dir = os.path.join(current_dir, script_name) +if os.path.exists(output_dir): + shutil.rmtree(output_dir) +os.makedirs(output_dir) + +# Define exporter objects +field_name_cardinality_dict = {"velocity": 3, "density": 1} +h5exporter = MultiresIO( + field_name_cardinality_dict, + level_data, + scale=voxel_size, + offset=-shift, + timestep_size=delta_t, +) +bc_mask_exporter = MultiresIO({"bc_mask": 1}, level_data) + +# Create grid +grid = multires_grid_factory( + grid_shape_zip, + velocity_set=velocity_set, + sparsity_pattern_list=sparsity_pattern, + sparsity_pattern_origins=[neon.Index_3d(*box_origin) for box_origin in level_origins], +) + +# Calculate num_steps +coarsest_level = grid.count_levels - 1 +grid_shape_x_coarsest = grid.level_to_shape(coarsest_level)[0] +num_steps = int(flow_passes * (grid_shape_x_coarsest / ulb)) + + +# Calculate print and file output intervals +print_interval = max(1, int(num_steps * (print_interval_percentage / 100.0))) +crossover_step = int(num_steps * (file_output_crossover_percentage / 100.0)) +file_output_interval_pre_crossover = max(1, int(crossover_step / num_file_outputs_pre_crossover)) if num_file_outputs_pre_crossover > 0 else num_steps + 1 +file_output_interval_post_crossover = max(1, int((num_steps - crossover_step) / num_file_outputs_post_crossover)) if num_file_outputs_post_crossover > 0 else num_steps + 1 +final_print_interval = max(1, int((num_steps-crossover_step) * (print_interval_percentage / 100.0))) + +# Setup boundary conditions +boundary_conditions = setup_boundary_conditions(grid, level_data, body_vertices, ulb, compute_backend) + +# Create initializer +initializer = CustomMultiresInitializer( + bc_id=boundary_conditions[-2].id, # bc_outlet + constant_velocity_vector=(ulb, 0.0, 0.0), + velocity_set=velocity_set, + precision_policy=precision_policy, + compute_backend=compute_backend, +) + +# Initialize simulation +sim = initialize_simulation(grid, boundary_conditions, omega, initializer) + +# Compute voxel statistics and reference area +stats = compute_voxel_statistics_and_reference_area(sim, bc_mask_exporter, level_data, actual_num_levels, sparsity_pattern, boundary_conditions, voxel_size) +active_voxels = stats["active_voxels"] +solid_voxels = stats["solid_voxels"] +total_voxels = stats["total_voxels"] +total_lattice_updates_per_step = stats["total_lattice_updates_per_step"] +reference_area = stats["reference_area"] +reference_area_physical = stats["reference_area_physical"] + +# Save initial bc_mask +filename = os.path.join(output_dir, f"{script_name}_initial_bc_mask") +bc_mask_exporter.to_hdf5(filename, {"bc_mask": sim.bc_mask}, compression="gzip", compression_opts=0) + +wp.synchronize() + +# Setup momentum transfer +# momentum_transfer = MultiresMomentumTransfer(boundary_conditions[-1], compute_backend=compute_backend) # bc_body + +momentum_transfer = MultiresMomentumTransfer( + boundary_conditions[-1], + mres_perf_opt=xlb.MresPerfOptimizationType.FUSION_AT_FINEST, + 
compute_backend=compute_backend,
+)
+
+# Print simulation info
+print("\n" + "=" * 50 + "\n")
+print(f"Simulation Configuration for Re = {Re}:")
+# print(f"Grid shape at finest level: {grid_shape_zip}")
+# print(f"Grid shape at coarsest level: {grid.level_to_shape(coarsest_level)}")
+print(f"Number of flow passes: {flow_passes}")
+print(f"Calculated iterations: {num_steps:,}")
+# print(f"Output directory: {output_dir}")
+# print(f"Print interval: {print_interval} steps (every {print_interval_percentage}% of iterations)")
+# print(f"File output interval pre-crossover (0-{file_output_crossover_percentage}%): {file_output_interval_pre_crossover} steps")
+# print(f"File output interval post-crossover ({file_output_crossover_percentage}-100%): {file_output_interval_post_crossover} steps")
+print(f"Finest voxel size: {voxel_size} meters")
+print(f"Coarsest voxel size: {delta_x_coarse} meters")
+print(f"Total voxels: {sum(np.count_nonzero(mask) for mask in sparsity_pattern):,}")
+print(f"Total active voxels: {total_voxels:,}")
+print(f"Active voxels per level: {active_voxels}")
+print(f"Solid voxels per level: {solid_voxels}")
+print(f"Total lattice updates per global step: {total_lattice_updates_per_step:,}")
+print(f"Actual number of refinement levels: {actual_num_levels}")
+print(f"Physical inlet velocity: {u_physical:.4f} m/s")
+print(f"Lattice velocity (ulb): {ulb}")
+print(f"Characteristic length: {L:.4f} meters")
+# print(f"Kinematic viscosity: {kinematic_viscosity} m^2/s")
+print(f"Computed reference area (bc_mask): {reference_area} lattice units")
+print(f"Physical reference area (bc_mask): {reference_area_physical:.6f} m^2")
+print(f"Reynolds number: {Re:,.2f}")
+# print(f"Lattice viscosity: {nu_lattice:.5f}")
+print(f"Relaxation parameter (omega): {omega:.5f}")
+print("\n" + "=" * 50 + "\n")
+
+# -------------------------- Simulation Loop --------------------------
+wp.synchronize()
+start_time = time.time()
+compute_time = 0.0
+steps_since_last_print = 0
+drag_values = []
+
+for step in range(num_steps):
+    step_start = time.time()
+    sim.step()
+    wp.synchronize()
+    compute_time += time.time() - step_start
+    steps_since_last_print += 1
+    if step % print_interval == 0 or step == num_steps - 1:
+        sim.macro(sim.f_0, sim.bc_mask, sim.rho, sim.u, streamId=0)
+        wp.synchronize()
+        cd, cl, drag = print_lift_drag(sim, step, momentum_transfer, ulb, reference_area, voxel_size)
+        # Write a slice image on every 10th stats print. Note the parentheses:
+        # "step % print_interval*10" parses as "(step % print_interval)*10" and
+        # would fire on every stats print.
+        if step % (print_interval * 10) == 0:
+            filename = os.path.join(output_dir, f"{script_name}_{step:04d}")
+            h5exporter.to_slice_image(
+                filename,
+                {"velocity": sim.u},
+                plane_point=(1, 0, 0),
+                plane_normal=(0, 1, 0),
+                grid_res=2000,
+                bounds=(0, 1, 0, 1),
+                show_axes=False,
+                show_colorbar=False,
+                slice_thickness=delta_x_coarse,  # needed when using model units
+                normalize=u_physical * 1.5,  # eventually the 1.5 could be read from json as before
+            )
+        end_time = time.time()
+        elapsed = end_time - start_time
+        total_lattice_updates = total_lattice_updates_per_step * steps_since_last_print
+        MLUPS = total_lattice_updates / compute_time / 1e6 if compute_time > 0 else 0.0
+        current_flow_passes = step * ulb / grid_shape_x_coarsest
+        remaining_steps = num_steps - step - 1
+        time_remaining = 0.0 if MLUPS == 0 else (total_lattice_updates_per_step * remaining_steps) / (MLUPS * 1e6)
+        hours, rem = divmod(time_remaining, 3600)
+        minutes, seconds = divmod(rem, 60)
+        time_remaining_str = f"{int(hours):02d}h {int(minutes):02d}m {int(seconds):02d}s"
+        percent_complete = (step + 1) / num_steps * 100
+        print(f"Completed step {step}/{num_steps} ({percent_complete:.2f}% complete)")
+        print(f"  Flow Passes: {current_flow_passes:.2f}")
+        print(f"  Time elapsed: {elapsed:.1f}s, Compute time: {compute_time:.1f}s, ETA: {time_remaining_str}")
+        print(f"  MLUPS: {MLUPS:.1f}")
+        print(f"  Cd= {cd:.3f}, Cl= {cl:.3f}, Drag Force (lattice units)={drag:.3f}")
+        start_time = time.time()
+        compute_time = 0.0
+        steps_since_last_print = 0
+    file_output_interval = file_output_interval_pre_crossover if step < crossover_step else file_output_interval_post_crossover
+    if step % file_output_interval == 0 or step == num_steps - 1:
+        sim.macro(sim.f_0, sim.bc_mask, sim.rho, sim.u, streamId=0)
+        filename = os.path.join(output_dir, f"{script_name}_{step:04d}")
+        try:
+            h5exporter.to_hdf5(filename, {"velocity": sim.u, "density": sim.rho}, compression="gzip", compression_opts=1)
+        except Exception as e:
+            print(f"Error during file output at step {step}: {e}")
+        wp.synchronize()
+    if step >= crossover_step and step % final_print_interval == 0:
+        sim.macro(sim.f_0, sim.bc_mask, sim.rho, sim.u, streamId=0)
+        wp.synchronize()
+        print(f"Completed step {step}/{num_steps}")
+        cd, cl, drag = print_lift_drag(sim, step, momentum_transfer, ulb, reference_area, voxel_size)
+        print(f"  Cd= {cd:.3f}, Cl= {cl:.3f}, Drag Force (lattice units)={drag:.3f}")
+        # Same precedence fix as above: slice image every 10th post-crossover print.
+        if step % (10 * final_print_interval) == 0:
+            filename = os.path.join(output_dir, f"{script_name}_{step:04d}")
+            h5exporter.to_slice_image(
+                filename,
+                {"velocity": sim.u},
+                plane_point=(1, 0, 0),
+                plane_normal=(0, 1, 0),
+                grid_res=2000,
+                bounds=(0, 1, 0, 1),
+                show_axes=False,
+                show_colorbar=False,
+                slice_thickness=delta_x_coarse,  # needed when using model units
+                normalize=u_physical * 1.5,  # eventually the 1.5 could be read from json as before
+            )
+
+    if step == num_steps - 1:
+        plot_data(x0, output_dir, delta_x_coarse, sim, h5exporter, prefix='WindsorNW')
+
+# Save drag and lift data to CSV
+if len(drag_values) > 0:
+    with open(os.path.join(output_dir, "drag_lift.csv"), 'w') as fd:
+        fd.write("Step,Cd,Cl\n")
+        for i, (cd, cl) in enumerate(drag_values):
+            fd.write(f"{i * print_interval},{cd},{cl}\n")
+    plot_drag_lift(drag_values, output_dir, print_interval, script_name)
+
+# Calculate and print average Cd and Cl over the last 50% of samples
+drag_values_array = np.array(drag_values)
+if len(drag_values) > 0:
+    start_index = len(drag_values) // 2
+    last_half = drag_values_array[start_index:, :]
+    avg_cd = np.mean(last_half[:, 0])
+    avg_cl = np.mean(last_half[:, 1])
+    print(f"Average Drag Coefficient (Cd) over last 50% of samples: {avg_cd:.6f}")
+    print(f"Average Lift Coefficient (Cl) over last 50% of samples: {avg_cl:.6f}")
+    print(f"Experimental Drag Coefficient (Cd): {0.3321}")
+    print(f"Error Drag Coefficient (Cd): {((avg_cd-0.3321)/0.3321)*100:.2f}%")
+else:
+    print("No drag or lift data collected.")
+
diff --git a/examples/windtunnel_json.py b/examples/windtunnel_json.py
new file mode 100644
index 00000000..2c3a1014
--- /dev/null
+++ b/examples/windtunnel_json.py
@@ -0,0 +1,1253 @@
+import neon
+import warp as wp
+import numpy as np
+import os, sys, time, trimesh
+import matplotlib.pyplot as plt
+
+import xlb
+from xlb.compute_backend import ComputeBackend
+from xlb.precision_policy import PrecisionPolicy
+from xlb.grid import multires_grid_factory
+from xlb.operator.boundary_condition import (
+    FullwayBounceBackBC,
+    HalfwayBounceBackBC,
+    RegularizedBC,
+
ExtrapolationOutflowBC, + DoNothingBC, + ZouHeBC, + HybridBC, +) +from xlb.operator.boundary_masker import MeshVoxelizationMethod +from xlb.utils.mesher import make_cuboid_mesh, MultiresIO +from xlb.utils.makemesh import generate_mesh +from xlb.operator.force import MultiresMomentumTransfer +from xlb.helper.initializers import CustomMultiresInitializer +from xlb import MresPerfOptimizationType +import httpx, logging, getopt, json +from json.decoder import JSONDecodeError +from uuid import uuid4 +from threading import Thread +from typing import Any +# Use 8 CPU devices if running on ACP +acp_env = os.environ.get('ACP_ENVIRONMENT', '') +if acp_env not in ('', 'local'): + os.environ["XLA_FLAGS"] = '--xla_force_host_platform_device_count=8' + +WORKER_PROTOCOL = os.environ.get('SCM_PROTOCOL', '') +WORKER_HOST = os.environ.get('SCM_HOST', '') +WORKER_PORT = os.environ.get('SCM_PORT', '') + +HEARTBEAT_SLEEP = int(float(os.environ.get('SCM_SOLVERHEARTBEAT', 1000)) / 1000) +HEARTBEAT_THREAD = None +HEARTBEAT_CANCELLED = False + +### SCM Functions ### +def running_via_scm(): + """ + Checks if the code is running via the SCM worker protocol. + + Returns: + bool: True if WORKER_PROTOCOL is set (indicating execution via SCM), False otherwise. + """ + + if WORKER_PROTOCOL: + return True + + return False + +def scm_event(endpoint, data=0, event_id=''): + """ + Sends an event to a specified SCM worker endpoint using HTTP POST and returns the response. + + Args: + endpoint (str): The endpoint path to send the event to. + data (int, optional): The data payload to send. Defaults to 0. + event_id (str, optional): An identifier for the event. Defaults to ''. + + Returns: + Any: The 'response' field from the JSON response if available, otherwise the provided event_id. + + Notes: + - If any of WORKER_PROTOCOL, WORKER_HOST, WORKER_PORT, or endpoint are not set, returns the event_id. + - If the response cannot be decoded as JSON, returns the event_id. + """ + + if not endpoint or not WORKER_PROTOCOL or not WORKER_HOST or not WORKER_PORT: + return event_id + + url = f'{WORKER_PROTOCOL}://{WORKER_HOST}:{WORKER_PORT}{endpoint}' + + headers = { + 'Content-Type': 'application/json' + } + + data = { + 'data': data, + 'id': event_id, + } + + response = httpx.post(url, headers=headers, json=data) + + try: + return response.json().get('response', event_id) + except JSONDecodeError: + return event_id + + return event_id + +def heartbeat(): + """ + Continuously sends a heartbeat signal to the compute worker endpoint to indicate the process is alive. + + The function repeatedly calls the `scm_event` function with the '/ComputeWorker/v1/heartbeat' endpoint. + If the response is 'canceled' or the global variable `HEARTBEAT_CANCELLED` is set to True, the loop breaks and the function returns. + Otherwise, the function sleeps for a duration specified by the global variable `HEARTBEAT_SLEEP` before sending the next heartbeat. + + Returns: + None + """ + + while True: + response = scm_event('/ComputeWorker/v1/heartbeat') + + if response == 'canceled' or HEARTBEAT_CANCELLED: + return + + time.sleep(HEARTBEAT_SLEEP) + +def scm_init(): + """ + Performs SCM initialization by attaching to the compute worker and starting the heartbeat thread. + + This function performs the following actions: + 1. Sends an attach event to the compute worker endpoint. + 2. Creates and starts a global heartbeat thread to maintain regular communication and status checks. 
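+
+    A typical worker lifecycle (sketch) built from the helpers in this file:
+
+        scm_init()                  # attach and start the heartbeat thread
+        scm_progress(50)            # ...periodic progress updates...
+        scm_results_available()     # intermediate results ready
+        scm_complete()              # progress=100, complete event, heartbeat stops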
+ + Globals: + HEARTBEAT_THREAD: Thread object responsible for running the heartbeat function. + + Side Effects: + Modifies the global HEARTBEAT_THREAD variable and starts a new thread. + """ + + global HEARTBEAT_THREAD + + scm_event('/ComputeWorker/v1/attach', 1) + + HEARTBEAT_THREAD = Thread(target=heartbeat) + HEARTBEAT_THREAD.start() + + scm_progress(0) + +def scm_progress(progress): + """ + Sends a progress update to the SCM compute worker. + + Args: + progress (int): The progress value to send, between 0 and 100. + + Returns: + None + """ + + scm_event('/ComputeWorker/v1/progress', progress) + +def scm_results_available(final_update=False): + """ + Notifies that results are available by sending an event to the '/ComputeWorker/v1/results' endpoint. + + Args: + final_update (bool, optional): Indicates whether this is the final update. Defaults to False. + + Returns: + None + """ + + scm_event('/ComputeWorker/v1/results', int(final_update)) + +def scm_cancel_heartbeat(): + """ + Cancels the ongoing heartbeat process by setting the HEARTBEAT_CANCELLED flag to True. + If a heartbeat thread is running, waits for it to finish and then resets the thread reference. + """ + + global HEARTBEAT_CANCELLED + global HEARTBEAT_THREAD + + HEARTBEAT_CANCELLED = True + if HEARTBEAT_THREAD: + HEARTBEAT_THREAD.join() + HEARTBEAT_THREAD = None + +def scm_set_error(code, message): + """ + Sets an error state by sending an error code and message to the ComputeWorker event handler. + + Args: + code (int): The error code representing the type of error. + message (str): A descriptive message explaining the error. + + Returns: + None + + Side Effects: + Triggers the '/ComputeWorker/v1/seterror' event with the provided code and message. + """ + + scm_event('/ComputeWorker/v1/seterror', code, message) + +def scm_complete(): + """ + Notifies the SCM worker that the process is complete by sending a completion event. + + Returns: + None + """ + + scm_progress(100) + + scm_event('/ComputeWorker/v1/complete', 1, str(uuid4())) + + scm_cancel_heartbeat() +#################### + +wp.clear_kernel_cache() +wp.config.quiet = False + +def prep_inputs(input_file): + start_time = time.time() + f = open(input_file) + jsonfile = json.load(f) + proj_path = os.path.dirname(os.path.abspath(input_file)) + jsonfile['projPath'] = proj_path + settings = jsonfile['settings'] + voxel_size = settings['voxelSize'] + ulb = settings['ulb'] + # Extract the inlet velocity from the json dict + prescribed_velocity_phys = jsonfile['InletBC']['x'] + if running_via_scm(): + output_dir = proj_path + else: + output_dir = os.path.join(proj_path, jsonfile['outputName']) + if not os.path.exists(output_dir): + os.makedirs(output_dir) + for fx in [os.path.join(output_dir,f) for f in os.listdir(output_dir)]: + os.remove(fx) + + + with open(os.path.join(output_dir, "project.log"),'w') as fd: + fd.write("*** Studio Wind Tunnel Solver Log File ***\n\n\n") + fd.write("Date Created: "+time.asctime(time.localtime())+" \n\n") + fd.write("Processing input json ... 
\n\n") + logging.info("Processing input json ...") + + # Set accuracy and lattice type + if settings['doublePrecision']==True: + precision_policy = PrecisionPolicy.FP64FP64 + elif settings['doublePrecision']==-1: + precision_policy = PrecisionPolicy.FP16FP16 + else: + precision_policy = PrecisionPolicy.FP32FP32 + + compute_backend = ComputeBackend.NEON + velocity_set = xlb.velocity_set.D3Q27(precision_policy=precision_policy, compute_backend=compute_backend) + + ### Process Car for obj and scale + body_stl = os.path.join(proj_path, str(jsonfile['vehicle']['body'])) + filename, file_extension = os.path.splitext(body_stl) + + body_mesh = trimesh.load_mesh(body_stl, process=False) + if file_extension =='.obj': + body_mesh.apply_scale(0.01) + body_mesh.export(os.path.join(output_dir, filename+'.stl')) + body_mesh = trimesh.load_mesh(os.path.join(output_dir, filename+'.stl'), process=False) + + #If any wheels listed + if len(jsonfile['vehicle']['wheels']) > 0: + wheel_stls = [] + for wheel in jsonfile['vehicle']['wheels']: + wheel = os.path.join(proj_path, wheel) + wheel_stls.append(wheel) + wheel_meshes =[] + w=1 + for wheel in wheel_stls: + wheel_mesh = trimesh.load_mesh(wheel, process=False) + if file_extension =='.obj': + wheel_mesh.apply_scale(0.01) + wheel_mesh.export(os.path.join(output_dir, 'wheel'+str(w)+'.stl')) + wheel_mesh = trimesh.load_mesh(os.path.join(output_dir, 'wheel'+str(w)+'.stl')) + w+=1 + wheel_meshes.append(wheel_mesh) + + car_mesh = trimesh.util.concatenate(body_mesh + wheel_meshes) + else: + car_mesh = body_mesh + wheel_meshes=None + + # =========== + # Initialize XLB + xlb.init( + velocity_set=velocity_set, + default_backend=compute_backend, + default_precision_policy=precision_policy, + ) + + + level_data, body_vertices, wheel_vertices, grid_shape_zip, partSize, actual_num_levels, shift, sparsity_pattern, level_origins = mesh_prep( + voxel_size, car_mesh, body_mesh, wheel_meshes, output_dir, jsonfile + ) + + + # Characteristic length + L = float(partSize[0]) + #Material Setup + material = jsonfile['fluid'] + density = material['density'] + dynamic_viscosity = material['viscosity'] + kinematic_viscosity = dynamic_viscosity / density + + # Compute Re + Re = abs(prescribed_velocity_phys) * L / kinematic_viscosity + + # Calculate lattice parameters + delta_x_coarse = voxel_size * 2 ** (actual_num_levels - 1) + delta_t = voxel_size * ulb / prescribed_velocity_phys + lbm_visc = kinematic_viscosity * delta_t / (voxel_size ** 2) + omega = 1.0 / (3.0 * lbm_visc + 0.5) + + # Define exporter objects + + field_name_cardinality_dict = {"velocity": 3, "density": 1} + h5exporter = MultiresIO( + field_name_cardinality_dict, + level_data, + scale=voxel_size, + offset=-shift, + timestep_size=delta_t, + ) + bc_mask_exporter = MultiresIO({"bc_mask": 1}, level_data) + + # Create grid + grid = multires_grid_factory( + grid_shape_zip, + velocity_set=velocity_set, + sparsity_pattern_list=sparsity_pattern, + sparsity_pattern_origins=[neon.Index_3d(*box_origin) for box_origin in level_origins], + ) + # Calculate num_steps + coarsest_level = grid.count_levels - 1 + grid_shape_x_coarsest = grid.level_to_shape(coarsest_level)[0] + if jsonfile['settings']['flowPasses'] > 0: + num_steps = int(jsonfile['settings']['flowPasses'] * (grid_shape_x_coarsest / ulb)) + else: + num_steps = int(jsonfile['settings']['iterations']) + + + # Setup boundary conditions + boundary_conditions = setup_boundary_conditions(grid, level_data, body_vertices, wheel_vertices, ulb, lbm_visc, grid_shape_zip, 
precision_policy, jsonfile, velocity_set, compute_backend) + + # Create initializer + initializer = CustomMultiresInitializer( + bc_id=boundary_conditions[-2].id, # bc_outlet + constant_velocity_vector=(ulb, 0.0, 0.0), + velocity_set=velocity_set, + precision_policy=precision_policy, + compute_backend=compute_backend, + ) + + # Initialize simulation + sim = xlb.helper.MultiresSimulationManager( + omega=omega, + grid=grid, + boundary_conditions=boundary_conditions, + collision_type="KBC", + initializer=initializer, + mres_perf_opt=xlb.MresPerfOptimizationType.FUSION_AT_FINEST, + ) + + # Compute voxel statistics and reference area + stats = compute_voxel_statistics_and_reference_area(sim, bc_mask_exporter, level_data, actual_num_levels, sparsity_pattern, boundary_conditions, voxel_size) + active_voxels = stats["active_voxels"] + solid_voxels = stats["solid_voxels"] + total_voxels = stats["total_voxels"] + total_lattice_updates_per_step = stats["total_lattice_updates_per_step"] + reference_area = stats["reference_area"] + reference_area_physical = stats["reference_area_physical"] + + wp.synchronize() + + # Setup momentum transfer + momentum_transfer = MultiresMomentumTransfer( + boundary_conditions[-1], + mres_perf_opt=xlb.MresPerfOptimizationType.FUSION_AT_FINEST, + compute_backend=compute_backend, + ) + + with open(os.path.join(output_dir, "project.log"),'a') as fd: + fd.write('Material Properties\n') + fd.write('___________________\n') + fd.write(f'Density: {density:.4f} kg/m3\n') + fd.write(f'Visc Dyn: {dynamic_viscosity:.4e} Pa-s\n') + fd.write(f'Visc Kin: {kinematic_viscosity:.4e} m2/s\n') + fd.write(f'Visc LBM: {lbm_visc:.4e} \n\n') + fd.write('Boundary Setup\n') + fd.write('___________________\n') + fd.write(f"Walls: {jsonfile['BCtypes']['walls']}\n") + fd.write(f"Ground: {jsonfile['BCtypes']['ground']}\n") + fd.write('\nSolver Parameters\n') + fd.write('___________________\n') + fd.write(f"Number of flow passes: {jsonfile['settings']['flowPasses']}\n") + fd.write(f"Calculated iterations: {num_steps:,}\n") + fd.write(f"Finest voxel size: {voxel_size} meters\n") + fd.write(f"Coarsest voxel size: {delta_x_coarse} meters\n") + fd.write(f"Total voxels: {sum(np.count_nonzero(mask) for mask in sparsity_pattern):,}\n") + fd.write(f"Total active voxels: {total_voxels:,}\n") + fd.write(f"Active voxels per level: {active_voxels}\n") + fd.write(f"Solid voxels per level: {solid_voxels}\n") + fd.write(f"Total lattice updates per global step: {total_lattice_updates_per_step:,}\n") + fd.write(f"Actual number of refinement levels: {actual_num_levels}\n") + fd.write(f"Physical inlet velocity: {prescribed_velocity_phys:.4f} m/s\n") + fd.write(f"Lattice velocity (ulb): {ulb}\n") + fd.write(f"Characteristic length: {L: .4f} meters\n") + fd.write(f"Computed reference area (bc_mask): {reference_area} lattice units\n") + fd.write(f"Physical reference area (bc_mask): {reference_area_physical:.6f} m^2\n") + fd.write(f"Reynolds number: {Re:,.2f}\n") + fd.write(f'Inlet Velocity: {prescribed_velocity_phys:.1f} m/s \n') + fd.write(f'Timestep Size: {delta_t:.4e} seconds\n') + fd.write('Omega: '+str(omega)+'\n') + fd.write('ULB: '+str(settings['ulb'])+'\n\n') + fd.write('Results\n') + fd.write('___________________\n') + fd.write(f'Time to initialize: {(time.time()-start_time)/60:.2f} min\n') + + + solve( + sim, + ulb, + num_steps, + h5exporter, + output_dir, + grid_shape_zip, + grid_shape_x_coarsest, + delta_x_coarse, + shift, + momentum_transfer, + reference_area, + voxel_size, + prescribed_velocity_phys, + 
total_lattice_updates_per_step, + jsonfile + ) + + +# Mesh Generation Functions +# ========================= +def mesh_prep(voxel_size, car_mesh, body_mesh, wheel_meshes, output_dir, jsonfile): + + # Compute bounds on full car + min_bound = car_mesh.vertices.min(axis=0) + max_bound = car_mesh.vertices.max(axis=0) + partSize = max_bound - min_bound + + + mesher_type = jsonfile['mesher']['type'] + # Generate mesh + if mesher_type == "mres": + shift = np.array( + [ + jsonfile['mesher']['mres']['domain']["-x"] * partSize[0] - min_bound[0], + jsonfile['mesher']['mres']['domain']["-y"] * partSize[1] - min_bound[1], + jsonfile['mesher']['mres']['domain']["-z"] * partSize[2] - min_bound[2], + ], + dtype=float, + ) + #Apply shift to car mesh for meshing purpose + car_mesh.apply_translation(shift) + _ = car_mesh.vertex_normals + car_mesh.export("temp.stl") + + # Generate mesh using generate_mesh with ground refinement + level_data, _, sparsity_pattern, level_origins = generate_mesh( + jsonfile['mesher']['mres']['levels'], + "temp.stl", + jsonfile['settings']['voxelSize'], + jsonfile['mesher']['mres']['padding'], + jsonfile['mesher']['mres']['domain'], + ground_refinement_level=jsonfile['mesher']['mres']['ground_refinement_level'], + ground_voxel_height=jsonfile['mesher']['mres']['ground_voxel_height'], + ) + elif mesher_type == "cuboid": + # Compute translation to put mesh into first octant of the domain + domain_multiplier = jsonfile['mesher']['cuboid'] + shift = np.array( + [ + domain_multiplier[0][0] * partSize[0] - min_bound[0], + domain_multiplier[0][2] * partSize[1] - min_bound[1], + domain_multiplier[0][4] * partSize[2] - min_bound[2], + ], + dtype=float, + ) + #Apply shift to car mesh for meshing purpose + car_mesh.apply_translation(shift) + _ = car_mesh.vertex_normals + car_mesh.export("temp.stl") + + # Generate mesh using Cuboid Mesher on full car + level_data, sparsity_pattern, level_origins = make_cuboid_mesh( + jsonfile['settings']['voxelSize'], + domain_multiplier, + "temp.stl", + ) + else: + raise ValueError(f"Invalid mesher_type: {mesher_type}. Must be 'mres' or 'cuboid'.") + + # Apply translation to each part + body_mesh.apply_translation(shift) + + if wheel_meshes is not None: + wheel_vertices = [] + body_vertices = np.asarray(body_mesh.vertices) / voxel_size + for mesh in wheel_meshes: + mesh.apply_translation(shift) + if jsonfile['mesher']['trim'] == True: + zShift = jsonfile['mesher']['trim_voxels'] + plane_origin = np.array([0, 0, mesh.bounds[0][2]+(zShift* voxel_size)]) + plane_normal = np.array([0, 0, 1]) # Upward pointing normal + # Slice the mesh using the defined plane. + # With cap=True, the open slice is automatically closed off. + mesh_above = mesh.slice_plane(plane_origin=plane_origin, + plane_normal=plane_normal, + cap=True) + mesh_above.export(os.path.join(output_dir, 'temp.stl')) + wheel_stl = os.path.join(output_dir, 'temp.stl') + wheel_mesh = trimesh.load_mesh(wheel_stl, process=False) + wheel_vertices.append(np.asarray(wheel_mesh.vertices) / voxel_size) + + else: + wheel_vertices.append(np.asarray(mesh.vertices) / voxel_size) + else: + #No Wheels trim body as needed + wheel_vertices=None + if jsonfile['mesher']['trim'] == True: + zShift = jsonfile['mesher']['trim_voxels'] + plane_origin = np.array([0, 0, body_mesh.bounds[0][2]+(zShift* voxel_size)]) + plane_normal = np.array([0, 0, 1]) # Upward pointing normal + # Slice the mesh using the defined plane. + # With cap=True, the open slice is automatically closed off. 
+
+ mesh_above = body_mesh.slice_plane(plane_origin=plane_origin,
+ plane_normal=plane_normal,
+ cap=True)
+ mesh_above.export(os.path.join(output_dir, 'temp.stl'))
+ body_stl = os.path.join(output_dir, 'temp.stl')
+ body_mesh = trimesh.load_mesh(body_stl, process=False)
+ body_vertices = np.asarray(body_mesh.vertices) / voxel_size
+ else:
+ body_vertices = np.asarray(body_mesh.vertices) / voxel_size
+
+
+ actual_num_levels = len(level_data)
+ grid_shape_finest = tuple([int(i * 2 ** (actual_num_levels - 1)) for i in level_data[-1][0].shape])
+ requested_levels = jsonfile['mesher']['mres']['levels'] if mesher_type == "mres" else len(jsonfile['mesher']['cuboid'])
+ print(f"Requested levels: {requested_levels}, Actual levels: {actual_num_levels}")
+ print(f"Full shape based on finest voxel size is {grid_shape_finest}")
+ # Clean all temp stls in the folder
+ for filename in os.listdir(output_dir):
+ # Check if the file ends with '.stl' and is a file (not a directory)
+ if filename.endswith('.stl') and os.path.isfile(os.path.join(output_dir, filename)):
+ file_path = os.path.join(output_dir, filename)
+ os.remove(file_path)
+
+ return level_data, body_vertices, wheel_vertices, grid_shape_finest, partSize, actual_num_levels, shift, sparsity_pattern, level_origins
+
+# Boundary Conditions Setup
+# =========================
+def setup_boundary_conditions(grid, level_data, body_vertices, wheel_vertices, ulb, lbm_visc, grid_shape_zip, precision_policy, jsonfile, velocity_set, compute_backend=ComputeBackend.NEON):
+ """
+ Set up boundary conditions for the simulation.
+ """
+ num_levels = len(level_data)
+ coarsest_level = num_levels - 1
+ box = grid.bounding_box_indices(shape=grid.level_to_shape(coarsest_level))
+ left_indices = grid.boundary_indices_across_levels(level_data, box_side="left", remove_edges=True)
+ right_indices = grid.boundary_indices_across_levels(level_data, box_side="right", remove_edges=True)
+ top_indices = grid.boundary_indices_across_levels(level_data, box_side="top", remove_edges=False)
+ bottom_indices = grid.boundary_indices_across_levels(level_data, box_side="bottom", remove_edges=False)
+ front_indices = grid.boundary_indices_across_levels(level_data, box_side="front", remove_edges=False)
+ back_indices = grid.boundary_indices_across_levels(level_data, box_side="back", remove_edges=False)
+
+ # Remove overlaps between the box-side index sets at each level so that every
+ # boundary voxel is claimed by exactly one boundary condition
+ filtered_front_indices = []
+ filtered_back_indices = []
+ filtered_top_indices = []
+ filtered_bottom_indices = []
+ for level in range(num_levels):
+ left_set = set(zip(*left_indices[level])) if left_indices[level] else set()
+ right_set = set(zip(*right_indices[level])) if right_indices[level] else set()
+ top_set = set(zip(*top_indices[level])) if top_indices[level] else set()
+ bottom_set = set(zip(*bottom_indices[level])) if bottom_indices[level] else set()
+ front_set = set(zip(*front_indices[level])) if front_indices[level] else set()
+ back_set = set(zip(*back_indices[level])) if back_indices[level] else set()
+ filtered_front_set = front_set - (top_set | bottom_set | left_set | right_set)
+ filtered_back_set = back_set - (top_set | bottom_set | left_set | right_set)
+ filtered_top_set = top_set - (left_set | right_set)
+ filtered_bottom_set = bottom_set - (left_set | right_set)
+ filtered_front_indices.append(
+ [list(coords) for coords in zip(*filtered_front_set)] if filtered_front_set else []
+ )
+ filtered_back_indices.append(
+ [list(coords) for coords in zip(*filtered_back_set)] if filtered_back_set else []
+ )
+ filtered_top_indices.append(
+ [list(coords) for coords in zip(*filtered_top_set)] if filtered_top_set else []
+ )
+ filtered_bottom_indices.append(
+ [list(coords) for coords in zip(*filtered_bottom_set)] if filtered_bottom_set else []
+ )
+
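+ # Tiny standalone illustration of the de-overlap above (hypothetical 2-D
+ # corner indices; the real sets hold per-level 3-D voxel indices):
+ #   top_set  = {(0, 5), (1, 5), (2, 5)}
+ #   left_set = {(0, 0), (0, 1), ..., (0, 5)}   # shares corner (0, 5) with top
+ #   top_set - (left_set | right_set) == {(1, 5), (2, 5)}
+ # so the shared corner voxel keeps a single boundary id instead of two.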
+ # Inlet is either RegularizedBC or a non-equilibrium-regularized Hybrid BC with a
+ # uniform value (the Hybrid form is used both with and without ground refinement)
+ if jsonfile['BCtypes']['inlet'] == "RegularizedBC":
+ bc_inlet = RegularizedBC("velocity",
+ #profile=bc_profile_new(),
+ prescribed_value=(ulb, 0.0, 0.0),
+ indices=left_indices,
+ )
+ else:
+ bc_inlet = HybridBC(
+ bc_method="nonequilibrium_regularized",
+ prescribed_value=(ulb, 0.0, 0.0),
+ indices=left_indices,
+ )
+
+ bc_outlet = DoNothingBC(indices=right_indices)
+
+ # Setup walls as moving or static, or fall back to FullwayBounceBack
+ if jsonfile['BCtypes']['walls'] == "moving":
+ bc_top = HybridBC(bc_method="nonequilibrium_regularized", prescribed_value=(ulb, 0.0, 0.0), indices=top_indices)
+ bc_front = HybridBC(bc_method="nonequilibrium_regularized", prescribed_value=(ulb, 0.0, 0.0), indices=filtered_front_indices)
+ bc_back = HybridBC(bc_method="nonequilibrium_regularized", prescribed_value=(ulb, 0.0, 0.0), indices=filtered_back_indices)
+ elif jsonfile['BCtypes']['walls'] == "static":
+ bc_top = HybridBC(bc_method="nonequilibrium_regularized", indices=top_indices)
+ bc_front = HybridBC(bc_method="nonequilibrium_regularized", indices=filtered_front_indices)
+ bc_back = HybridBC(bc_method="nonequilibrium_regularized", indices=filtered_back_indices)
+ else:
+ bc_top = FullwayBounceBackBC(indices=top_indices)
+ bc_front = FullwayBounceBackBC(indices=filtered_front_indices)
+ bc_back = FullwayBounceBackBC(indices=filtered_back_indices)
+
+ # Setup ground as moving or static, or fall back to FullwayBounceBack
+ if jsonfile['BCtypes']['ground'] == "moving":
+ bc_bottom = HybridBC(bc_method="nonequilibrium_regularized", prescribed_value=(ulb, 0.0, 0.0), indices=bottom_indices)
+ elif jsonfile['BCtypes']['ground'] == "static":
+ bc_bottom = HybridBC(bc_method="nonequilibrium_regularized", indices=bottom_indices)
+ else:
+ bc_bottom = FullwayBounceBackBC(indices=bottom_indices)
+
+ # Setup car as grads or non-eq
+ if jsonfile['BCtypes']['car'] == "bounceback_grads":
+ bc_body = HybridBC(
+ bc_method="bounceback_grads",
+ mesh_vertices=body_vertices,
+ voxelization_method=MeshVoxelizationMethod("AABB_CLOSE", close_voxels=jsonfile['mesher']['close_voxels']),
+ use_mesh_distance=True,
+ )
+ else:
+ bc_body = HybridBC(
+ bc_method="nonequilibrium_regularized",
+ mesh_vertices=body_vertices,
+ voxelization_method=MeshVoxelizationMethod("AABB_CLOSE", close_voxels=jsonfile['mesher']['close_voxels']),
+ use_mesh_distance=True,
+ )
+
+ # Setup Wheels as rotating or static
+ if wheel_vertices is not None:
+ wheel_bc = []
+ for wheel_vertice in wheel_vertices:
+ if jsonfile['BCtypes']['wheels'] == "bounceback_grads": # need to add a moving profile in here somehow
+ wheel_bc.append(HybridBC(
+ bc_method="bounceback_grads",
+ mesh_vertices=wheel_vertice,
+ voxelization_method=MeshVoxelizationMethod("AABB_CLOSE", close_voxels=jsonfile['mesher']['close_voxels']),
+ use_mesh_distance=True,
+ )
+ )
+ else:
+ wheel_bc.append(HybridBC(
+ bc_method="nonequilibrium_regularized",
+ mesh_vertices=wheel_vertice,
+ voxelization_method=MeshVoxelizationMethod("AABB_CLOSE", close_voxels=jsonfile['mesher']['close_voxels']),
+ use_mesh_distance=True,
) + ) + return wheel_bc + [bc_top, bc_bottom, bc_front, bc_back, bc_inlet, bc_outlet, bc_body] # Body must be last. Outlet must be second to last + else: + return [bc_top, bc_bottom, bc_front, bc_back, bc_inlet, bc_outlet, bc_body] # Body must be last. Outlet must be second to last + +# Utility Functions +# ================= +def print_lift_drag(sim, step, momentum_transfer, ulb, reference_area, voxel_size, drag_values): + """ + Calculate and print lift and drag coefficients. + """ + boundary_force = momentum_transfer(sim.f_0, sim.f_1, sim.bc_mask, sim.missing_mask) + drag = boundary_force[0] + lift = boundary_force[2] + cd = 2.0 * drag / (ulb**2 * reference_area) + cl = 2.0 * lift / (ulb**2 * reference_area) + if np.isnan(cd) or np.isnan(cl): + raise ValueError(f"NaN detected in coefficients at step {step}: Cd={cd}, Cl={cl}") + drag_values.append([cd, cl]) + # print(f"CD={cd:.3f}, CL={cl:.3f}, Drag Force (lattice units)={drag:.6f}") + return cd, cl, drag + +def plot_drag_lift(drag_values, output_dir, print_interval, script_name, percentile_range=(15, 85), use_log_scale=False): + """ + Plot CD and CL over time and save the plot to the output directory. + """ + drag_values_array = np.array(drag_values) + steps = np.arange(0, len(drag_values) * print_interval, print_interval) + cd_values = drag_values_array[:, 0] + cl_values = drag_values_array[:, 1] + y_min = min(np.percentile(cd_values, percentile_range[0]), np.percentile(cl_values, percentile_range[0])) + y_max = max(np.percentile(cd_values, percentile_range[1]), np.percentile(cl_values, percentile_range[1])) + padding = (y_max - y_min) * 0.1 + y_min, y_max = y_min - padding, y_max + padding + if use_log_scale: + y_min = max(y_min, 1e-6) + plt.figure(figsize=(10, 6)) + plt.plot(steps, cd_values, label='Drag Coefficient (Cd)', color='blue') + plt.plot(steps, cl_values, label='Lift Coefficient (Cl)', color='red') + plt.xlabel('Simulation Step') + plt.ylabel('Coefficient') + plt.title(f'{script_name}: Drag and Lift Coefficients Over Time') + plt.legend() + plt.grid(True) + plt.ylim(y_min, y_max) + if use_log_scale: + plt.yscale('log') + plt.savefig(os.path.join(output_dir, 'drag_lift_plot.png')) + plt.close() + +def compute_voxel_statistics_and_reference_area(sim, bc_mask_exporter, level_data, actual_num_levels, sparsity_pattern, boundary_conditions, voxel_size): + """ + Compute active/solid voxels, totals, lattice updates, and reference area based on simulation data. 
+ """ + # Compute macro fields + sim.macro(sim.f_0, sim.bc_mask, sim.rho, sim.u, streamId=0) + fields_data = bc_mask_exporter.get_fields_data({"bc_mask": sim.bc_mask}) + bc_mask_data = fields_data["bc_mask_0"] + level_id_field = bc_mask_exporter.level_id_field + + # Compute solid voxels per level (assuming 255 is the solid marker) + solid_voxels = [] + for lvl in range(actual_num_levels): + level_mask = level_id_field == lvl + solid_voxels.append(np.sum(bc_mask_data[level_mask] == 255)) + + # Compute active voxels (total non-zero in sparsity minus solids) + active_voxels = [np.count_nonzero(mask) for mask in sparsity_pattern] + active_voxels = [max(0, active_voxels[lvl] - solid_voxels[lvl]) for lvl in range(actual_num_levels)] + + # Totals + total_voxels = sum(active_voxels) + total_lattice_updates_per_step = sum(active_voxels[lvl] * (2 ** (actual_num_levels - 1 - lvl)) for lvl in range(actual_num_levels)) + + # Compute reference area (projected on YZ plane at finest level) + finest_level = 0 + mask_finest = level_id_field == finest_level + bc_mask_finest = bc_mask_data[mask_finest] + active_indices_finest = np.argwhere(level_data[0][0]) + bc_body_id = boundary_conditions[-1].id # Assuming last BC is bc_body + solid_voxels_indices = active_indices_finest[bc_mask_finest == bc_body_id] + unique_jk = np.unique(solid_voxels_indices[:, 1:3], axis=0) + reference_area = unique_jk.shape[0] + reference_area_physical = reference_area * (voxel_size ** 2) + + return { + "active_voxels": active_voxels, + "solid_voxels": solid_voxels, + "total_voxels": total_voxels, + "total_lattice_updates_per_step": total_lattice_updates_per_step, + "reference_area": reference_area, + "reference_area_physical": reference_area_physical + } + + +def save_slices(output_dir, grid_shape_zip, shift, h5exporter, delta_x_coarse,voxel_size, sim,jsonfile): + domainSize = np.array(grid_shape_zip) * voxel_size + outputSlices = jsonfile['outputSlices'] + # Map axis to plane normal + axis_to_normal = { + 'X': [1, 0, 0], + 'Y': [0, 1, 0], + 'Z': [0, 0, 1] + } + domain_min = -shift + domain_max = domain_min + domainSize + tic = time.time() + def compute_slice_bounds_relative_to_domain(origin, width, height, width_vec, height_vec, domain_min, domain_max, plane_normal): + # Build in-plane basis + n = np.array(plane_normal) / np.linalg.norm(plane_normal) + if np.allclose(n, [1, 0, 0]): + u1 = np.array([0, 1, 0]) + else: + u1 = np.array([1, 0, 0]) + u1 = u1 / np.linalg.norm(u1) + u2 = np.cross(n, u1) + u2 = u2 / np.linalg.norm(u2) + width_vec_norm = width_vec / np.linalg.norm(width_vec) + height_vec_norm = height_vec / np.linalg.norm(height_vec) + if np.dot(u1, width_vec_norm) < 0: + u1 = -u1 + if np.dot(u2, height_vec_norm) < 0: + u2 = -u2 + + # Use the lower-left corner of the slice as the reference point + ref_point = origin + + # Project domain corners onto the plane and compute in-plane coordinates + domain_corners = np.array([ + [domain_min[0], domain_min[1], domain_min[2]], + [domain_max[0], domain_min[1], domain_min[2]], + [domain_min[0], domain_max[1], domain_min[2]], + [domain_max[0], domain_max[1], domain_min[2]], + [domain_min[0], domain_min[1], domain_max[2]], + [domain_max[0], domain_min[1], domain_max[2]], + [domain_min[0], domain_max[1], domain_max[2]], + [domain_max[0], domain_max[1], domain_max[2]] + ]) + local_corners = [] + for corner in domain_corners: + # Project corner onto the plane + proj = corner - np.dot(corner - ref_point, n) * n + local_x = np.dot(proj - ref_point, u1) + local_y = np.dot(proj - ref_point, u2) 
+ local_corners.append([local_x, local_y]) + local_corners = np.array(local_corners) + domain_u_min, domain_u_max = local_corners[:, 0].min(), local_corners[:, 0].max() + domain_v_min, domain_v_max = local_corners[:, 1].min(), local_corners[:, 1].max() + + # Project slice corners onto the plane and compute in-plane coordinates + slice_corners = [ + origin, + origin + width * width_vec, + origin + height * height_vec, + origin + width * width_vec + height * height_vec + ] + slice_local = [] + for corner in slice_corners: + proj = corner - np.dot(corner - ref_point, n) * n + local_x = np.dot(proj - ref_point, u1) + local_y = np.dot(proj - ref_point, u2) + slice_local.append([local_x, local_y]) + slice_local = np.array(slice_local) + slice_u_min, slice_u_max = slice_local[:, 0].min(), slice_local[:, 0].max() + slice_v_min, slice_v_max = slice_local[:, 1].min(), slice_local[:, 1].max() + + # Convert to fractions + u_min = (slice_u_min - domain_u_min) / (domain_u_max - domain_u_min) + u_max = (slice_u_max - domain_u_min) / (domain_u_max - domain_u_min) + v_min = (slice_v_min - domain_v_min) / (domain_v_max - domain_v_min) + v_max = (slice_v_max - domain_v_min) / (domain_v_max - domain_v_min) + + return [max(0,u_min), min(1,u_max), max(0,v_min), min(1,v_max)] + + for slice_group in outputSlices: + field_name = slice_group['field'] + axis = slice_group['axis'] + height = slice_group['height'] + width = slice_group['width'] + # Extract vectors + height_vec = np.array([ + slice_group['heightVec']['x'], + slice_group['heightVec']['y'], + slice_group['heightVec']['z'] + ]) + width_vec = np.array([ + slice_group['widthVec']['x'], + slice_group['widthVec']['y'], + slice_group['widthVec']['z'] + ]) + + # Get plane normal + plane_normal = axis_to_normal[axis] + + # Process each origin + for idx, origin_dict in enumerate(slice_group['origin']): + # The origin / plane point is the lower-left corner of the slice + plane_point = np.array([ + origin_dict['x'], + origin_dict['y'], + origin_dict['z'] + ]) + + # Calculate bounds in model units + + # Calculate the bounds + # Since we're given absolute dimensions, we need to compute + # the bounds relative to the full domain extent in the plane + # For now, we'll use bounds [0, 1, 0, 1] to capture the full slice + # as defined by the width and height + bounds = [0, 1, 0, 1] + bounds_x, bounds_x2, bounds_y, bounds_y2 = compute_slice_bounds_relative_to_domain(plane_point, width, height, width_vec, height_vec, domain_min, domain_max, plane_normal) + print(f'bounds {bounds_x}, {bounds_x2}, {bounds_y}, {bounds_y2}') + # Alternatively, if you want to compute bounds relative to domain: + # You would need to: + # 1. Project domain extents onto the plane + # 2. Calculate where this slice sits within those extents + # 3. 
Set bounds accordingly + + # Generate output filename + output_filename = os.path.join( + output_dir, + f"{axis}_slice_{idx:03d}" + ) + + print(f"Generating slice: {output_filename}") + print(f" Axis: {axis}, Normal: {plane_normal}") + print(f" Plane point: {plane_point}") + print(f" Width: {width}, Height: {height}") + wp.synchronize() + + h5exporter.to_slice_image( + output_filename, + {"velocity": sim.u}, + plane_point=plane_point, + plane_normal=plane_normal, + grid_res=1000, + bounds=(bounds_x, bounds_x2, bounds_y, bounds_y2), + show_axes=False, + show_colorbar=False, + cmap=jsonfile['settings']['sliceColorMap'], + normalize=jsonfile['InletBC']['x'] * jsonfile['settings']['sliceFactor'], + slice_thickness=delta_x_coarse #needed when using model units + ) + print(f"Time to save all images {time.time()-tic} seconds. ") + + +def solve( + sim, + ulb, + num_steps, + h5exporter, + output_dir, + grid_shape_zip, + grid_shape_x_coarsest, + delta_x_coarse, + shift, + momentum_transfer, + reference_area, + voxel_size, + prescribed_velocity_phys, + total_lattice_updates_per_step, + jsonfile + ): + + # -------------------------- Simulation Loop -------------------------- + wp.synchronize() + print(f"\n*******\nSolver Started\n*******\n") + start_time = time.time() + solve_start = start_time + compute_time = 0.0 + steps_since_last_print = 0 + drag_values = [] + + # Calculate print and file output intervals + print_interval = max(1, int(num_steps * (jsonfile['settings']['solutionPrintFreq'] / 100.0))) + crossover_step = int(num_steps * (jsonfile['settings']['crossover'] / 100.0)) + file_output_interval_pre_crossover = max(1, int(crossover_step / jsonfile['settings']['preCrossover_frames'])) if jsonfile['settings']['preCrossover_frames'] > 0 else num_steps + 1 + file_output_interval_post_crossover = max(1, int((num_steps - crossover_step) / jsonfile['settings']['postCrossover_frames'])) if jsonfile['settings']['postCrossover_frames'] > 0 else num_steps + 1 + final_print_interval = max(1, int((num_steps-crossover_step) * (jsonfile['settings']['crossover'] / 100.0))) + + if jsonfile['settings']['debug']: + for step in range(num_steps): + solution_time =(time.time()-solve_start)/60 + step_start = time.time() + sim.step() + wp.synchronize() + compute_time += time.time() - step_start + steps_since_last_print += 1 + percent_complete = (step + 1) / num_steps * 100 + scm_progress(np.floor(percent_complete)) + end_time = time.time() + elapsed = end_time - start_time + time_out = False + if elapsed/60 >= jsonfile['settings']['limit']: + time_out = True + if (step % print_interval == 0 and step < crossover_step) or step == num_steps - 1 or time_out: + sim.macro(sim.f_0, sim.bc_mask, sim.rho, sim.u, streamId=0) + wp.synchronize() + cd, cl, drag = print_lift_drag(sim, step, momentum_transfer, ulb, reference_area, voxel_size, drag_values) + filename = os.path.join(output_dir, f"{jsonfile['outputName']}_{step:04d}") + h5exporter.to_slice_image( + filename, + {"velocity": sim.u}, + plane_point=(1, 0, 0), + plane_normal=(0, 1, 0), + grid_res=2000, + bounds=(0.25, 0.75, 0, 0.5), + show_axes=False, + show_colorbar=False, + slice_thickness=delta_x_coarse, #needed when using model units + normalize = prescribed_velocity_phys*jsonfile['settings']['sliceFactor'], + ) + + total_lattice_updates = total_lattice_updates_per_step * steps_since_last_print + MLUPS = total_lattice_updates / compute_time / 1e6 if compute_time > 0 else 0.0 + current_flow_passes = step * ulb / grid_shape_x_coarsest + remaining_steps = num_steps - 
step - 1
+ time_remaining = 0.0 if MLUPS == 0 else (total_lattice_updates_per_step * remaining_steps) / (MLUPS * 1e6)
+ hours, rem = divmod(time_remaining, 3600)
+ minutes, seconds = divmod(rem, 60)
+ time_remaining_str = f"{int(hours):02d}h {int(minutes):02d}m {int(seconds):02d}s"
+
+ print(f"Completed step {step}/{num_steps} ({percent_complete:.2f}% complete)")
+ print(f" Flow Passes: {current_flow_passes:.2f}")
+ print(f" Time elapsed: {elapsed:.1f}s, Compute time: {compute_time:.1f}s, ETA: {time_remaining_str}")
+ print(f" MLUPS: {MLUPS:.1f}")
+ print(f" Cd={cd:.3f}, Cl={cl:.3f}, Drag Force (lattice units)={drag:.3f}")
+ #start_time = time.time()
+ compute_time = 0.0
+ steps_since_last_print = 0
+ scm_results_available()
+ if time_out:
+ wp.synchronize()
+ save_slices(output_dir, grid_shape_zip, shift, h5exporter, delta_x_coarse, voxel_size, sim, jsonfile)
+ sim.macro(sim.f_0, sim.bc_mask, sim.rho, sim.u, streamId=0)
+ filename = os.path.join(output_dir, f"{jsonfile['outputName']}_{step:04d}")
+ h5exporter.to_hdf5(filename, {"velocity": sim.u, "density": sim.rho}, compression="gzip", compression_opts=1)
+
+ with open(os.path.join(output_dir, "project.log"), 'a') as fd:
+ fd.write(f"*** Solution Timed out ***\n")
+ fd.write(f"Actual iterations: {step}\n")
+ print('Time limit reached')
+ break
+
+ file_output_interval = file_output_interval_pre_crossover if step < crossover_step else file_output_interval_post_crossover
+ if step % file_output_interval == 0 or step == num_steps - 1:
+ sim.macro(sim.f_0, sim.bc_mask, sim.rho, sim.u, streamId=0)
+ filename = os.path.join(output_dir, f"{jsonfile['outputName']}_{step:04d}")
+ h5exporter.to_hdf5(filename, {"velocity": sim.u, "density": sim.rho}, compression="gzip", compression_opts=1)
+ wp.synchronize()
+ if step >= crossover_step and step % final_print_interval == 0:
+ sim.macro(sim.f_0, sim.bc_mask, sim.rho, sim.u, streamId=0)
+ wp.synchronize()
+ cd, cl, drag = print_lift_drag(sim, step, momentum_transfer, ulb, reference_area, voxel_size, drag_values)
+ print(f"Completed step {step}/{num_steps} ")
+ print(f" Cd= {cd:.3f}, Cl= {cl:.3f}, Drag Force (lattice units)={drag:.3f}")
+ filename = os.path.join(output_dir, f"{jsonfile['outputName']}_{step:04d}")
+ h5exporter.to_slice_image(
+ filename,
+ {"velocity": sim.u},
+ plane_point=(1, 0, 0),
+ plane_normal=(0, 1, 0),
+ grid_res=2000,
+ bounds=(0, 1, 0, 1),
+ show_axes=False,
+ show_colorbar=False,
+ slice_thickness=delta_x_coarse, # needed when using model units
+ normalize=prescribed_velocity_phys*jsonfile['settings']['sliceFactor'],
+ )
+
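+ # Worked check of the throughput bookkeeping in the loop above (numbers
+ # hypothetical): with total_lattice_updates_per_step = 5.0e7,
+ # steps_since_last_print = 100 and compute_time = 20.0 s,
+ #   MLUPS = 5.0e7 * 100 / 20.0 / 1e6 = 250.0
+ # and the ETA charges each remaining step at
+ #   5.0e7 / (250.0 * 1e6) = 0.2 s of wall time.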
+ # Save drag and lift data to CSV
+ if len(drag_values) > 0:
+ with open(os.path.join(output_dir, "drag_lift.csv"), 'w') as fd:
+ fd.write("Step,Cd,Cl\n")
+ for i, (cd, cl) in enumerate(drag_values):
+ fd.write(f"{i * print_interval},{cd},{cl}\n")
+ plot_drag_lift(drag_values, output_dir, print_interval, jsonfile['outputName'])
+
+ # Calculate and print average Cd and Cl over the post-crossover window
+ drag_values_array = np.array(drag_values)
+
+ start_index = int(len(drag_values) * (jsonfile['settings']['crossover'] / 100.0))
+ last_half = drag_values_array[start_index:, :]
+ avg_cd = np.mean(last_half[:, 0])
+ avg_cl = np.mean(last_half[:, 1])
+ epsilon = 1e-6
+ target_cd = jsonfile['vehicle']['targets']['cd'] + epsilon
+ target_cl = jsonfile['vehicle']['targets']['cl'] + epsilon
+ print(f"Experimental Drag Coefficient (Cd): {target_cd}\n"
+ f"Average Drag Coefficient (Cd) for last {100-jsonfile['settings']['crossover']}%: {avg_cd:.6f}\n"
+ f"Average Lift Coefficient (Cl) for last {100-jsonfile['settings']['crossover']}%: {avg_cl:.6f}\n"
+ f"Error Drag Coefficient (Cd): {((avg_cd-target_cd)/target_cd)*100:.2f}%\n"
+ f"Error Lift Coefficient (Cl): {((avg_cl-target_cl)/target_cl)*100:.2f}%\n"
+ )
+
+ with open(os.path.join(output_dir, "project.log"), 'a') as fd:
+ fd.write(f"Average Drag Coefficient (Cd) for last {100-jsonfile['settings']['crossover']}%: {avg_cd:.6f}\n")
+ fd.write(f"Average Lift Coefficient (Cl) for last {100-jsonfile['settings']['crossover']}%: {avg_cl:.6f}\n")
+ fd.write(f"Error Drag Coefficient (Cd): {((avg_cd-target_cd)/target_cd)*100:.2f}%\n")
+ fd.write(f"Error Lift Coefficient (Cl): {((avg_cl-target_cl)/target_cl)*100:.2f}%\n")
+ fd.write(f'Total Solution Time: {(time.time()-solve_start)/60:.3f} min\n')
+
+ save_slices(output_dir, grid_shape_zip, shift, h5exporter, delta_x_coarse, voxel_size, sim, jsonfile)
+ with open(os.path.join(output_dir, "source.json"), 'w') as file:
+ json.dump(jsonfile, file, indent=4) # indent for pretty-printing
+ print(f"Source Json written to {os.path.join(output_dir, 'source.json')} successfully.")
+
+ scm_results_available(True)
+ # Customer style run (no extra debug outputs)
+ # Runs setup and then only takes data from crossover to end
+ else:
+ print_interval = max(1, int((num_steps-crossover_step) * (jsonfile['settings']['solutionPrintFreq'] / 100.0)))
+
+ for step in range(num_steps):
+ end_time = time.time()
+ elapsed = end_time - start_time
+ sim.step()
+ wp.synchronize()
+ percent_complete = (step + 1) / num_steps * 100
+ scm_progress(np.floor(percent_complete))
+
+ if elapsed/60 >= jsonfile['settings']['limit']:
+ sim.macro(sim.f_0, sim.bc_mask, sim.rho, sim.u, streamId=0)
+ wp.synchronize()
+ cd, cl, drag = print_lift_drag(sim, step, momentum_transfer, ulb, reference_area, voxel_size, drag_values)
+
+ with open(os.path.join(output_dir, "project.log"), 'a') as fd:
+ fd.write(f"*** Solution Time Reached ***\n")
+ fd.write(f"Actual iterations: {step}\n")
+ print('Time limit reached')
+ save_slices(output_dir, grid_shape_zip, shift, h5exporter, delta_x_coarse, voxel_size, sim, jsonfile)
+ if jsonfile['settings']['fullData'] == True:
+ wp.synchronize()
+ sim.macro(sim.f_0, sim.bc_mask, sim.rho, sim.u, streamId=0)
+ filename = os.path.join(output_dir, f"{jsonfile['outputName']}_{step:04d}")
+ h5exporter.to_hdf5(filename, {"velocity": sim.u, "density": sim.rho}, compression="gzip", compression_opts=1)
+
+ scm_results_available()
+ break
+
+ if step >= crossover_step:
+ if step % print_interval == 0 or step == num_steps - 1:
+ sim.macro(sim.f_0, sim.bc_mask, sim.rho, sim.u, streamId=0)
+ wp.synchronize()
+ cd, cl, drag = print_lift_drag(sim, step, momentum_transfer, ulb, reference_area, voxel_size, drag_values)
+ scm_results_available()
+
+ if (step == num_steps - 1) and (jsonfile['settings']['fullData'] == True):
+ wp.synchronize()
+ sim.macro(sim.f_0, sim.bc_mask, sim.rho, sim.u, streamId=0)
+ filename = os.path.join(output_dir, f"{jsonfile['outputName']}_{step:04d}")
+ h5exporter.to_hdf5(filename, {"velocity": sim.u, "density": sim.rho}, compression="gzip", compression_opts=0)
+
+ # Save drag and lift data to CSV
+ if len(drag_values) > 0:
+ with open(os.path.join(output_dir, "drag_lift.csv"), 'w') as fd:
+ fd.write("Step,Cd,Cl\n")
+ for i, (cd, cl) in enumerate(drag_values):
+ fd.write(f"{i * print_interval},{cd},{cl}\n")
+ plot_drag_lift(drag_values, output_dir, print_interval, jsonfile['outputName'])
+
+ # Calculate and print average Cd and Cl over the sampled (post-crossover) window
+ drag_values_array = np.array(drag_values)
+
+ #start_index = int(len(drag_values) * (jsonfile['settings']['crossover'] / 100.0))
+ #last_half = drag_values_array[start_index:, :]
+ avg_cd = np.mean(drag_values_array[:, 0])
+ avg_cl = np.mean(drag_values_array[:, 1])
+ epsilon = 1e-6
+ target_cd = jsonfile['vehicle']['targets']['cd'] + epsilon
+ target_cl = jsonfile['vehicle']['targets']['cl'] + epsilon
+ print(f"Experimental Drag Coefficient (Cd): {target_cd}\n"
+ f"Average Drag Coefficient (Cd) for last {100-jsonfile['settings']['crossover']}%: {avg_cd:.6f}\n"
+ f"Average Lift Coefficient (Cl) for last {100-jsonfile['settings']['crossover']}%: {avg_cl:.6f}\n"
+ f"Error Drag Coefficient (Cd): {((avg_cd-target_cd)/target_cd)*100:.2f}%\n"
+ f"Error Lift Coefficient (Cl): {((avg_cl-target_cl)/target_cl)*100:.2f}%\n"
+ )
+
+ with open(os.path.join(output_dir, "project.log"), 'a') as fd:
+ fd.write(f"Average Drag Coefficient (Cd) for last {100-jsonfile['settings']['crossover']}%: {avg_cd:.6f}\n")
+ fd.write(f"Average Lift Coefficient (Cl) for last {100-jsonfile['settings']['crossover']}%: {avg_cl:.6f}\n")
+ fd.write(f"Error Drag Coefficient (Cd): {((avg_cd-target_cd)/target_cd)*100:.2f}%\n")
+ fd.write(f"Error Lift Coefficient (Cl): {((avg_cl-target_cl)/target_cl)*100:.2f}%\n")
+ fd.write(f'Total Solution Time: {(time.time()-solve_start)/60:.3f} min\n')
+ save_slices(output_dir, grid_shape_zip, shift, h5exporter, delta_x_coarse, voxel_size, sim, jsonfile)
+
+ scm_results_available(True)
+
+
+
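+# Example invocation (illustrative; the file name and values are hypothetical,
+# but the keys are the ones read by prep_inputs/mesh_prep/solve above):
+#   python examples/windtunnel_json.py -i project.json
+# with project.json shaped like:
+#   {
+#     "outputName": "run1",
+#     "settings": {"voxelSize": 0.0025, "ulb": 0.05, "flowPasses": 3, ...},
+#     "InletBC": {"x": 38.0},
+#     "fluid": {"density": 1.204, "viscosity": 1.82e-5},
+#     "vehicle": {"body": "body.stl", "wheels": [], "targets": {"cd": 0.3, "cl": 0.1}},
+#     "mesher": {"type": "mres", ...},
+#     "BCtypes": {"inlet": "RegularizedBC", "walls": "static", "ground": "moving",
+#                 "car": "bounceback_grads", "wheels": "bounceback_grads"},
+#     "outputSlices": [...]
+#   }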
+ """ + + logging.basicConfig( + level=logging.INFO, + format='%(asctime)s %(levelname)s %(name)s: %(message)s', + handlers=[ + logging.StreamHandler(sys.stdout) + ] + ) + + input_file = '' + usage = 'windtunnel_json.py -i ' + + logging.info('Welcome to Studio Wind Tunnel Solver') + + try: + opts, _ = getopt.getopt(argv, "hi:o:", ["ifile="]) + except getopt.GetoptError: + logging.error(usage) + scm_set_error(64, 'Argument error') + return 64 + + for opt, arg in opts: + if opt == '-h': + logging.info(usage) + return 64 + + if opt in ("-i", "--ifile"): + input_file = arg + + if not input_file: + logging.error('Error: Input JSON file must be specified.\n' + usage) + scm_set_error(64, 'Input file not specified') + return 64 + + try: + if running_via_scm(): + log_file_scm = os.path.join(os.path.dirname(os.path.abspath(input_file)), 'solve.log') + scm_log_handler = logging.FileHandler(log_file_scm, mode='w') + scm_log_handler.setLevel(logging.INFO) + scm_log_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(name)s: %(message)s')) + logging.getLogger().addHandler(scm_log_handler) + logging.info('SCM Log file: {}'.format(log_file_scm)) + + logging.info('Input file: {}'.format(input_file)) + + scm_init() + + prep_inputs(input_file) + + scm_complete() + except Exception as e: + logging.error(f'Exception occured: {e}') + scm_set_error(1, f'Job failed: {e}') + scm_cancel_heartbeat() + return 1 + + return 0 + +if __name__ == "__main__": + sys.exit(main(sys.argv[1:])) diff --git a/xlb/helper/simulation_manager.py b/xlb/helper/simulation_manager.py index a9c994d0..cc70a0ea 100644 --- a/xlb/helper/simulation_manager.py +++ b/xlb/helper/simulation_manager.py @@ -12,7 +12,7 @@ class MultiresSimulationManager(MultiresIncompressibleNavierStokesStepper): def __init__( self, - omega, + omega_finest, grid, boundary_conditions=[], collision_type="BGK", @@ -24,8 +24,8 @@ def __init__( super().__init__(grid, boundary_conditions, collision_type, forcing_scheme, force_vector) self.initializer = initializer - self.omega = omega self.count_levels = grid.count_levels + self.omega_list = [self.compute_omega(omega_finest, level) for level in range(self.count_levels)] self.mres_perf_opt = mres_perf_opt # Create fields self.rho = grid.create_field(cardinality=1, dtype=self.precision_policy.store_precision) @@ -51,6 +51,27 @@ def __init__( # Construct the stepper skeleton self._construct_stepper_skeleton() + def compute_omega(self, omega_finest, level): + """ + Compute the relaxation parameter omega at a given grid level based on the finest level omega. + We select a refinement ratio of 2 where a coarse cell at level L is uniformly divided into 2^d cells + where d is the dimension. to arrive at level L - 1, or in other words ∆x_{L-1} = ∆x_L/2. + For neighboring cells that interface two grid levels, a maximum jump in grid level of ∆L = 1 is + allowed. Due to acoustic scaling which requires the speed of sound cs to remain constant across various grid levels, + ∆tL ∝ ∆xL and hence ∆t_{L-1} = ∆t_{L}/2. In addition, the fluid viscosity \nu must also remain constant on each + grid level which leads to the following relationship for the relaxation parameter omega at grid level L base + on the finest grid level omega_finest. + + Args: + omega_finest: Relaxation parameter at the finest grid level. + level: Current grid level (0-indexed, with 0 being the finest level). + + Returns: + Relaxation parameter omega at the specified grid level. 
+ """ + omega0 = omega_finest + return 2 ** (level + 1) * omega0 / ((2**level - 1.0) * omega0 + 2.0) + def export_macroscopic(self, fname_prefix): print(f"exporting macroscopic: #levels {self.count_levels}") self.macro(self.f_0, self.bc_mask, self.rho, self.u, streamId=0) @@ -74,6 +95,10 @@ def _construct_stepper_skeleton(self): def recursion_reference(level, app): if level < 0: return + + # Compute omega at the current level + omega = self.omega_list[level] + print(f"RECURSION down to level {level}") print(f"RECURSION Level {level}, COLLIDE") @@ -85,7 +110,7 @@ def recursion_reference(level, app): f_1=self.f_1, bc_mask=self.bc_mask, missing_mask=self.missing_mask, - omega=self.omega, + omega=omega, timestep=0, ) @@ -110,6 +135,9 @@ def recursion_fused_finest(level, app): if level < 0: return + # Compute omega at the current level + omega = self.omega_list[level] + if level == 0: print(f"RECURSION down to the finest level {level}") print(f"RECURSION Level {level}, Fused STREAM and COLLIDE") @@ -121,7 +149,7 @@ def recursion_fused_finest(level, app): f_1=self.f_1, bc_mask=self.bc_mask, missing_mask=self.missing_mask, - omega=self.omega, + omega=omega, timestep=0, is_f1_the_explosion_src_field=True, ) @@ -133,7 +161,7 @@ def recursion_fused_finest(level, app): f_1=self.f_0, bc_mask=self.bc_mask, missing_mask=self.missing_mask, - omega=self.omega, + omega=omega, timestep=0, is_f1_the_explosion_src_field=False, ) @@ -150,7 +178,7 @@ def recursion_fused_finest(level, app): f_1=self.f_1, bc_mask=self.bc_mask, missing_mask=self.missing_mask, - omega=self.omega, + omega=omega, timestep=0, ) # 1. Accumulation is read from f_0 in the streaming step, where f_0=self.f_1. @@ -186,4 +214,4 @@ def recursion_fused_finest(level, app): bk = self.grid.get_neon_backend() self.sk = neon.Skeleton(backend=bk) - self.sk.sequence("mres_nse_stepper", self.app) + self.sk.sequence("mres_nse_stepper", self.app) \ No newline at end of file diff --git a/xlb/operator/boundary_condition/bc_extrapolation_outflow.py b/xlb/operator/boundary_condition/bc_extrapolation_outflow.py index 4574f3a0..f41532cc 100644 --- a/xlb/operator/boundary_condition/bc_extrapolation_outflow.py +++ b/xlb/operator/boundary_condition/bc_extrapolation_outflow.py @@ -63,6 +63,8 @@ def __init__( # Unpack the two warp functionals needed for this BC! if self.compute_backend == ComputeBackend.WARP: self.warp_functional, self.assemble_auxiliary_data = self.warp_functional + elif self.compute_backend == ComputeBackend.NEON: + self.neon_functional, self.assemble_auxiliary_data = self.neon_functional def _get_normal_vectors(self, indices): # Get the frequency count and most common element directly @@ -173,7 +175,7 @@ def functional( return _f @wp.func - def assemble_auxiliary_data( + def assemble_auxiliary_data_warp( index: Any, timestep: Any, missing_mask: Any, @@ -199,7 +201,40 @@ def assemble_auxiliary_data( _f[_opp_indices[l]] = (self.compute_dtype(1.0) - sound_speed) * _f_pre[l] + sound_speed * f_aux return _f + @wp.func + def assemble_auxiliary_data_neon( + index: Any, + timestep: Any, + missing_mask: Any, + f_0: Any, + f_1: Any, + _f_pre: Any, + _f_post: Any, + level: Any = 0, + ): + # Prepare time-dependent dynamic data for imposing the boundary condition in the next iteration after streaming. + # We use directions that leave the domain for storing this prepared data. 
+ @wp.func
+ def assemble_auxiliary_data_neon(
+ index: Any,
+ timestep: Any,
+ missing_mask: Any,
+ f_0: Any,
+ f_1: Any,
+ _f_pre: Any,
+ _f_post: Any,
+ level: Any = 0,
+ ):
+ # Prepare time-dependent dynamic data for imposing the boundary condition in the next iteration after streaming.
+ # We use directions that leave the domain for storing this prepared data.
+ # Since this function is called post-collision: f_pre = f_post_stream and f_post = f_post_collision
+ _f = _f_post
+ nv = get_normal_vectors(missing_mask)
+ for lattice_dir in range(self.velocity_set.q):
+ if missing_mask[lattice_dir] == wp.uint8(1):
+ # f_0 is the post-collision values of the current time-step
+ # Get pull index associated with the "neighbours" pull_index
+ offset = wp.vec3i(-_c[0, lattice_dir], -_c[1, lattice_dir], -_c[2, lattice_dir])
+ for d in range(self.velocity_set.d):
+ offset[d] = offset[d] - nv[d]
+ offset_pull_index = wp.neon_ngh_idx(wp.int8(offset[0]), wp.int8(offset[1]), wp.int8(offset[2]))
+
+ # The following is the post-streaming values of the neighbor cell
+ # This function reads a field value at a given neighboring index and direction.
+ unused_is_valid = wp.bool(False)
+ f_aux = self.compute_dtype(wp.neon_read_ngh(f_0, index, offset_pull_index, lattice_dir, self.compute_dtype(0.0), unused_is_valid))
+ _f[_opp_indices[lattice_dir]] = (self.compute_dtype(1.0) - sound_speed) * _f_pre[lattice_dir] + sound_speed * f_aux
+ return _f
+
 kernel = self._construct_kernel(functional)
+ assemble_auxiliary_data = assemble_auxiliary_data_warp if self.compute_backend == ComputeBackend.WARP else assemble_auxiliary_data_neon
 return (functional, assemble_auxiliary_data), kernel
@@ -212,3 +247,12 @@ def warp_implementation(self, _f_pre, _f_post, bc_mask, missing_mask):
 dim=_f_pre.shape[1:],
 )
 return _f_post
+
+ def _construct_neon(self):
+ functional, _ = self._construct_warp()
+ return functional, None
+
+ @Operator.register_backend(ComputeBackend.NEON)
+ def neon_implementation(self, f_pre, f_post, bc_mask, missing_mask):
+ # Raise an exception as this feature is not implemented yet
+ raise NotImplementedError("This feature is not implemented in XLB with the NEON backend yet.")
\ No newline at end of file
diff --git a/xlb/operator/boundary_condition/bc_hybrid.py b/xlb/operator/boundary_condition/bc_hybrid.py
index e6623c0f..0e6d013f 100644
--- a/xlb/operator/boundary_condition/bc_hybrid.py
+++ b/xlb/operator/boundary_condition/bc_hybrid.py
@@ -276,7 +276,12 @@ def hybrid_bounceback_grads(
 rho, u = self.macroscopic.warp_functional(f_post)
 # Compute Grad's approximation using full equation as in Eq (10) of Dorschner et al.
- f_post = self.bc_helper.grads_approximate_fpop(_missing_mask, rho, u, f_post)
+ f_post = self.bc_helper.grads_approximate_fpop(
+ _missing_mask,
+ rho,
+ u,
+ f_post,
+ )
 return f_post
 @wp.func
@@ -297,7 +302,8 @@ def hybrid_nonequilibrium_regularized(
 # boundaries in the lattice Boltzmann method. Physical Review E 77, 056703.
 # Apply interpolated bounceback first to find missing populations at the boundary
- u_wall = self.profile_functional(f_1, index, timestep)
+ u_wall = self.profile_functional(f_1, index, timestep)
+
 f_post = self.bc_helper.interpolated_nonequilibrium_bounceback(
 index,
 _missing_mask,
diff --git a/xlb/operator/boundary_condition/boundary_condition.py b/xlb/operator/boundary_condition/boundary_condition.py
index a949ef35..24e0e635 100644
--- a/xlb/operator/boundary_condition/boundary_condition.py
+++ b/xlb/operator/boundary_condition/boundary_condition.py
@@ -78,25 +78,22 @@ def __init__(
 # Currently we support three methods based on (a) aabb method (b) ray casting and (c) winding number.
self.voxelization_method = voxelization_method - if self.compute_backend == ComputeBackend.WARP: - # Set local constants TODO: This is a hack and should be fixed with warp update - _f_vec = wp.vec(self.velocity_set.q, dtype=self.compute_dtype) - _missing_mask_vec = wp.vec(self.velocity_set.q, dtype=wp.uint8) # TODO fix vec bool - - @wp.func - def assemble_auxiliary_data( - index: Any, - timestep: Any, - missing_mask: Any, - f_0: Any, - f_1: Any, - f_pre: Any, - f_post: Any, - ): - return f_post + # Construct a default warp functional for assembling auxiliary data if needed + if self.compute_backend in [ComputeBackend.WARP, ComputeBackend.NEON]: + + @wp.func + def assemble_auxiliary_data( + index: Any, + timestep: Any, + missing_mask: Any, + f_0: Any, + f_1: Any, + f_pre: Any, + f_post: Any, + level: Any = 0, + ): + return f_post - # Construct some helper warp functions for getting tid data - if self.compute_backend == ComputeBackend.WARP: self.assemble_auxiliary_data = assemble_auxiliary_data def pad_indices(self): @@ -156,4 +153,4 @@ def kernel( for l in range(self.velocity_set.q): f_post[l, index[0], index[1], index[2]] = self.store_dtype(_f[l]) - return kernel + return kernel \ No newline at end of file diff --git a/xlb/operator/boundary_condition/helper_functions_bc.py b/xlb/operator/boundary_condition/helper_functions_bc.py index 8e7ae7b9..24038866 100644 --- a/xlb/operator/boundary_condition/helper_functions_bc.py +++ b/xlb/operator/boundary_condition/helper_functions_bc.py @@ -143,7 +143,8 @@ def regularize_fpop( """ # Compute momentum flux of off-equilibrium populations for regularization: Pi^1 = Pi^{neq} f_neq = fpop - feq - PiNeq = momentum_flux.warp_functional(f_neq) + PiNeq = momentum_flux.warp_functional(f_neq) + epsilon = compute_dtype(1e-7) # Compute double dot product Qi:Pi1 (where Pi1 = PiNeq) nt = _d * (_d + 1) // 2 @@ -156,6 +157,7 @@ def regularize_fpop( # fneq ~ f^1 fpop1 = compute_dtype(4.5) * _w[l] * QiPi1 fpop[l] = feq[l] + fpop1 + fpop[l] = wp.max(epsilon, fpop[l]) return fpop @wp.func @@ -176,33 +178,46 @@ def grads_approximate_fpop( # Compute pressure tensor Pi using all f_post-streaming values Pi = momentum_flux.warp_functional(f_post) + epsilon = compute_dtype(1e-7) + zero = compute_dtype(0.0) + one = compute_dtype(1.0) + three = compute_dtype(3.0) + four_pt_five = compute_dtype(4.5) + missing_count = zero + + for l in range(_q): + if _missing_mask[l] == wp.uint8(1): + missing_count += one + scale = one - ((one + missing_count) / compute_dtype(_q)) # Compute double dot product Qi:Pi1 (where Pi1 = PiNeq) nt = _d * (_d + 1) // 2 for l in range(_q): if _missing_mask[l] == wp.uint8(1): # compute dot product of qi and Pi - QiPi = compute_dtype(0.0) + QiPi = zero for t in range(nt): if t == 0 or t == 3 or t == 5: - QiPi += _qi[l, t] * (Pi[t] - rho / compute_dtype(3.0)) + QiPi += _qi[l, t] * (Pi[t] - rho / three) else: QiPi += _qi[l, t] * Pi[t] # Compute c.u - cu = compute_dtype(0.0) + cu = zero for d in range(_d): - if _c[d, l] == 1: - cu += u[d] - elif _c[d, l] == -1: - cu -= u[d] - cu *= compute_dtype(3.0) + cu += _c_float[d, l] * u[d] + cu *= three # change f_post using the Grad's approximation - f_post[l] = rho * _w[l] * (compute_dtype(1.0) + cu) + _w[l] * compute_dtype(4.5) * QiPi + f_post[l] = rho * _w[l] * (one + cu) + _w[l] * four_pt_five * QiPi * scale - return f_post + f_post[l] = wp.max(epsilon, f_post[l]) + else: + f_post[l] = wp.max(epsilon, f_post[l]) + return f_post + + @wp.func def moving_wall_fpop_correction( u_wall: Any, @@ -280,6 +295,7 @@ def 
interpolated_nonequilibrium_bounceback(
 needs_mesh_distance: bool,
 ):
 # Compute density, velocity using all f_post-collision values
+ epsilon = compute_dtype(1e-7)
 rho, u = macroscopic.warp_functional(f_pre)
 feq = equilibrium.warp_functional(rho, u)
@@ -291,6 +307,7 @@ def interpolated_nonequilibrium_bounceback(
 # Apply method in Tao et al (2018) [1] to find missing populations at the boundary
 one = compute_dtype(1.0)
+ half = compute_dtype(0.5)
 for l in range(_q):
 # If the mask is missing then take the opposite index
 if _missing_mask[l] == wp.uint8(1):
@@ -299,7 +316,7 @@ def interpolated_nonequilibrium_bounceback(
 # use weights associated with curved boundaries that are properly stored in f_1.
 weight = compute_dtype(self.distance_decoder_function(f_1, index, l))
 else:
- weight = compute_dtype(0.5)
+ weight = half
 # Use non-equilibrium bounceback to find f_missing:
 fneq = f_pre[_opp_indices[l]] - feq[_opp_indices[l]]
@@ -313,6 +330,10 @@ def interpolated_nonequilibrium_bounceback(
 f_wall = feq_wall[l] + fneq
 f_post[l] = (f_wall + weight * f_pre[l]) / (one + weight)
+ f_post[l] = wp.max(epsilon, f_post[l])
+ else:
+ f_post[l] = wp.max(epsilon, f_post[l])
+
 return f_post
 @wp.func
diff --git a/xlb/operator/collision/kbc.py b/xlb/operator/collision/kbc.py
index c6528a3d..fcb910df 100644
--- a/xlb/operator/collision/kbc.py
+++ b/xlb/operator/collision/kbc.py
@@ -29,7 +29,7 @@ def __init__(
 compute_backend=None,
 ):
 self.momentum_flux = MomentumFlux()
- self.epsilon = 1e-32
+ self.epsilon = 1e-7
 super().__init__(
 velocity_set=velocity_set,
@@ -218,34 +218,40 @@ def decompose_shear_d3q27(
 s = _f_vec()
 # For c = (i, 0, 0), c = (0, j, 0) and c = (0, 0, k)
- two = self.compute_dtype(2.0)
- four = self.compute_dtype(4.0)
- six = self.compute_dtype(6.0)
-
- s[9] = (two * nxz - nyz) / six
- s[18] = (two * nxz - nyz) / six
- s[3] = (-nxz + two * nyz) / six
- s[6] = (-nxz + two * nyz) / six
- s[1] = (-nxz - nyz) / six
- s[2] = (-nxz - nyz) / six
+ _two = self.compute_dtype(2.0)
+ _inv_four = self.compute_dtype(1.0/4.0)
+ _inv_six = self.compute_dtype(1.0/6.0)
+ shear1_inv6 = (_two * nxz - nyz) * _inv_six
+ shear2_inv6 = (-nxz + _two * nyz) * _inv_six
+ shear3_inv6 = (-nxz - nyz) * _inv_six
+ pi_invFour1 = pi[1] * _inv_four
+ pi_invFour2 = pi[2] * _inv_four
+ pi_invFour4 = pi[4] * _inv_four
+
+ s[9] = shear1_inv6
+ s[18] = shear1_inv6
+ s[3] = shear2_inv6
+ s[6] = shear2_inv6
+ s[1] = shear3_inv6
+ s[2] = shear3_inv6
 # For c = (i, j, 0)
- s[12] = pi[1] / four
- s[24] = pi[1] / four
- s[21] = -pi[1] / four
- s[15] = -pi[1] / four
+ s[12] = pi_invFour1
+ s[24] = pi_invFour1
+ s[21] = -pi_invFour1
+ s[15] = -pi_invFour1
 # For c = (i, 0, k)
- s[10] = pi[2] / four
- s[20] = pi[2] / four
- s[19] = -pi[2] / four
- s[11] = -pi[2] / four
+ s[10] = pi_invFour2
+ s[20] = pi_invFour2
+ s[19] = -pi_invFour2
+ s[11] = -pi_invFour2
 # For c = (0, j, k)
- s[8] = pi[4] / four
- s[4] = pi[4] / four
- s[7] = -pi[4] / four
- s[5] = -pi[4] / four
+ s[8] = pi_invFour4
+ s[4] = pi_invFour4
+ s[7] = -pi_invFour4
+ s[5] = -pi_invFour4
 return s
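+# Why compute_entropic_scalar_products (next hunk) switches to compensated
+# summation: in FP32 the q mixed-sign terms of these dot products can nearly
+# cancel, and a naive running sum loses the small residual the entropic
+# stabilizer depends on. Standalone sketch of the Neumaier scheme used:
+#   s, c = 0.0, 0.0
+#   for x in terms:
+#       t = s + x
+#       c += ((s - t) + x) if abs(s) >= abs(x) else ((x - t) + s)
+#       s = t
+#   total = s + c   # s plus the accumulated rounding error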
= t1 + sp1 = sum_val1 + c1 + + # Neumaier summation for sp2 + sum_val2 = self.compute_dtype(0.0) + c2 = self.compute_dtype(0.0) + for i in range(self.velocity_set.q): + x2 = temp[i] * delta_h[i] + t2 = sum_val2 + x2 + if wp.abs(sum_val2) >= wp.abs(x2): + c2 += (sum_val2 - t2) + x2 + else: + c2 += (x2 - t2) + sum_val2 + sum_val2 = t2 + sp2 = sum_val2 + c2 + return sp1, sp2 # Construct the functional diff --git a/xlb/operator/equilibrium/quadratic_equilibrium.py b/xlb/operator/equilibrium/quadratic_equilibrium.py index 646d1425..9266e527 100644 --- a/xlb/operator/equilibrium/quadratic_equilibrium.py +++ b/xlb/operator/equilibrium/quadratic_equilibrium.py @@ -33,13 +33,247 @@ def jax_implementation(self, rho, u): def _construct_warp(self): # Set local constants TODO: This is a hack and should be fixed with warp update _c = self.velocity_set.c + _d = self.velocity_set.d + _q = self.velocity_set.q _w = self.velocity_set.w _f_vec = wp.vec(self.velocity_set.q, dtype=self.compute_dtype) _u_vec = wp.vec(self.velocity_set.d, dtype=self.compute_dtype) # Construct the equilibrium functional + # D2Q9 Kernel (2D, 9 directions) @wp.func - def functional( + def functional_d2q9( + rho: Any, + u: Any, + ): + # Precompute constants + zero = self.compute_dtype(0.0) + half = self.compute_dtype(0.5) + one = self.compute_dtype(1.0) + one_half = self.compute_dtype(1.5) + three = self.compute_dtype(3.0) + + # Allocate the equilibrium distribution array + feq = _f_vec() + + # Compute usqr (velocity magnitude term) + usqr = one_half * (u[0] * u[0] + u[1] * u[1]) + + # Rest particle: l=0 (0,0), w[0] = 4/9 + cu = zero + base = one + half * cu * cu - usqr + feq[0] = rho * _w[0] * base # Simplifies to rho * w[0] * (1 - usqr) + + # Pair 1: l=1 (0,1) and l=2 (0,-1), w[1] = w[2] = 1/9 + cu_l = three * u[1] + base = one + half * cu_l * cu_l - usqr + feq[1] = rho * _w[1] * (base + cu_l) + feq[2] = rho * _w[2] * (base - cu_l) + + # Pair 2: l=3 (1,0) and l=4 (-1,0), w[3] = w[4] = 1/9 + cu_l = three * u[0] + base = one + half * cu_l * cu_l - usqr + feq[3] = rho * _w[3] * (base + cu_l) + feq[4] = rho * _w[4] * (base - cu_l) + + # Pair 3: l=5 (1,1) and l=8 (-1,-1), w[5] = w[8] = 1/36 + cu_l = three * (u[0] + u[1]) + base = one + half * cu_l * cu_l - usqr + feq[5] = rho * _w[5] * (base + cu_l) + feq[8] = rho * _w[8] * (base - cu_l) + + # Pair 4: l=6 (-1,1) and l=7 (1,-1), w[6] = w[7] = 1/36 + cu_l = three * (-u[0] + u[1]) + base = one + half * cu_l * cu_l - usqr + feq[6] = rho * _w[6] * (base + cu_l) + feq[7] = rho * _w[7] * (base - cu_l) + + return feq + # D3Q19 Kernel (3D, 19 directions) + @wp.func + def functional_d3q19( + rho: Any, + u: Any, + ): + # Precompute constants + zero = self.compute_dtype(0.0) + half = self.compute_dtype(0.5) + one = self.compute_dtype(1.0) + one_half = self.compute_dtype(1.5) + three = self.compute_dtype(3.0) + + # Allocate the equilibrium distribution array + feq = _f_vec() + + # Compute usqr (velocity magnitude term) + usqr = one_half * (u[0] * u[0] + u[1] * u[1] + u[2] * u[2]) + + # Rest particle: l=0 (0,0,0), w[0] = 1/3 + cu = zero + base = one + half * cu * cu - usqr + feq[0] = rho * _w[0] * base + + # Pair 1: l=1 (0,0,-1) and l=2 (0,0,1), w[1] = w[2] = 1/18 + cu_l = three * (-u[2]) + base = one + half * cu_l * cu_l - usqr + feq[1] = rho * _w[1] * (base + cu_l) + feq[2] = rho * _w[2] * (base - cu_l) + + # Pair 2: l=3 (0,-1,0) and l=6 (0,1,0), w[3] = w[6] = 1/18 + cu_l = three * (-u[1]) + base = one + half * cu_l * cu_l - usqr + feq[3] = rho * _w[3] * (base + cu_l) + feq[6] = rho * _w[6] * (base 
- cu_l) + + # Pair 3: l=4 (0,-1,-1) and l=8 (0,1,1), w[4] = w[8] = 1/36 + cu_l = three * (-u[1] - u[2]) + base = one + half * cu_l * cu_l - usqr + feq[4] = rho * _w[4] * (base + cu_l) + feq[8] = rho * _w[8] * (base - cu_l) + + # Pair 4: l=5 (0,-1,1) and l=7 (0,1,-1), w[5] = w[7] = 1/36 + cu_l = three * (-u[1] + u[2]) + base = one + half * cu_l * cu_l - usqr + feq[5] = rho * _w[5] * (base + cu_l) + feq[7] = rho * _w[7] * (base - cu_l) + + # Pair 5: l=9 (-1,0,0) and l=14 (1,0,0), w[9] = w[14] = 1/18 + cu_l = three * (-u[0]) + base = one + half * cu_l * cu_l - usqr + feq[9] = rho * _w[9] * (base + cu_l) + feq[14] = rho * _w[14] * (base - cu_l) + + # Pair 6: l=10 (-1,0,-1) and l=16 (1,0,1), w[10] = w[16] = 1/36 + cu_l = three * (-u[0] - u[2]) + base = one + half * cu_l * cu_l - usqr + feq[10] = rho * _w[10] * (base + cu_l) + feq[16] = rho * _w[16] * (base - cu_l) + + # Pair 7: l=11 (-1,0,1) and l=15 (1,0,-1), w[11] = w[15] = 1/36 + cu_l = three * (-u[0] + u[2]) + base = one + half * cu_l * cu_l - usqr + feq[11] = rho * _w[11] * (base + cu_l) + feq[15] = rho * _w[15] * (base - cu_l) + + # Pair 8: l=12 (-1,-1,0) and l=18 (1,1,0), w[12] = w[18] = 1/36 + cu_l = three * (-u[0] - u[1]) + base = one + half * cu_l * cu_l - usqr + feq[12] = rho * _w[12] * (base + cu_l) + feq[18] = rho * _w[18] * (base - cu_l) + + # Pair 9: l=13 (-1,1,0) and l=17 (1,-1,0), w[13] = w[17] = 1/36 + cu_l = three * (-u[0] + u[1]) + base = one + half * cu_l * cu_l - usqr + feq[13] = rho * _w[13] * (base + cu_l) + feq[17] = rho * _w[17] * (base - cu_l) + + return feq + # D3Q27 Kernel (3D, 27 directions) + @wp.func + def functional_d3q27(rho: Any, + u: Any, + ): + + # Precompute constants + zero = self.compute_dtype(0.0) + half = self.compute_dtype(0.5) + one = self.compute_dtype(1.0) + one_half = self.compute_dtype(1.5) + three = self.compute_dtype(3.0) + + # Allocate the equilibrium distribution array + feq = _f_vec() + + # Compute usqr once (velocity magnitude term) + usqr = one_half * wp.dot(u, u) + + # Rest particle: l=0 (0,0,0) - No opposite + cu = zero + base = one + half * cu * cu - usqr + feq[0] = rho * _w[0] * base # cu = 0, so feq[0] = rho * w[0] * (1 - usqr) + + # Pair 1: l=1 (0,0,-1) and l=2 (0,0,1) + cu_l = -three * u[2] + base = one + half * cu_l * cu_l - usqr + feq[1] = rho * _w[1] * (base + cu_l) + feq[2] = rho * _w[2] * (base - cu_l) + + # Pair 2: l=3 (0,-1,0) and l=6 (0,1,0) + cu_l = -three * u[1] + base = one + half * cu_l * cu_l - usqr + feq[3] = rho * _w[3] * (base + cu_l) + feq[6] = rho * _w[6] * (base - cu_l) + + # Pair 3: l=4 (0,-1,-1) and l=8 (0,1,1) + cu_l = three * (-u[1] - u[2]) + base = one + half * cu_l * cu_l - usqr + feq[4] = rho * _w[4] * (base + cu_l) + feq[8] = rho * _w[8] * (base - cu_l) + + # Pair 4: l=5 (0,-1,1) and l=7 (0,1,-1) + cu_l = three * (-u[1] + u[2]) + base = one + half * cu_l * cu_l - usqr + feq[5] = rho * _w[5] * (base + cu_l) + feq[7] = rho * _w[7] * (base - cu_l) + + # Pair 5: l=9 (-1,0,0) and l=18 (1,0,0) + cu_l = -three * u[0] + base = one + half * cu_l * cu_l - usqr + feq[9] = rho * _w[9] * (base + cu_l) + feq[18] = rho * _w[18] * (base - cu_l) + + # Pair 6: l=10 (-1,0,-1) and l=20 (1,0,1) + cu_l = three * (-u[0] - u[2]) + base = one + half * cu_l * cu_l - usqr + feq[10] = rho * _w[10] * (base + cu_l) + feq[20] = rho * _w[20] * (base - cu_l) + + # Pair 7: l=11 (-1,0,1) and l=19 (1,0,-1) + cu_l = three * (-u[0] + u[2]) + base = one + half * cu_l * cu_l - usqr + feq[11] = rho * _w[11] * (base + cu_l) + feq[19] = rho * _w[19] * (base - cu_l) + + # Pair 8: l=12 (-1,-1,0) and 
l=24 (1,1,0) + cu_l = three * (-u[0] - u[1]) + base = one + half * cu_l * cu_l - usqr + feq[12] = rho * _w[12] * (base + cu_l) + feq[24] = rho * _w[24] * (base - cu_l) + + # Pair 9: l=13 (-1,-1,-1) and l=26 (1,1,1) + cu_l = three * (-u[0] - u[1] - u[2]) + base = one + half * cu_l * cu_l - usqr + feq[13] = rho * _w[13] * (base + cu_l) + feq[26] = rho * _w[26] * (base - cu_l) + + # Pair 10: l=14 (-1,-1,1) and l=25 (1,1,-1) + cu_l = three * (-u[0] - u[1] + u[2]) + base = one + half * cu_l * cu_l - usqr + feq[14] = rho * _w[14] * (base + cu_l) + feq[25] = rho * _w[25] * (base - cu_l) + + # Pair 11: l=15 (-1,1,0) and l=21 (1,-1,0) + cu_l = three * (-u[0] + u[1]) + base = one + half * cu_l * cu_l - usqr + feq[15] = rho * _w[15] * (base + cu_l) + feq[21] = rho * _w[21] * (base - cu_l) + + # Pair 12: l=16 (-1,1,-1) and l=23 (1,-1,1) + cu_l = three * (-u[0] + u[1] - u[2]) + base = one + half * cu_l * cu_l - usqr + feq[16] = rho * _w[16] * (base + cu_l) + feq[23] = rho * _w[23] * (base - cu_l) + + # Pair 13: l=17 (-1,1,1) and l=22 (1,-1,-1) + cu_l = three * (-u[0] + u[1] + u[2]) + base = one + half * cu_l * cu_l - usqr + feq[17] = rho * _w[17] * (base + cu_l) + feq[22] = rho * _w[22] * (base - cu_l) + + return feq + + @wp.func + def functional_loop( rho: Any, u: Any, ): @@ -64,6 +298,16 @@ def functional( feq[l] = rho * _w[l] * (self.compute_dtype(1.0) + cu * (self.compute_dtype(1.0) + self.compute_dtype(0.5) * cu) - usqr) return feq + + # Determine the lattice type and return the appropriate kernel + if _d == 2 and _q == 9: + functional = functional_d2q9 + elif _d == 3 and _q == 19: + functional = functional_d3q19 + elif _d == 3 and _q == 27: + functional = functional_d3q27 + else: + functional = functional_loop # Construct the warp kernel @wp.kernel diff --git a/xlb/operator/force/momentum_transfer.py b/xlb/operator/force/momentum_transfer.py index c5de920c..9bf9850d 100644 --- a/xlb/operator/force/momentum_transfer.py +++ b/xlb/operator/force/momentum_transfer.py @@ -213,7 +213,8 @@ def jax_implementation(self, f_0, f_1, bc_mask, missing_mask): def _construct_warp(self): # Set local constants - _c = self.velocity_set.c + _c = self.velocity_set.c + _c_float = self.velocity_set.c_float _opp_indices = self.velocity_set.opp_indices _u_vec = wp.vec(self.velocity_set.d, dtype=self.compute_dtype) _missing_mask_vec = wp.vec(self.velocity_set.q, dtype=wp.uint8) @@ -255,10 +256,8 @@ def functional( for l in range(self.velocity_set.q): if _missing_mask[l] == wp.uint8(1): phi = f_post_collision[_opp_indices[l]] + f_post_stream[l] - if _c[d, _opp_indices[l]] == 1: - m[d] += phi - elif _c[d, _opp_indices[l]] == -1: - m[d] -= phi + m[d] += phi *_c_float[d, _opp_indices[l]] + # Atomic sum to get the total force vector wp.atomic_add(force, 0, m) diff --git a/xlb/operator/macroscopic/first_moment.py b/xlb/operator/macroscopic/first_moment.py index 2842ec1e..ecec4e9d 100644 --- a/xlb/operator/macroscopic/first_moment.py +++ b/xlb/operator/macroscopic/first_moment.py @@ -19,6 +19,7 @@ def jax_implementation(self, f, rho): def _construct_warp(self): _c = self.velocity_set.c + _c_float = self.velocity_set.c_float _f_vec = wp.vec(self.velocity_set.q, dtype=self.compute_dtype) _u_vec = wp.vec(self.velocity_set.d, dtype=self.compute_dtype) @@ -28,12 +29,7 @@ def neumaier_sum_component(d: int, f: _f_vec): compensation = self.compute_dtype(0.0) for l in range(self.velocity_set.q): # Get contribution based on the sign of _c[d, l] - if _c[d, l] == 1: - val = f[l] - elif _c[d, l] == -1: - val = -f[l] - else: - val = 
self.compute_dtype(0.0) + val = _c_float[d, l] * f[l] t = total + val if wp.abs(total) >= wp.abs(val): compensation = compensation + ((total - t) + val) diff --git a/xlb/operator/macroscopic/second_moment.py b/xlb/operator/macroscopic/second_moment.py index ee74bdd9..c42aa1ad 100644 --- a/xlb/operator/macroscopic/second_moment.py +++ b/xlb/operator/macroscopic/second_moment.py @@ -72,9 +72,17 @@ def functional( # Get second order moment (a symmetric tensor shaped into a vector) pi = _pi_vec() for d in range(_pi_dim): - pi[d] = self.compute_dtype(0.0) + sum_val = self.compute_dtype(0.0) + c = self.compute_dtype(0.0) for q in range(self.velocity_set.q): - pi[d] += _cc[q, d] * fneq[q] + x = _cc[q, d] * fneq[q] + t = sum_val + x + if wp.abs(sum_val) >= wp.abs(x): + c += (sum_val - t) + x + else: + c += (x - t) + sum_val + sum_val = t + pi[d] = sum_val + c return pi # Construct the kernel diff --git a/xlb/operator/macroscopic/zero_moment.py b/xlb/operator/macroscopic/zero_moment.py index 0b7cd7b7..40005af1 100644 --- a/xlb/operator/macroscopic/zero_moment.py +++ b/xlb/operator/macroscopic/zero_moment.py @@ -21,6 +21,7 @@ def _construct_warp(self): @wp.func def neumaier_sum(f: _f_vec): + epsilon = self.compute_dtype(1e-7) total = self.compute_dtype(0.0) compensation = self.compute_dtype(0.0) for l in range(self.velocity_set.q): @@ -32,7 +33,8 @@ def neumaier_sum(f: _f_vec): else: compensation = compensation + ((x - t) + total) total = t - return total + compensation + _rho = total + compensation + return wp.max(_rho, epsilon) @wp.func def functional(f: _f_vec): diff --git a/xlb/operator/stepper/nse_multires_stepper.py b/xlb/operator/stepper/nse_multires_stepper.py index 79d07c08..deb5a018 100644 --- a/xlb/operator/stepper/nse_multires_stepper.py +++ b/xlb/operator/stepper/nse_multires_stepper.py @@ -324,7 +324,7 @@ def apply_bc( f_result = wp.static(self.boundary_conditions[i].neon_functional)(index, timestep, _missing_mask, f_0, f_1, f_pre, f_post) if wp.static(self.boundary_conditions[i].id in extrapolation_outflow_bc_ids): if _boundary_id == wp.static(self.boundary_conditions[i].id): - f_result = wp.static(self.boundary_conditions[i].prepare_bc_auxilary_data)( + f_result = wp.static(self.boundary_conditions[i].assemble_auxiliary_data)( index, timestep, _missing_mask, f_0, f_1, f_pre, f_post ) return f_result @@ -498,7 +498,10 @@ def cl_stream_coarse(index: Any): _missing_mask = _missing_mask_vec() _f0_thread, _missing_mask = neon_get_thread_data(f_0_pn, missing_mask_pn, index) _f_post_collision = _f0_thread - _f_post_stream = self.stream.neon_functional(f_0_pn, index) + # _f_post_stream = self.stream.neon_functional(f_0_pn, index) + _f_post_stream = _f_vec() + _f_post_stream[lattice_central_index] = wp.neon_read(f_0_pn, index, lattice_central_index) + for l in range(self.velocity_set.q): if l == lattice_central_index: @@ -531,6 +534,9 @@ def cl_stream_coarse(index: Any): # -> **Explosion** # wp.neon_write(f_1_pn, index, l, exploded_pop) _f_post_stream[l] = exploded_pop + + else: # neighbor exists at the same level: plain streaming pull + _f_post_stream[l] = accumulated else: # HERE -> I have a finer ngh. in direction pull (opposite l) # Then I have to read from the halo on top of my finer ngh.
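A note on the compensated loops above: first_moment, second_moment, zero_moment, and the KBC entropic scalar products all switch to the same Neumaier (improved Kahan-Babuska) summation, carrying a separate correction term so that FP32 accumulation over the q populations keeps the low-order bits a naive running sum would drop. A minimal NumPy sketch of the pattern outside Warp (the helper name and demo values here are illustrative only, not part of XLB):

import numpy as np

def neumaier_sum(values):
    # Running total plus a compensation term that captures the bits lost
    # whenever a small addend meets a large partial sum (or vice versa).
    total = np.float32(0.0)
    comp = np.float32(0.0)
    for x in np.asarray(values, dtype=np.float32):
        t = total + x
        if abs(total) >= abs(x):
            comp += (total - t) + x   # low-order bits of x were lost in t
        else:
            comp += (x - t) + total   # low-order bits of total were lost in t
        total = t
    return total + comp

vals = [np.float32(1e8)] + [np.float32(1.0)] * 1000
naive = np.float32(0.0)
for x in vals:
    naive = naive + x
print(naive)              # 1e+08 -- every unit increment is rounded away in FP32
print(neumaier_sum(vals)) # 100001000.0 -- the compensation term recovers them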
@@ -763,7 +769,9 @@ def finest_fused_pull_kernel(index: Any): _missing_mask = _missing_mask_vec() _f0_thread, _missing_mask = neon_get_thread_data(f_0_pn, missing_mask_pn, index) _f_post_collision = _f0_thread - _f_post_stream = self.stream.neon_functional(f_0_pn, index) + # _f_post_stream = self.stream.neon_functional(f_0_pn, index) + _f_post_stream = _f_vec() + _f_post_stream[lattice_central_index] = wp.neon_read(f_0_pn, index, lattice_central_index) for l in range(self.velocity_set.q): if l == lattice_central_index: @@ -799,6 +807,8 @@ def finest_fused_pull_kernel(index: Any): # -> **Explosion** # wp.neon_write(f_1_pn, index, l, exploded_pop) _f_post_stream[l] = exploded_pop + else: # neighbor exists at the same level: plain streaming pull + _f_post_stream[l] = accumulated # do non mres post-streaming corrections _f_post_stream = apply_bc(index, timestep, _boundary_id, _missing_mask, f_0_pn, f_1_pn, _f_post_collision, _f_post_stream, True) diff --git a/xlb/operator/stepper/nse_stepper.py b/xlb/operator/stepper/nse_stepper.py index e4c2e6fc..f275d58d 100644 --- a/xlb/operator/stepper/nse_stepper.py +++ b/xlb/operator/stepper/nse_stepper.py @@ -40,7 +40,7 @@ def __init__( collision_type="BGK", forcing_scheme="exact_difference", force_vector=None, backend_config=None, ): super().__init__(grid, boundary_conditions) - self.backend_config = backend_config + self.backend_config = backend_config if backend_config is not None else {} # avoid a shared mutable default argument @@ -420,7 +420,7 @@ def apply_bc( f_result = wp.static(self.boundary_conditions[i].neon_functional)(index, timestep, _missing_mask, f_0, f_1, f_pre, f_post) if wp.static(self.boundary_conditions[i].id in extrapolation_outflow_bc_ids): if _boundary_id == wp.static(self.boundary_conditions[i].id): - f_result = wp.static(self.boundary_conditions[i].assemble_dynamic_data)( + f_result = wp.static(self.boundary_conditions[i].assemble_auxiliary_data)( index, timestep, _missing_mask, f_0, f_1, f_pre, f_post ) return f_result @@ -532,6 +532,8 @@ def nse_stepper_cl(index: Any): @Operator.register_backend(ComputeBackend.NEON) def neon_launch(self, f_0, f_1, bc_mask, missing_mask, omega, timestep): + if timestep == 0: + self.prepare_skeleton(f_0, f_1, bc_mask, missing_mask, omega) self.sk[self.sk_iter].run() self.sk_iter = (self.sk_iter + 1) % 2 return f_0, f_1 @@ -544,7 +546,7 @@ def prepare_skeleton(self, f_0, f_1, bc_mask, missing_mask, omega): self.neon_skeleton["even"]["container"] = self.neon_container(f_1, f_0, bc_mask, missing_mask, omega, 1) # check if 'occ' is a valid key if "occ" not in self.backend_config: - occ = neon.SkeletonConfig.none() + occ = neon.SkeletonConfig.OCC.none() else: occ = self.backend_config["occ"] # check that occ is of type neon.SkeletonConfig.OCC @@ -558,4 +560,4 @@ self.neon_skeleton[key]["skeleton"].sequence(name="mres_nse_stepper", containers=self.neon_skeleton[key]["app"], occ=occ) self.sk = [self.neon_skeleton["odd"]["skeleton"], self.neon_skeleton["even"]["skeleton"]] - self.sk_iter = 0 + self.sk_iter = 0 \ No newline at end of file diff --git a/xlb/utils/__init__.py b/xlb/utils/__init__.py index 7af8f80c..25eb470a 100644 --- a/xlb/utils/__init__.py +++ b/xlb/utils/__init__.py @@ -8,3 +8,4 @@ axangle2mat, ) from .mesher import make_cuboid_mesh, MultiresIO +from .makemesh import makeMesh diff --git a/xlb/utils/makemesh.py b/xlb/utils/makemesh.py new file mode 100644 index 00000000..38f3e8e5 --- /dev/null +++ b/xlb/utils/makemesh.py @@ -0,0 +1,690 @@ +import numpy as np +import open3d as o3d +from typing import Any +import time +from pathlib
import Path +from tabulate import tabulate + +import neon +import warp as wp + +DEVICE = "cuda" + +def generate_mesh( + levels, + stl_name, + voxSize, + padding_table=None, + domainMultiplier=None, + close=True, + ground_refinement_level=-1, + ground_voxel_height=4, + downsample=-1, +): + """ + Generate a multi-resolution voxel grid based on an STL file. + + This function serves as a high-level interface to create a multi-resolution voxel grid by + voxelizing an STL file and processing it across multiple resolution levels. + + Parameters + ---------- + levels : int + The number of resolution levels in the multi-resolution grid. Must be a positive integer. + stl_name : str + The file path or name of the STL file to be voxelized (e.g., "sphere.stl"). + voxSize : float + The voxel size at the finest resolution level, in meters. Must be positive. + padding_table : list of lists + A list where each inner list contains six integers [xn, xp, yn, yp, zn, zp] + representing padding in negative and positive x, y, z directions for each level. + domainMultiplier : dict + A dictionary with six keys ('-x', 'x', '-y', 'y', '-z', 'z') giving the scale factor + applied to the part's bounding box in each direction. + close : bool, optional + If True, applies a closing operation to fill gaps or islands in the voxel grid at each level. + Default is True. + ground_refinement_level : int, optional + The level at which to apply ground refinement (e.g., adding a solid ground layer). + If -1, no ground refinement is performed. Default is -1. + ground_voxel_height : int, optional + The thickness of the refined ground layer, in voxels at that level. Default is 4. + downsample : int, optional + The highest level (inclusive) to downsample when saving data, doubling voxel sizes for levels + 0 to `downsample`. If -1, no downsampling is applied. Default is -1. + + Returns + ------- + level_data : list of tuples + A list where each tuple corresponds to a resolution level and contains: + - dr : numpy.ndarray + The voxel matrix (3D boolean array) for the level. + - v : float + The voxel size for the level, in meters (adjusted for level). + - dOrigin : numpy.ndarray + The origin coordinates (x, y, z) of the voxel grid for the level, in meters. + - l : int + The level number (0 to `levels - 1`).
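+    Note: alongside level_data, generate_mesh also returns (matching the return statement in makeMesh): + grid_shape : numpy.ndarray + The dense domain shape expressed in finest-level voxels (domainSize / voxSize). + sparsity_pattern : list of numpy.ndarray + Per-level int32 occupancy masks (see prepare_sparsity_pattern). + level_origins : list of numpy.ndarray + Per-level grid origins.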
+ """ + if not padding_table: + padding_table = [ + [2, 2, 2, 2, 2, 2], + [2, 2, 2, 2, 2, 2], + [2, 2, 2, 2, 2, 2], + [2, 2, 2, 2, 2, 2], + [2, 2, 2, 2, 2, 2], + [2, 2, 2, 2, 2, 2], + [2, 2, 2, 2, 2, 2], + [2, 2, 2, 2, 2, 2], + [2, 2, 2, 2, 2, 2], + [2, 2, 2, 2, 2, 2], + [2, 2, 2, 2, 2, 2], + ] + if not domainMultiplier: + domainMultiplier = { + "-x": 0, + "x": 0, + "-y": 0, + "y": 0, + "z": 0, + "-z": 0, + } + kernel = calculate_kernel(padding_table) + return makeMesh( + levels, + stl_name, + voxSize, + kernel, + domainMultiplier, + close=close, + ground_refinement_level=ground_refinement_level, + ground_voxel_height=ground_voxel_height, + downsample=downsample, + ) + + +# Warp kernels for voxel operations: grow, fill, remove, and crop + + +# Copy input matrix into a padded matrix on GPU +@wp.kernel +def copy_to_padded_kernel(input: wp.array3d(dtype=wp.uint8), padded: wp.array3d(dtype=wp.uint8), pad_x: int, pad_y: int, pad_z: int): + i, j, k = wp.tid() + if i < input.shape[0] and j < input.shape[1] and k < input.shape[2]: + padded[i + pad_x, j + pad_y, k + pad_z] = input[i, j, k] + + +# Apply convolution to a padded matrix on GPU +@wp.kernel +def convolution_kernel( + padded: wp.array3d(dtype=wp.uint8), kernel: wp.array3d(dtype=wp.uint8), output: wp.array3d(dtype=wp.uint8), kx: int, ky: int, kz: int +): + i, j, k = wp.tid() + if i >= padded.shape[0] or j >= padded.shape[1] or k >= padded.shape[2]: + return + sum_val = wp.uint8(0) + for di in range(kx): + for dj in range(ky): + for dk in range(kz): + if kernel[di, dj, dk] != 0: + ii = i + di - kx // 2 + jj = j + dj - ky // 2 + kk = k + dk - kz // 2 + if 0 <= ii < padded.shape[0] and 0 <= jj < padded.shape[1] and 0 <= kk < padded.shape[2]: + sum_val += padded[ii, jj, kk] + if sum_val > 0: + output[i, j, k] = wp.uint8(1) + else: + output[i, j, k] = wp.uint8(0) + + +# Expand a voxel matrix using convolution-based growth on GPU +def grow_gpu(matrix, voxSize, origin, kernel): + pad = (np.array(kernel.shape) * 0.5).astype(int) + print(" Grow padding: ", pad) + wp_matrix = wp.array(matrix.astype(np.uint8), dtype=wp.uint8, device=DEVICE) + wp_kernel = wp.array(kernel.astype(np.uint8), dtype=wp.uint8, device=DEVICE) + padded_shape = tuple(np.array(matrix.shape) + 2 * pad) + padded = wp.zeros(padded_shape, dtype=wp.uint8, device=DEVICE) + nx, ny, nz = matrix.shape + wp.launch(kernel=copy_to_padded_kernel, dim=(nx, ny, nz), inputs=[wp_matrix, padded, pad[0], pad[1], pad[2]], device=DEVICE) + output = wp.zeros(padded_shape, dtype=wp.uint8, device=DEVICE) + nx, ny, nz = padded_shape + kx, ky, kz = kernel.shape + wp.launch(kernel=convolution_kernel, dim=(nx, ny, nz), inputs=[padded, wp_kernel, output, kx, ky, kz], device=DEVICE) + wp.synchronize() + r = output.numpy().astype(bool) + kernel_shape = np.array(kernel.shape) + originPad = origin - (kernel_shape - 1) * voxSize * 0.5 + return r, originPad + + +# Compute OR of 2x2x2 blocks in a padded matrix on GPU +@wp.kernel +def compute_or_blocks(padded: wp.array3d(dtype=wp.uint8), or_results: wp.array3d(dtype=wp.uint8)): + bx, by, bz = wp.tid() + if bx >= or_results.shape[0] or by >= or_results.shape[1] or bz >= or_results.shape[2]: + return + result = wp.uint8(0) + for di in range(2): + for dj in range(2): + for dk in range(2): + if padded[bx * 2 + di, by * 2 + dj, bz * 2 + dk] != 0: + result = wp.uint8(1) + break + if result != 0: + break + if result != 0: + break + or_results[bx, by, bz] = result + + +# Perform binary dilation with a cross-shaped element on GPU +@wp.kernel +def 
binary_dilation_cross(input: wp.array3d(dtype=wp.uint8), output: wp.array3d(dtype=wp.uint8)): + i, j, k = wp.tid() + if i >= input.shape[0] or j >= input.shape[1] or k >= input.shape[2]: + return + result = wp.uint8(0) + for dx in range(-1, 2): + for dy in range(-1, 2): + for dz in range(-1, 2): + if (dx == 0 and dy == 0 and dz == 0) or (abs(dx) + abs(dy) + abs(dz) == 1): + ii = wp.clamp(i + dx, 0, input.shape[0] - 1) + jj = wp.clamp(j + dy, 0, input.shape[1] - 1) + kk = wp.clamp(k + dz, 0, input.shape[2] - 1) + if input[ii, jj, kk] != 0: + result = wp.uint8(1) + break + if result != 0: + break + if result != 0: + break + output[i, j, k] = result + + +# Perform binary erosion with a cross-shaped element on GPU +@wp.kernel +def binary_erosion_cross(input: wp.array3d(dtype=wp.uint8), output: wp.array3d(dtype=wp.uint8)): + i, j, k = wp.tid() + if i >= input.shape[0] or j >= input.shape[1] or k >= input.shape[2]: + return + result = wp.uint8(1) + for dx in range(-1, 2): + for dy in range(-1, 2): + for dz in range(-1, 2): + if (dx == 0 and dy == 0 and dz == 0) or (abs(dx) + abs(dy) + abs(dz) == 1): + ii = wp.clamp(i + dx, 0, input.shape[0] - 1) + jj = wp.clamp(j + dy, 0, input.shape[1] - 1) + kk = wp.clamp(k + dz, 0, input.shape[2] - 1) + if input[ii, jj, kk] == 0: + result = wp.uint8(0) + break + if result == 0: + break + if result == 0: + break + output[i, j, k] = result + + +# Expand OR results back to the padded matrix size on GPU +@wp.kernel +def expand_or_results(or_results: wp.array3d(dtype=wp.uint8), padded: wp.array3d(dtype=wp.uint8)): + i, j, k = wp.tid() + if i >= padded.shape[0] or j >= padded.shape[1] or k >= padded.shape[2]: + return + bx = i // 2 + by = j // 2 + bz = k // 2 + padded[i, j, k] = or_results[bx, by, bz] + + +# Extract the central region of a padded matrix on GPU +@wp.kernel +def extract_center(input: wp.array3d(dtype=wp.uint8), output: wp.array3d(dtype=wp.uint8), px: int, py: int, pz: int): + i, j, k = wp.tid() + if i < output.shape[0] and j < output.shape[1] and k < output.shape[2]: + output[i, j, k] = input[i + px, j + py, k + pz] + + +# Pad an array with zeros on GPU +def pad_array_zeros_gpu(input_arr, pad_width): + px, py, pz = pad_width + shape = input_arr.shape + padded_shape = (shape[0] + 2 * px, shape[1] + 2 * py, shape[2] + 2 * pz) + padded = wp.zeros(padded_shape, dtype=wp.uint8, device=DEVICE) + nx, ny, nz = shape + wp.launch(kernel=copy_to_padded_kernel, dim=(nx, ny, nz), inputs=[input_arr, padded, px, py, pz], device=DEVICE) + return padded + + +# Fill a voxel matrix with optional closing operation on GPU +def fill_gpu(matrix, voxSize, origin, close): + a = (origin / voxSize) % 2 + inds = np.isclose(a, np.round(a), atol=1e-8) + a[inds] = np.round(a[inds]) + paddingLo = np.floor(a).astype(int) + paddingHi = np.round((matrix.shape + paddingLo) % 2).astype(int) + originPad = origin - paddingLo * voxSize + wp_matrix = wp.array(matrix.astype(np.uint8), dtype=wp.uint8, device=DEVICE) + padded_shape = tuple(np.array(matrix.shape) + paddingLo + paddingHi) + padded = wp.zeros(padded_shape, dtype=wp.uint8, device=DEVICE) + nx, ny, nz = matrix.shape + wp.launch(kernel=copy_to_padded_kernel, dim=(nx, ny, nz), inputs=[wp_matrix, padded, paddingLo[0], paddingLo[1], paddingLo[2]], device=DEVICE) + or_results_shape = tuple(np.array(padded_shape) // 2) + or_results = wp.zeros(or_results_shape, dtype=wp.uint8, device=DEVICE) + nx, ny, nz = or_results_shape + wp.launch(kernel=compute_or_blocks, dim=(nx, ny, nz), inputs=[padded, or_results], device=DEVICE) + if close: + 
padded_or_results = pad_array_zeros_gpu(or_results, (2, 2, 2)) + nx, ny, nz = padded_or_results.shape + dilated = wp.zeros(padded_or_results.shape, dtype=wp.uint8, device=DEVICE) + wp.launch(kernel=binary_dilation_cross, dim=(nx, ny, nz), inputs=[padded_or_results, dilated], device=DEVICE) + eroded = wp.zeros(padded_or_results.shape, dtype=wp.uint8, device=DEVICE) + wp.launch(kernel=binary_erosion_cross, dim=(nx, ny, nz), inputs=[dilated, eroded], device=DEVICE) + nx, ny, nz = or_results_shape + new_or_results = wp.zeros(or_results_shape, dtype=wp.uint8, device=DEVICE) + wp.launch(kernel=extract_center, dim=(nx, ny, nz), inputs=[eroded, new_or_results, 2, 2, 2], device=DEVICE) + or_results = new_or_results + nx, ny, nz = padded_shape + wp.launch(kernel=expand_or_results, dim=(nx, ny, nz), inputs=[or_results, padded], device=DEVICE) + wp.synchronize() + m = padded.numpy().astype(bool) + return m, originPad + + +# Set specified voxel indices to False on GPU +@wp.kernel +def set_false_kernel(matrix: wp.array3d(dtype=wp.uint8), indices: wp.array(dtype=wp.int32, ndim=2), offset: wp.array(dtype=wp.int32, ndim=1)): + tid = wp.tid() + if tid >= indices.shape[0]: + return + ix = indices[tid, 0] + offset[0] + iy = indices[tid, 1] + offset[1] + iz = indices[tid, 2] + offset[2] + if 0 <= ix < matrix.shape[0] and 0 <= iy < matrix.shape[1] and 0 <= iz < matrix.shape[2]: + matrix[ix, iy, iz] = wp.uint8(0) + + +# Remove specified voxels from a matrix on GPU +def remove_gpu(matrix, origin, removeMat, removeOrigin, voxSize): + offset = np.round((removeOrigin - origin) / voxSize).astype(int) + removeIndices = np.argwhere(removeMat) + if len(removeIndices) == 0: + return np.copy(matrix) + wp_matrix = wp.array(matrix.astype(np.uint8), dtype=wp.uint8, device=DEVICE) + wp_indices = wp.array(removeIndices, dtype=wp.int32, device=DEVICE) + wp_offset = wp.array(offset, dtype=wp.int32, device=DEVICE) + wp.launch(kernel=set_false_kernel, dim=len(removeIndices), inputs=[wp_matrix, wp_indices, wp_offset], device=DEVICE) + wp.synchronize() + mat = wp_matrix.numpy().astype(bool) + return mat + + +# Copy a cropped region from a matrix on GPU +@wp.kernel +def copy_cropped(mat: wp.array3d(dtype=wp.uint8), cropped: wp.array3d(dtype=wp.uint8), offset_x: int, offset_y: int, offset_z: int): + i, j, k = wp.tid() + if i < cropped.shape[0] and j < cropped.shape[1] and k < cropped.shape[2]: + cropped[i, j, k] = mat[i + offset_x, j + offset_y, k + offset_z] + + +# Crop a voxel matrix to a specified domain on GPU +def crop_gpu(mat, origin, domainMin, domainMax, v): + cMin = np.round((domainMin - origin) / v).astype(int) + cMax = np.round((domainMax - origin) / v).astype(int) + cropMin = np.maximum(cMin, 0) + cropMax = np.minimum(cMax, mat.shape) + cropped_shape = tuple(cropMax - cropMin) + if any(s <= 0 for s in cropped_shape): + return np.empty((0, 0, 0), dtype=bool), origin + wp_mat = wp.array(mat.astype(np.uint8), dtype=wp.uint8, device=DEVICE) + wp_cropped = wp.zeros(cropped_shape, dtype=wp.uint8, device=DEVICE) + nx, ny, nz = cropped_shape + wp.launch(kernel=copy_cropped, dim=(nx, ny, nz), inputs=[wp_mat, wp_cropped, cropMin[0], cropMin[1], cropMin[2]], device=DEVICE) + wp.synchronize() + cropMat = wp_cropped.numpy().astype(bool) + origin = origin + cropMin * v + return cropMat, origin + + +def pad_to_even(grid): + shape = grid.shape + # Calculate padding: 0 if even, 1 if odd, added to the upper side + pad_width = [(0, 1 if s % 2 != 0 else 0) for s in shape] + padded_grid = np.pad(grid, pad_width, mode="constant", 
constant_values=False) + # Check if any padding was added + padding_added = [pw[1] for pw in pad_width] # Extract upper padding for each dimension + if any(p > 0 for p in padding_added): + # Map axis indices to x, y, z and report only dimensions with padding + padding_str = ", ".join([f"{'xyz'[i]} by {p}" for i, p in enumerate(padding_added) if p > 0]) + print(f" Padded {padding_str}") + return padded_grid + + +# Voxelize an STL file using Open3D +def voxelize_stl_open3d(stl_filename, length_lbm_unit): + """ + Voxelize an STL file using Open3D. + + Args: + stl_filename (str): Path to the STL file. + length_lbm_unit (float): Voxel size in meters. + + Returns: + tuple: (voxel_matrix, origin, partDomain) + - voxel_matrix: Boolean 3D array representing the voxel grid. + - origin: Coordinates of the grid origin. + - partDomain: List of [max_bound, min_bound]. + """ + tic = time.perf_counter() + mesh = o3d.io.read_triangle_mesh(stl_filename) + if len(mesh.vertices) == 0: + raise ValueError("The mesh is empty or invalid.") + print(f" Number of vertices: {len(mesh.vertices):,}") + print(f" Number of triangles: {len(mesh.triangles):,}") + toc = time.perf_counter() + print(f" Model read in {toc - tic:0.1f} seconds") + + tic = time.perf_counter() + + # Compute the bounds of the mesh + min_bound = np.asarray(mesh.get_min_bound()) + max_bound = np.asarray(mesh.get_max_bound()) + + # Snap bounds to voxel grid + min_bound = np.floor(min_bound / length_lbm_unit) * length_lbm_unit + max_bound = np.ceil(max_bound / length_lbm_unit) * length_lbm_unit + + # Compute grid size with padding to avoid index errors + grid_size = np.ceil((max_bound - min_bound) / length_lbm_unit).astype(int) + 1 # Add 1 for padding + + # Translate mesh to align min_bound with origin + mesh.translate(-min_bound) + + # Voxelize the mesh + voxel_grid = o3d.geometry.VoxelGrid.create_from_triangle_mesh(mesh, voxel_size=length_lbm_unit) + voxels = voxel_grid.get_voxels() + voxel_indices = np.array([v.grid_index for v in voxels], dtype=int) if voxels else np.empty((0, 3), dtype=int) + + # Ensure indices are within bounds + voxel_indices = np.clip( + voxel_indices, + [0, 0, 0], + grid_size - 1 # Clip to max valid index + ) + + # Create the voxel matrix + voxel_matrix = np.zeros(grid_size, dtype=bool) + if voxel_indices.size > 0: + voxel_matrix[voxel_indices[:, 0], voxel_indices[:, 1], voxel_indices[:, 2]] = True + + origin = min_bound + partDomain = [max_bound, min_bound] + + toc = time.perf_counter() + print(f" Grid created in {toc - tic:0.1f} seconds") + + return voxel_matrix, origin, partDomain + + +# Print a table of padding values for each level +def print_padding_table(padding_values): + headers = ["Level", "X-", "X+", "Y-", "Y+", "Z-", "Z+"] + table = [[level] + values for level, values in enumerate(padding_values)] + print(tabulate(table, headers=headers, tablefmt="grid")) + + +# Generate a multi-level voxel mesh from an STL file +def makeMesh(levels, filename, voxSize, kernel, domainMultiplier, close=True, ground_refinement_level=-1, ground_voxel_height=4, downsample=-1): + stem = Path(filename).stem + tic = time.perf_counter() + + matrix, origin, partDomain = voxelize_stl_open3d(filename, voxSize) + partSize = partDomain[0] - partDomain[1] + + domainMin = np.array([0, 0, 0], float) + domainMax = np.array( + [ + partDomain[0][0] + (domainMultiplier["x"] * partSize[0]), + partDomain[0][1] + (domainMultiplier["y"] * partSize[1]), + partDomain[0][2] + (domainMultiplier["z"] * partSize[2]), + ], + float, + ) + + # Store original bounds 
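+    # (kept so the Domain Extension Report below can show how far the box grows when snapping to the coarsest voxel size)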
+ orig_domainMin = domainMin.copy() + orig_domainMax = domainMax.copy() + + domainSize = domainMax - domainMin + + # Calculate the smallest domain dimension + min_domain_size = np.min(domainSize) + threshold = min_domain_size / 8.0 # 1/8th of the smallest dimension + ratio = threshold / voxSize + + # Check if the base voxel size is valid + if ratio <= 0: + raise ValueError("Voxel size is larger than 1/8th of the smallest domain dimension.") + + # Calculate maximum allowable levels + max_levels_allowed = max(1, int(np.floor(np.log2(ratio))) + 1) + + # Adjust levels if necessary + if levels > max_levels_allowed: + print(f"Reducing levels from {levels} to {max_levels_allowed} to satisfy voxel size constraint.") + levels = max_levels_allowed + + maxVoxSize = voxSize * pow(2, levels - 1) + domainMin = np.round(domainMin / maxVoxSize) * maxVoxSize + domainMax = np.ceil(domainMax / maxVoxSize) * maxVoxSize + + print("\n" + "=" * 100 + "\n") + print("Meshing Configuration:") + print(f"Model: {filename}") + print(f"Finest level: {voxSize} meters") + print(f"Number of levels: {levels}") + print(f"Close islands: {close}") + if ground_refinement_level != -1: + print(f"Ground refinement level: {ground_refinement_level}") + print("Adjusted domain coordinates: ", domainMin, ", ", domainMax) + print("Voxel growth strategy:") + #print_padding_table(padding_values) + print("\n" + "=" * 100 + "\n") + + domainSize = domainMax - domainMin + print("/// Make Mesh started... " + stem) + v = voxSize + + level_data = [] + print("/// Level 0 voxel size: ", v) + ticLevel = time.perf_counter() + if levels == 1: + # Calculate full domain shape + full_shape = np.round(domainSize / voxSize).astype(int) + df = np.ones(full_shape, dtype=bool) + df = pad_to_even(df) # Pad to ensure even shape + dr = df.copy() + dOrigin = domainMin + level_data.append((dr, v, dOrigin, 0)) + else: + g, origin = grow_gpu(matrix, voxSize, origin, kernel[0]) + f, origin = fill_gpu(g, voxSize, origin, close) + df, origin = crop_gpu(f, origin, domainMin, domainMax, v) + df = pad_to_even(df) + dOrigin = np.copy(origin) + dr = df.copy() # dr is the final matrix for this level + + level_data.append((dr, v, dOrigin, 0)) + tocLevel = time.perf_counter() + print(f" Level defined in {tocLevel - ticLevel:0.1f} seconds") + + ground_z = domainMin[2] + + for l in range(1, levels): + ticLevel = time.perf_counter() + d = df[::2, ::2, ::2] + v = v * 2 + print("/// Level", l, "voxel size:", v) + full_shape = np.round(domainSize / v).astype(int) + + if l < levels - 1: + dg, dOrigin = grow_gpu(d, v, dOrigin, kernel[l]) + df_natural, dOrigin = fill_gpu(dg, v, dOrigin, close) + df_natural, dOrigin = crop_gpu(df_natural, dOrigin, domainMin, domainMax, v) + df_natural = pad_to_even(df_natural) + else: + df_natural = np.ones(tuple(full_shape), bool) + dOrigin = domainMin + + if ground_refinement_level != -1 and l == ground_refinement_level: + df_ground = np.zeros(tuple(full_shape), bool) + dOrigin_ground = domainMin + ground_z_index = int(np.round((ground_z - dOrigin_ground[2]) / v)) + # Ground Voxel Thickness + n_thick = ground_voxel_height + if 0 <= ground_z_index < full_shape[2]: + end_z = min(ground_z_index + n_thick, full_shape[2]) + df_ground[:, :, ground_z_index:end_z] = True + + offset = np.round((dOrigin - dOrigin_ground) / v).astype(int) + x0, y0, z0 = offset + x1, y1, z1 = x0 + df_natural.shape[0], y0 + df_natural.shape[1], z0 + df_natural.shape[2] + x0, y0, z0 = np.maximum([x0, y0, z0], 0) + x1, y1, z1 = np.minimum([x1, y1, z1], full_shape) + df = 
np.zeros(tuple(full_shape), bool) + if x1 > x0 and y1 > y0 and z1 > z0: + df[x0:x1, y0:y1, z0:z1] = df_natural[: (x1 - x0), : (y1 - y0), : (z1 - z0)] + df |= df_ground + dOrigin = dOrigin_ground + else: + df = df_natural + + dr = remove_gpu(df, dOrigin, d, origin, v) + level_data.append((dr, v, dOrigin, l)) + tocLevel = time.perf_counter() + print(f" Level defined in {tocLevel - ticLevel:0.1f} seconds") + origin = np.copy(dOrigin) + + toc = time.perf_counter() + + print() + print("/// Mesh Data Report") + finest_possible_voxels = int(np.prod(domainSize / voxSize)) + total_voxels_billions = finest_possible_voxels / 1e9 + print(f" Total domain size: {total_voxels_billions:.2f} billion voxels (full dense at {voxSize} m)") + + total_voxel_count = sum(np.sum(dr) for dr, _, _, _ in level_data) + total_voxel_count_millions = total_voxel_count / 1e6 + print(f" Total voxel count: {total_voxel_count_millions:.2f} million") + + percentage_reduction = ((finest_possible_voxels - total_voxel_count) / finest_possible_voxels) * 100 if finest_possible_voxels > 0 else 0 + print(f" Percentage reduction: {percentage_reduction:.4f}% (vs. uniform dense grid)") + + print(" Voxel distribution per level:") + headers = ["Level", "Voxel Size (m)", "Voxels (M)", "Percentage (%)", "Computation (%)"] + table_data = [] + # Calculate computational work + num_levels = len(level_data) + comp_work = [] + for l, (dr, v, _, _) in enumerate(level_data): + voxel_count = np.sum(dr) + # Inner iterations: finest level (l=0) has 2^(num_levels-1), coarsest (l=num_levels-1) has 2^0=1 + inner_iterations = 2 ** (num_levels - 1 - l) + work = voxel_count * inner_iterations + comp_work.append(work) + total_work = sum(comp_work) + for l, (dr, v, _, _) in enumerate(level_data): + voxel_count = np.sum(dr) + voxel_count_millions = voxel_count / 1e6 + percentage = (voxel_count / total_voxel_count) * 100 if total_voxel_count > 0 else 0 + comp_percentage = (comp_work[l] / total_work) * 100 if total_work > 0 else 0 + table_data.append([l, v, f"{voxel_count_millions:.2f}", f"{percentage:.2f}", f"{comp_percentage:.2f}"]) + print(tabulate(table_data, headers=headers, tablefmt="grid")) + print() + + # Downsample levels up to (and including) the given 'downsample' threshold. 
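+    # Levels 0..downsample are replaced below by 2x-coarser copies (stride-2 slicing, doubled voxel size) before being returned for saving.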
+ if downsample >= 0: + print(f"Downsampling levels 0 to {downsample} for file saving; doubling their voxel sizes.") + for i, (dr, v, dOrigin, lev) in enumerate(level_data): + if lev <= downsample: + dr_down = dr[::2, ::2, ::2] + v_down = v * 2 + level_data[i] = (dr_down, v_down, dOrigin, lev) + + ##NEW SHIFT TO FIRST OCTANT + level_data = [(dr, int(v / voxSize), np.round(dOrigin / v).astype(int), l) for dr, v, dOrigin, l in level_data] + + # Domain Adjustment Report + print("/// Domain Extension Report") + print(f" Original domain: {orig_domainMin} to {orig_domainMax}") + print(f" Adjusted domain: {domainMin} to {domainMax}") + + # Calculate extensions in terms of finest voxels + extension_pos = (domainMax - orig_domainMax) / voxSize + extension_neg = (orig_domainMin - domainMin) / voxSize + + print(" Extension in positive directions (in finest voxels):") + print(f" +x: {extension_pos[0]:.2f}") + print(f" +y: {extension_pos[1]:.2f}") + print(f" +z: {extension_pos[2]:.2f}") + print(" Extension in negative directions (in finest voxels):") + print(f" -x: {extension_neg[0]:.2f}") + print(f" -y: {extension_neg[1]:.2f}") + print(f" -z: {extension_neg[2]:.2f}") + print() + + print("/// Level Shapes Report") + headers = ["Level", "Shape", "Origin"] + table_data = [] + for l, (dr, _, dOrigin, _) in enumerate(level_data): + shape_str = f"({dr.shape[0]}, {dr.shape[1]}, {dr.shape[2]})" + origin_str = f"({dOrigin[0]}, {dOrigin[1]}, {dOrigin[2]})" + table_data.append([l, shape_str, origin_str]) + print(tabulate(table_data, headers=headers, tablefmt="grid")) + + print(f"/// Mesh Generation Complete in {toc - tic:0.2f} seconds") + print() + + sparsity_pattern, level_origins = prepare_sparsity_pattern(level_data) + + return level_data, domainSize / voxSize, sparsity_pattern, level_origins + +def prepare_sparsity_pattern(level_data): + """ + Prepare the sparsity pattern for the multiresolution grid based on the level data. + """ + sparsity_pattern = [] + level_origins = [] + for lvl in range(len(level_data)): + level_mask = level_data[lvl][0] + level_mask = np.ascontiguousarray(level_mask, dtype=np.int32) + sparsity_pattern.append(level_mask) + level_origins.append(level_data[lvl][2]) + return sparsity_pattern, level_origins + + +# Calculate convolution kernels based on padding values +def calculate_kernel(padding_values): + kernels = [] + for level, values in enumerate(padding_values): + xn, xp, yn, yp, zn, zp = values + x_dim = max(xp, xn) * 2 + 1 + y_dim = max(yp, yn) * 2 + 1 + z_dim = max(zp, zn) * 2 + 1 + ones_x = xp + xn + 1 + ones_y = yp + yn + 1 + ones_z = zp + zn + 1 + mid_x = (x_dim - 1) // 2 + x1 = mid_x - xp + x2 = x_dim - (mid_x - xn) + mid_y = (y_dim - 1) // 2 + y1 = mid_y - yp + y2 = y_dim - (mid_y - yn) + mid_z = (z_dim - 1) // 2 + z1 = mid_z - zp + z2 = z_dim - (mid_z - zn) + kernel = np.zeros((x_dim, y_dim, z_dim), bool) + kernel[x1:x2, y1:y2, z1:z2] = np.ones((x2 - x1, y2 - y1, z2 - z1), bool) + kernels.append(kernel) + return kernels \ No newline at end of file diff --git a/xlb/utils/mesher.py b/xlb/utils/mesher.py index d71caf93..7201d33f 100644 --- a/xlb/utils/mesher.py +++ b/xlb/utils/mesher.py @@ -6,6 +6,7 @@ import warp as wp + def adjust_bbox(cuboid_max, cuboid_min, voxel_size_up): """ Adjust the bounding box to the nearest points of one level finer grid that encloses the desired region. @@ -18,10 +19,23 @@ def adjust_bbox(cuboid_max, cuboid_min, voxel_size_up): Returns: tuple: (adjusted_min, adjusted_max) snapped to grid points of one level higher. 
""" + adjusted_min = np.round(cuboid_min / voxel_size_up) * voxel_size_up adjusted_max = np.round(cuboid_max / voxel_size_up) * voxel_size_up return adjusted_min, adjusted_max +def prepare_sparsity_pattern(level_data): + """ + Prepare the sparsity pattern for the multiresolution grid based on the level data. + """ + sparsity_pattern = [] + level_origins = [] + for lvl in range(len(level_data)): + level_mask = level_data[lvl][0] + level_mask = np.ascontiguousarray(level_mask, dtype=np.int32) + sparsity_pattern.append(level_mask) + level_origins.append(level_data[lvl][2]) + return sparsity_pattern, level_origins def make_cuboid_mesh(voxel_size, cuboids, stl_filename): """ @@ -81,13 +95,13 @@ def make_cuboid_mesh(voxel_size, cuboids, stl_filename): xmin, ymin, zmin = adjusted_min xmax, ymax, zmax = adjusted_max - + # Compute number of voxels based on level-specific voxel size nx = int(np.round((xmax - xmin) / voxel_size_level)) ny = int(np.round((ymax - ymin) / voxel_size_level)) nz = int(np.round((zmax - zmin) / voxel_size_level)) print(f"Domain {nx}, {ny}, {nz} Origin {adjusted_min} Voxel Size {voxel_size_level} Voxel Level Up {voxel_level_up}") - + voxel_matrix = np.ones((nx, ny, nz), dtype=bool) origin = adjusted_min @@ -121,11 +135,15 @@ def make_cuboid_mesh(voxel_size, cuboids, stl_filename): num_levels = len(level_data) level_data = [(dr, int(v / voxel_size), np.round(dOrigin / v).astype(int), num_levels - 1 - l) for dr, v, dOrigin, l in level_data] - return list(reversed(level_data)) + level_data = list(reversed(level_data)) + sparsity_pattern, level_origins = prepare_sparsity_pattern(level_data) + + return level_data, sparsity_pattern, level_origins + class MultiresIO(object): - def __init__(self, field_name_cardinality_dict, levels_data, scale=1, offset=(0.0, 0.0, 0.0), store_precision=None): + def __init__(self, field_name_cardinality_dict, levels_data, scale=1, offset=(0.0, 0.0, 0.0), store_precision=None, timestep_size=1): """ Initialize the MultiresIO object. @@ -137,11 +155,13 @@ def __init__(self, field_name_cardinality_dict, levels_data, scale=1, offset=(0. levels_data : list of tuples Each tuple contains (data, voxel_size, origin, level). scale : float or tuple, optional - Scale factor for the coordinates. + Scale factor for the coordinates. Typically smallest voxel size offset : tuple, optional Offset to be applied to the coordinates. store_precision : str, optional The precision policy for storing data. + timestep_size: float + Scale factor to convert velocities to model units. Typically smallest timestep size """ # Process the multires geometry and extract coordinates and connectivity in the coordinate system of the finest level coordinates, connectivity, level_id_field, total_cells = self.process_geometry(levels_data, scale) @@ -164,6 +184,14 @@ def __init__(self, field_name_cardinality_dict, levels_data, scale=1, offset=(0. 
self.total_cells = total_cells self.centroids = np.mean(coordinates[connectivity], axis=1) + + # For converting velocities and pressures to physical model units (pressure scaling uses air density 1.204 kg/m^3 and cs^2 = 1/3) + if scale != 1 and timestep_size != 1: + self.velocity_conversion = scale / timestep_size + self.pressure_conversion = (1.204 / 3) * (scale**2 / timestep_size**2) + else: + self.velocity_conversion = 1 + self.pressure_conversion = 1 + # Set the default precision policy if not provided from xlb import DefaultConfig @@ -259,6 +287,36 @@ def _process_voxel_chunk(self, true_indices, origin, voxel_size, point_id_offset return corners, connectivity + def optimal_chunk_shape(self, shape, dtype, target_mb=4, min_chunks=64, max_chunks=4096, max_chunk_mb=64): + """ + Choose a row-major HDF5 chunk shape for compression: + - target_mb: desired uncompressed bytes per chunk (e.g., 4 for gzip, 8–16 for lzf/lz4) + - keeps total number of chunks roughly in [min_chunks, max_chunks] + - caps chunk size at max_chunk_mb to limit per-chunk memory + + Returns a tuple suitable for h5py.create_dataset(..., chunks=...). + """ + + n0 = int(shape[0]) if len(shape) else 1 + itemsize = np.dtype(dtype).itemsize + row_elems = int(np.prod(shape[1:], dtype=np.int64)) if len(shape) > 1 else 1 + row_bytes = max(1, row_elems * itemsize) + + # initial rows by target size + target_bytes = int(target_mb * 1024 * 1024) + rows = max(1, target_bytes // row_bytes) + + # clamp by desired chunk-count window + lower_rows = max(1, (n0 + max_chunks - 1) // max_chunks) # ensures <= max_chunks + upper_rows = max(1, n0 // max_chunks if min_chunks == 0 else n0 // min_chunks) # ensures >= min_chunks + rows = min(max(rows, lower_rows), max(1, upper_rows)) + + # cap by max bytes per chunk + max_bytes = int(max_chunk_mb * 1024 * 1024) + rows = min(rows, max(1, max_bytes // row_bytes)) + + return (min(n0, rows),) + tuple(shape[1:]) + def save_xdmf(self, h5_filename, xmf_filename, total_cells, num_points, fields={}): # Generate an XDMF file to accompany the HDF5 file print(f"\tGenerating XDMF file: {xmf_filename}") @@ -322,19 +380,23 @@ def save_hdf5_file(self, filename, coordinates, connectivity, level_id_field, fi """ import h5py + pts_chunks = self.optimal_chunk_shape(coordinates.shape, np.float32, target_mb=4) + conn_chunks = self.optimal_chunk_shape(connectivity.shape, np.int32, target_mb=4) + lvl_chunks = self.optimal_chunk_shape(level_id_field.shape, np.uint8, target_mb=4) + fld_chunks = self.optimal_chunk_shape((self.total_cells,), np.float32, target_mb=4) + with h5py.File(filename + ".h5", "w") as f: - f.create_dataset("/Mesh/Points", data=coordinates, compression=compression, compression_opts=compression_opts, chunks=True) - f.create_dataset( - "/Mesh/Connectivity", - data=connectivity, - compression=compression, - compression_opts=compression_opts, - chunks=True, - ) - f.create_dataset("/Mesh/Level", data=level_id_field, compression=compression, compression_opts=compression_opts) + f.create_dataset("/Mesh/Points", data=coordinates, compression=compression, compression_opts=compression_opts, chunks=pts_chunks, shuffle=True) + f.create_dataset("/Mesh/Connectivity", data=connectivity, compression=compression, compression_opts=compression_opts, chunks=conn_chunks, shuffle=True) + f.create_dataset("/Mesh/Level", data=level_id_field, compression=compression, compression_opts=compression_opts, chunks=lvl_chunks, shuffle=True) fg = f.create_group("/Fields") for fname, fdata in fields_data.items(): - fg.create_dataset(fname, data=fdata.astype(np.float32), compression=compression, compression_opts=compression_opts,
chunks=True) + # Convert LBM velocity and density fields to physical units + if "velocity" in fname.lower(): + fdata = fdata * self.velocity_conversion + elif "rho" in fname.lower(): + fdata = fdata * self.pressure_conversion + fg.create_dataset(fname, data=fdata.astype(np.float32), compression=compression, compression_opts=compression_opts, chunks=fld_chunks, shuffle=True) def _merge_duplicates(self, coordinates, connectivity, levels_data): # Merging duplicate points @@ -445,7 +507,7 @@ def get_fields_data(self, field_neon_dict): # Ensure that this operator is called on multires grids grid_mres = next(iter(field_neon_dict.values())).get_grid() - assert grid_mres.name == "mGrid", f"Operation {self.__class__.__name} is only applicable to multi-resolution cases" + assert grid_mres.name == "mGrid", f"Operation {self.__class__.__name__} is only applicable to multi-resolution cases" for field_name in field_neon_dict.keys(): assert field_name in self.field_name_cardinality_dict.keys(), ( @@ -532,8 +594,10 @@ def to_slice_image( grid_res=512, cmap=None, component=None, - show_axes=False, - show_colorbar=False, + show_axes=True, + show_colorbar=True, + normalize=1.0, + output=None, **kwargs, ): """ @@ -558,14 +622,19 @@ Physical size of slice grid (width, height). cmap : str Matplotlib colormap. + normalize : float + Reference value used to scale the data into [0, 1] so successive images share a consistent color range + output : str + None = save a PNG only, 'array' = return the sampled array without saving a PNG, 'both' = save a PNG and return the array """ + # Get the fields data from the NEON fields assert len(field_neon_dict.keys()) == 1, "Error: This function is designed to plot a single field at a time." fields_data = self.get_fields_data(field_neon_dict) # Check if the component is within the valid range if component is None: print("\tCreating slice image of the field magnitude!") cell_data = list(fields_data.values()) squared = [comp**2 for comp in cell_data] cell_data = np.sqrt(sum(squared)) @@ -577,9 +646,15 @@ print(f"\tCreating slice image for component {component} of the input field!") field_name = list(fields_data.keys())[component] cell_data = fields_data[field_name] - + if "velocity" in field_name.lower(): + cell_data = cell_data * self.velocity_conversion + elif "rho" in field_name.lower(): + cell_data = cell_data * self.pressure_conversion + + if normalize != 1.0: + cell_data = np.clip(cell_data / normalize, 0, 1) # Plot each field in the dictionary - self._to_slice_image_single_field( + result = self._to_slice_image_single_field( f"{output_filename}_{field_name}", cell_data, plane_point, @@ -590,10 +667,18 @@ cmap=cmap, show_axes=show_axes, show_colorbar=show_colorbar, + output=output, + normalize=normalize, **kwargs, ) - print(f"\tSlice image for field {field_name} saved as {output_filename}.png") - + if output == 'array': + return result + elif output == 'both': + print(f"\tSlice image for field {field_name} saved as {output_filename}.png") + return result + else: + print(f"\tSlice image for field {field_name} saved as {output_filename}.png") + def _to_slice_image_single_field( self, output_filename, @@ -606,6 +691,8 @@ cmap, show_axes, show_colorbar, + output, + normalize, **kwargs, ): """ @@ -615,7 +702,8 @@ import numpy as np import matplotlib.pyplot as plt from scipy.spatial import cKDTree - + + # field data are associated with the cells
centers cell_values = field_data @@ -665,35 +753,36 @@ def _to_slice_image_single_field( bounded_y_min = local_y[mask_bounds].min() bounded_y_max = local_y[mask_bounds].max() width_x = bounded_x_max - bounded_x_min - height_y = bounded_y_max - bounded_y_min - aspect_ratio = height_y / width_x - grid_resY = max(1, int(np.round(grid_res * aspect_ratio))) - + height_y = bounded_y_max - bounded_y_min + aspect_ratio = height_y / width_x + grid_resY = max(1, int(np.round(grid_res*aspect_ratio))) # Create grid grid_x = np.linspace(bounded_x_min, bounded_x_max, grid_res) grid_y = np.linspace(bounded_y_min, bounded_y_max, grid_resY) xv, yv = np.meshgrid(grid_x, grid_y, indexing="xy") - + # Fast KDTree-based interpolation points = np.column_stack((local_x[mask_bounds], local_y[mask_bounds])) tree = cKDTree(points) - + # Query points query_points = np.column_stack((xv.ravel(), yv.ravel())) - + # Find k nearest neighbors for smoother interpolation k = min(4, len(points)) # Use 4 neighbors or less if not enough points - distances, indices = tree.query(query_points, k=k, workers=-1) # -1 uses all cores - + distances, indices = tree.query(query_points, k=k, workers=-1) #-1 uses all cores + # Inverse distance weighting epsilon = 1e-10 weights = 1.0 / (distances + epsilon) weights /= weights.sum(axis=1, keepdims=True) - + # Interpolate values neighbor_values = values[mask_bounds][indices] grid_field = (neighbor_values * weights).sum(axis=1).reshape(grid_resY, grid_res) - + + if output == 'array': + return grid_field # Plot if show_colorbar or show_axes: dpi = 300 @@ -704,34 +793,39 @@ def _to_slice_image_single_field( origin="lower", aspect="equal", **kwargs, - ) + ) if show_colorbar: plt.colorbar() if not show_axes: - plt.axis("off") + plt.axis('off') plt.savefig(output_filename + ".png", dpi=dpi, bbox_inches="tight", pad_inches=0) plt.close() else: - plt.imsave(output_filename + ".png", grid_field, cmap=cmap, origin="lower") - - def to_line( - self, - output_filename, - field_neon_dict, - start_point, - end_point, - resolution, + if normalize != 1.0: + plt.imsave(output_filename + ".png", grid_field, cmap=cmap, origin="lower", vmin=0, vmax=1) + else: + plt.imsave(output_filename + ".png", grid_field, cmap=cmap, origin="lower") + + if output == 'both': + return grid_field + + + def to_line(self, + output_filename, + field_neon_dict, + start_point, + end_point, + resolution, component=None, radius=1.0, - **kwargs, - ): + **kwargs,): """ Extract field data along a line between start_point and end_point and save to a CSV file. This function performs two main steps: 1. Extracts field data from field_neon_dict, handling components or computing magnitude. 2. Interpolates the field values along a line defined by start_point and end_point, - then saves the results (coordinates and field values) to a CSV file. + then saves the results (coordinates and field values) to a CSV file. Parameters ---------- @@ -755,7 +849,7 @@ def to_line( The specific component of the field to extract (e.g., 0 for x-component, 1 for y-component). If None, the magnitude of the field is computed. Default is None. radius : int - The specified distance (in units of the coordinate system) to prefilter and query for line plot + The specified distance (in units of the coordinate system) to prefilter and query for line plot Returns ------- @@ -766,11 +860,12 @@ def to_line( ----- - The output CSV file will contain columns: 'x', 'y', 'z', and the value of the field name (e.g., 'velocity_x' or 'velocity_magnitude'). 
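- Fields whose names contain 'velocity' or 'rho' are scaled by velocity_conversion / pressure_conversion, respectively, before sampling.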
""" - + + # Get the fields data from the NEON fields assert len(field_neon_dict.keys()) == 1, "Error: This function is designed to plot a single field at a time." fields_data = self.get_fields_data(field_neon_dict) - + # Check if the component is within the valid range if component is None: print("\tCreating csv plot of the field magnitude!") @@ -778,7 +873,7 @@ def to_line( squared = [comp**2 for comp in cell_data] cell_data = np.sqrt(sum(squared)) field_name = list(fields_data.keys())[0].split("_")[0] + "_magnitude" - + else: assert component < max(self.field_name_cardinality_dict.values()), ( f"Error: Component {component} is out of range for the provided fields." @@ -786,7 +881,12 @@ def to_line( print(f"\tCreating csv plot for component {component} of the input field!") field_name = list(fields_data.keys())[component] cell_data = fields_data[field_name] - + + if "velocity" in field_name.lower(): + cell_data = cell_data * self.velocity_conversion + elif "rho" in field_name.lower(): + cell_data = cell_data * self.pressure_conversion + # Plot each field in the dictionary self._to_line_field( f"{output_filename}_{field_name}", @@ -798,36 +898,36 @@ def to_line( **kwargs, ) print(f"\tLine Plot for field {field_name} saved as {output_filename}.csv") - + def _to_line_field( - self, - output_filename, - cell_data, - start_point, - end_point, + self, + output_filename, + cell_data, + start_point, + end_point, resolution, radius, **kwargs, - ): + ): """ Helper function to create a line plot for a single field. """ import numpy as np - - # cell_points = self.coordinates[self.connectivity] # Shape: (M, K, 3), where M is num cells, K is nodes per cell - # centroids = np.mean(cell_points, axis=1) # Shape: (M, 3) + + #cell_points = self.coordinates[self.connectivity] # Shape: (M, K, 3), where M is num cells, K is nodes per cell + #centroids = np.mean(cell_points, axis=1) # Shape: (M, 3) centroids = self.centroids p0 = np.array(start_point, dtype=np.float32) p1 = np.array(end_point, dtype=np.float32) - + # direction and parameter t for each centroid - d = p1 - p0 + d = (p1 - p0) L = np.linalg.norm(d) d_unit = d / L v = centroids - p0 t = v.dot(d_unit) closest = p0 + np.outer(t, d_unit) - perp_dist = np.linalg.norm(centroids - closest, axis=1) + perp_dist = np.linalg.norm(centroids-closest, axis=1) # optionally mask to [0,L] or a small perp-radius mask = (t >= 0) & (t <= L) & (perp_dist <= radius) @@ -845,8 +945,14 @@ def _to_line_field( vals_line = np.interp(t_line, t_sorted, data_sorted, left=np.nan, right=np.nan) # reconstruct (x,y,z) - line_xyz = p0[None, :] + t_line[:, None] * d_unit[None, :] + line_xyz = p0[None,:] + t_line[:,None]*d_unit[None,:] # vectorized CSV dump - out = np.hstack([line_xyz, vals_line[:, None]]) - np.savetxt(output_filename + ".csv", out, delimiter=",", header="x,y,z,value", comments="") + out = np.hstack([line_xyz, vals_line[:,None]]) + np.savetxt( + output_filename + '.csv', + out, + delimiter=',', + header='x,y,z,value', + comments='' + )