diff --git a/advection/advection.py b/advection/advection.py index 53a251f..5a69a60 100644 --- a/advection/advection.py +++ b/advection/advection.py @@ -86,14 +86,17 @@ def fill_BCs(self): # right boundary self.a[self.ihi+1+n] = self.a[self.ilo+n] - def norm(self, e): + def norm(self, e, norm="inf"): """ return the norm of quantity e which lives on the grid """ if len(e) != 2*self.ng + self.nx: return None - #return np.sqrt(self.dx*np.sum(e[self.ilo:self.ihi+1]**2)) - return np.max(abs(e[self.ilo:self.ihi+1])) - + if norm==2: + return np.sqrt(self.dx*np.sum(e[self.ilo:self.ihi+1]**2)) + elif norm=="inf": + return np.max(abs(e[self.ilo:self.ihi+1])) + else: + raise NotImplementedError("Only norms implemented are '2' and 'inf'") class Simulation(object): diff --git a/advection/compare_schemes.py b/advection/compare_schemes.py new file mode 100644 index 0000000..a14140d --- /dev/null +++ b/advection/compare_schemes.py @@ -0,0 +1,135 @@ +""" +Comparing efficiency (accuracy vs runtime and memory) for various schemes. + +Note: needs 'pip install memory_profiler' first. + +At least for now, the memory usage seems to be completely misleading. +""" + +import timeit +# from memory_profiler import memory_usage +import numpy +from matplotlib import pyplot +import advection +import weno +import dg + + +def run_weno(order=2, nx=32): + g = advection.Grid1d(nx, ng=order+1, xmin=0, xmax=1) + s = weno.WENOSimulation(g, 1, C=0.5, weno_order=order) + s.init_cond("sine") + s.evolve_scipy() + + +def run_dg(m=3, nx=8, limiter=None): + g = dg.Grid1d(nx, ng=1, xmin=0, xmax=1, m=m) + s = dg.Simulation(g, 1, C=0.5/(2*m+1), limiter=limiter) + s.init_cond("sine") + s.evolve_scipy() + + +def weno_string(order=2, nx=32): + return f"run_weno(order={order}, nx={nx})" + + +def dg_string(m=3, nx=8, limiter=None): + return f"run_dg(m={m}, nx={nx}, limiter='{limiter}')" + + +def time_weno(order=2, nx=32): + return timeit.timeit(weno_string(order, nx), + globals=globals(), number=1) + + +def time_dg(m=3, nx=8, limiter=None): + return timeit.timeit(dg_string(m, nx, limiter), + globals=globals(), number=1) + + +def errs_weno(order=2, nx=32): + g = advection.Grid1d(nx, ng=order+1, xmin=0, xmax=1) + s = weno.WENOSimulation(g, 1, C=0.5, weno_order=order) + s.init_cond("sine") + a_init = g.a.copy() + s.evolve_scipy() + return g.norm(g.a - a_init) + + +def errs_dg(m=3, nx=8, limiter=None): + g = dg.Grid1d(nx, ng=1, xmin=0, xmax=1, m=m) + s = dg.Simulation(g, 1, C=0.5/(2*m+1), limiter=limiter) + s.init_cond("sine") + a_init = g.a.copy() + s.evolve_scipy() + return g.norm(g.a - a_init) + + +weno_orders = [3, 5, 7] +weno_N = [12, 16, 24, 32, 54, 64, 96, 128] +# weno_orders = [3] +# weno_N = [24, 32, 54] +weno_times = numpy.zeros((len(weno_orders), len(weno_N))) +weno_errs = numpy.zeros_like(weno_times) +# weno_memory = numpy.zeros_like(weno_times) + +# Do one evolution to kick-start numba +run_weno(3, 8) +run_dg(3, 8) + +for i_o, order in enumerate(weno_orders): + for i_n, nx in enumerate(weno_N): + print(f"WENO{order}, {nx} points") + weno_errs[i_o, i_n] = errs_weno(order, nx) + weno_times[i_o, i_n] = time_weno(order, nx) +# weno_memory[i_o, i_n] = max(memory_usage((run_weno, (order, nx)))) + +dg_ms = [2, 4, 8] +dg_N = 2**numpy.array(range(3, 7)) +# dg_ms = [2] +# dg_N = 2**numpy.array(range(3, 6)) +dg_moment_times = numpy.zeros((len(dg_ms), len(dg_N))) +dg_moment_errs = numpy.zeros_like(dg_moment_times) +# dg_nolimit_memory = numpy.zeros_like(dg_nolimit_times) +# dg_moment_memory = numpy.zeros_like(dg_moment_times) + +for i_m, m in 
enumerate(dg_ms): + for i_n, nx in enumerate(dg_N): + print(f"DG, m={m}, {nx} points") + dg_moment_errs[i_m, i_n] = errs_dg(m, nx, 'moment') + dg_moment_times[i_m, i_n] = time_dg(m, nx, 'moment') +# dg_moment_memory[i_m, i_n] = max(memory_usage((run_dg, +# (m, nx, 'moment')))) + +colors = "brk" +fig, ax = pyplot.subplots(1, 1) +for i_o, order in enumerate(weno_orders): + ax.loglog(weno_times[i_o, :], weno_errs[i_o, :], f'{colors[i_o]}o-', + label=f'WENO, r={order}') +colors = "gyc" +for i_m, m in enumerate(dg_ms): + ax.loglog(dg_moment_times[i_m, :], dg_moment_errs[i_m, :], + f'{colors[i_m]}^:', label=rf'DG, $m={m}$') +ax.set_xlabel("Runtime [s]") +ax.set_ylabel(r"$\|$Error$\|_2$") +ax.set_title("Efficiency of WENO vs DG") + +i_o = 0 +i_n = 4 +con_style = "arc,angleA=180,armA=50,rad=10" +ax.annotate(fr'WENO, $N={weno_N[i_n]}$', textcoords='data', + xy=(weno_times[i_o, i_n], 1.5*weno_errs[i_o, i_n]), + xytext=(5e-1, 1e-2), + arrowprops=dict(arrowstyle='->', connectionstyle=con_style)) +i_m = 1 +i_n = 0 +ax.annotate(fr'DG, $N={dg_N[i_n]}$', textcoords='data', + xy=(dg_moment_times[i_m, i_n], 0.5*dg_moment_errs[i_m, i_n]), + xytext=(3e-1, 1e-8), + arrowprops=dict(arrowstyle='->', connectionstyle=con_style)) + +fig.tight_layout() +lgd = ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) +fig.savefig('dg_weno_efficiency.pdf', + bbox_extra_artists=(lgd,), bbox_inches='tight') +pyplot.show() diff --git a/advection/dg.py b/advection/dg.py new file mode 100644 index 0000000..16e0343 --- /dev/null +++ b/advection/dg.py @@ -0,0 +1,606 @@ +""" +Discontinuous Galerkin for the advection equation. +""" + +import numpy +from numpy.polynomial import legendre +from matplotlib import pyplot +import matplotlib as mpl +import quadpy +from scipy.integrate import ode +from numba import jit + +mpl.rcParams['mathtext.fontset'] = 'cm' +mpl.rcParams['mathtext.rm'] = 'serif' + + +class Grid1d(object): + + def __init__(self, nx, ng, xmin=0.0, xmax=1.0, m=3): + + assert m > 0 + + self.ng = ng + self.nx = nx + self.m = m + + self.xmin = xmin + self.xmax = xmax + + # python is zero-based. Make easy intergers to know where the + # real data lives + self.ilo = ng + self.ihi = ng+nx-1 + + # physical coords -- cell-centered, left and right edges + self.dx = (xmax - xmin)/(nx) + self.x = xmin + (numpy.arange(nx+2*ng)-ng+0.5)*self.dx + self.xl = xmin + (numpy.arange(nx+2*ng)-ng)*self.dx + self.xr = xmin + (numpy.arange(nx+2*ng)-ng+1.0)*self.dx + + # storage for the solution + # These are the modes of the solution at each point, so the + # first index is the mode + # NO! 
These are actually the *nodal* values + self.a = numpy.zeros((self.m+1, nx+2*ng), dtype=numpy.float64) + + # Need the Gauss-Lobatto nodes and weights in the reference element + GL = quadpy.line_segment.GaussLobatto(m+1) + self.nodes = GL.points + self.weights = GL.weights + # To go from modal to nodal we need the Vandermonde matrix + self.V = legendre.legvander(self.nodes, m) + c = numpy.eye(m+1) + # Orthonormalize + for p in range(m+1): + self.V[:, p] /= numpy.sqrt(2/(2*p+1)) + c[p, p] /= numpy.sqrt(2/(2*p+1)) + self.V_inv = numpy.linalg.inv(self.V) + self.M = numpy.linalg.inv(self.V @ self.V.T) + self.M_inv = self.V @ self.V.T + # Derivatives of Legendre polynomials lead to derivatives of V + dV = legendre.legval(self.nodes, + legendre.legder(c)).T + self.D = dV @ self.V_inv + # Stiffness matrix for the interior flux + self.S = self.M @ self.D + + # Nodes in the computational coordinates + self.all_nodes = numpy.zeros((m+1)*(nx+2*ng), dtype=numpy.float64) + self.all_nodes_per_node = numpy.zeros_like(self.a) + for i in range(nx+2*ng): + self.all_nodes[(m+1)*i:(m+1)*(i+1)] = (self.x[i] + + self.nodes * self.dx / 2) + self.all_nodes_per_node[:, i] = (self.x[i] + + self.nodes * self.dx / 2) + + def modal_to_nodal(self, modal): + for i in range(self.nx+2*self.ng): + self.a[:, i] = self.V @ modal[:, i] + return self.a + + def nodal_to_modal(self): + modal = numpy.zeros_like(self.a) + for i in range(self.nx+2*self.ng): + modal[:, i] = self.V_inv @ self.a[:, i] + return modal + + def plotting_data(self): + return (self.all_nodes, + self.a.ravel(order='F')) + + def plotting_data_high_order(self, npoints=50): + assert npoints > 2 + p_nodes = numpy.zeros(npoints*(self.nx+2*self.ng), dtype=numpy.float64) + p_data = numpy.zeros_like(p_nodes) + for i in range(self.nx+2*self.ng): + p_nodes[npoints*i:npoints*(i+1)] = numpy.linspace(self.xl[i], + self.xr[i], + npoints) + modal = self.V_inv @ self.a[:, i] + for p in range(self.m+1): + modal[p] /= numpy.sqrt(2/(2*p+1)) + scaled_x = 2 * (p_nodes[npoints*i:npoints*(i+1)] - + self.x[i]) / self.dx + p_data[npoints*i:npoints*(i+1)] = legendre.legval(scaled_x, modal) + return p_nodes, p_data + + def scratch_array(self): + """ return a scratch array dimensioned for our grid """ + return numpy.zeros((self.m+1, self.nx+2*self.ng), dtype=numpy.float64) + + def fill_BCs(self): + """ fill all single ghostcell with periodic boundary conditions """ + + for n in range(self.ng): + # left boundary + self.a[:, self.ilo-1-n] = self.a[:, self.ihi-n] + # right boundary + self.a[:, self.ihi+1+n] = self.a[:, self.ilo+n] + + def norm(self, e): + """ + Return the norm of quantity e which lives on the grid. + + This is the 'broken norm': the quantity is integrated over each + individual element using Gauss-Lobatto quadrature (as we have those + nodes and weights), and the 2-norm of the result is then returned. 
+ """ + if not numpy.allclose(e.shape, self.all_nodes_per_node.shape): + return None + + # This is actually a pointwise norm, not quadrature'd + return numpy.sqrt(self.dx*numpy.sum(e[:, self.ilo:self.ihi+1]**2)) + + +@jit +def minmod3(a1, a2, a3): + """ + Utility function that does minmod on three inputs + """ + signs1 = a1 * a2 + signs2 = a1 * a3 + signs3 = a2 * a3 + same_sign = numpy.logical_and(numpy.logical_and(signs1 > 0, + signs2 > 0), + signs3 > 0) + minmod = numpy.min(numpy.abs(numpy.vstack((a1, a2, a3))), + axis=0) * numpy.sign(a1) + + return numpy.where(same_sign, minmod, numpy.zeros_like(a1)) + + +@jit +def limit(g, limiter): + """ + After evolution, limit the slopes. + """ + + # Limiting! + + if limiter == "moment": + + # Limiting, using moment limiting (Hesthaven p 445-7) + theta = 2 + a_modal = g.nodal_to_modal() + # First, work out where limiting is needed + limiting_todo = numpy.ones(g.nx+2*g.ng, dtype=bool) + limiting_todo[:g.ng] = False + limiting_todo[-g.ng:] = False + # Get the cell average and the nodal values at the boundaries + a_zeromode = a_modal.copy() + a_zeromode[1:, :] = 0 + a_cell = (g.V @ a_zeromode)[0, :] + a_minus = g.a[0, :] + a_plus = g.a[-1, :] + # From the cell averages and boundary values we can construct + # alternate values at the boundaries + a_left = numpy.zeros(g.nx+2*g.ng) + a_right = numpy.zeros(g.nx+2*g.ng) + a_left[1:-1] = a_cell[1:-1] - minmod3(a_cell[1:-1] - a_minus[1:-1], + a_cell[1:-1] - a_cell[:-2], + a_cell[2:] - a_cell[1:-1]) + a_right[1:-1] = a_cell[1:-1] + minmod3(a_plus[1:-1] - a_cell[1:-1], + a_cell[1:-1] - a_cell[:-2], + a_cell[2:] - a_cell[1:-1]) + limiting_todo[1:-1] = numpy.logical_not( + numpy.logical_and(numpy.isclose(a_left[1:-1], + a_minus[1:-1]), + numpy.isclose(a_right[1:-1], + a_plus[1:-1]))) + # Now, do the limiting. Modify moments where needed, and as soon as + # limiting isn't needed, stop + updated_mode = numpy.zeros(g.nx+2*g.ng) + for i in range(g.m-1, -1, -1): + factor = numpy.sqrt((2*i+3)*(2*i+5)) + a1 = factor * a_modal[i+1, 1:-1] + a2 = theta * (a_modal[i, 2:] - a_modal[i, 1:-1]) + a3 = theta * (a_modal[i, 1:-1] - a_modal[i, :-2]) + updated_mode[1:-1] = minmod3(a1, a2, a3) / factor + did_it_limit = numpy.isclose(a_modal[i+1, 1:-1], + updated_mode[1:-1]) + a_modal[i+1, limiting_todo] = updated_mode[limiting_todo] + limiting_todo[1:-1] = numpy.logical_and(limiting_todo[1:-1], + numpy.logical_not(did_it_limit)) + # Get back nodal values + limited_a = g.modal_to_nodal(a_modal) + + else: + limited_a = g.a + + return limited_a + + +class Simulation(object): + + def __init__(self, grid, u, C=0.8, limiter=None): + self.grid = grid + self.t = 0.0 # simulation time + self.u = u # the constant advective velocity + self.C = C # CFL number + self.limiter = limiter # What it says. 
+ + def init_cond(self, type="sine"): + """ initialize the data """ + if type == "tophat": + def init_a(x): + return numpy.where(numpy.logical_and(x >= 0.333, + x <= 0.666), + numpy.ones_like(x), + numpy.zeros_like(x)) + elif type == "sine": + def init_a(x): + return numpy.sin(2.0 * numpy.pi * x / + (self.grid.xmax - self.grid.xmin)) + elif type == "gaussian": + def init_a(x): + local_xl = x - self.grid.dx/2 + local_xr = x + self.grid.dx/2 + al = 1.0 + numpy.exp(-60.0*(local_xl - 0.5)**2) + ar = 1.0 + numpy.exp(-60.0*(local_xr - 0.5)**2) + ac = 1.0 + numpy.exp(-60.0*(x - 0.5)**2) + + return (1./6.)*(al + 4*ac + ar) + + self.grid.a = init_a(self.grid.all_nodes_per_node) + + def timestep(self): + """ return the advective timestep """ + return self.C*self.grid.dx/self.u + + def period(self): + """ return the period for advection with velocity u """ + return (self.grid.xmax - self.grid.xmin)/self.u + + def states(self): + """ compute the left and right interface states """ + + # Evaluate the nodal values at the domain edges + g = self.grid + + # Extract nodal values + + al = numpy.zeros(g.nx+2*g.ng) + ar = numpy.zeros(g.nx+2*g.ng) + + # i is looping over interfaces, so al is the right edge of the left + # element, etc. + for i in range(g.ilo, g.ihi+2): + al[i] = g.a[-1, i-1] + ar[i] = g.a[0, i] + + return al, ar + + def limit(self): + """ + After evolution, limit the slopes. + """ + + # Evaluate the nodal values at the domain edges + g = self.grid + + # Limiting! + + if self.limiter == "moment": + + # Limiting, using moment limiting (Hesthaven p 445-7) + theta = 2 + a_modal = g.nodal_to_modal() + # First, work out where limiting is needed + limiting_todo = numpy.ones(g.nx+2*g.ng, dtype=bool) + limiting_todo[:g.ng] = False + limiting_todo[-g.ng:] = False + # Get the cell average and the nodal values at the boundaries + a_zeromode = a_modal.copy() + a_zeromode[1:, :] = 0 + a_cell = (g.V @ a_zeromode)[0, :] + a_minus = g.a[0, :] + a_plus = g.a[-1, :] + # From the cell averages and boundary values we can construct + # alternate values at the boundaries + a_left = numpy.zeros(g.nx+2*g.ng) + a_right = numpy.zeros(g.nx+2*g.ng) + a_left[1:-1] = a_cell[1:-1] - minmod3(a_cell[1:-1] - a_minus[1:-1], + a_cell[1:-1] - a_cell[:-2], + a_cell[2:] - a_cell[1:-1]) + a_right[1:-1] = a_cell[1:-1] + minmod3(a_plus[1:-1] - a_cell[1:-1], + a_cell[1:-1] - a_cell[:-2], + a_cell[2:] - a_cell[1:-1]) + limiting_todo[1:-1] = numpy.logical_not( + numpy.logical_and(numpy.isclose(a_left[1:-1], + a_minus[1:-1]), + numpy.isclose(a_right[1:-1], + a_plus[1:-1]))) + # Now, do the limiting. 
Modify moments where needed, and as soon as + # limiting isn't needed, stop + updated_mode = numpy.zeros(g.nx+2*g.ng) + for i in range(g.m-1, -1, -1): + factor = numpy.sqrt((2*i+3)*(2*i+5)) + a1 = factor * a_modal[i+1, 1:-1] + a2 = theta * (a_modal[i, 2:] - a_modal[i, 1:-1]) + a3 = theta * (a_modal[i, 1:-1] - a_modal[i, :-2]) + updated_mode[1:-1] = minmod3(a1, a2, a3) / factor + did_it_limit = numpy.isclose(a_modal[i+1, 1:-1], + updated_mode[1:-1]) + a_modal[i+1, limiting_todo] = updated_mode[limiting_todo] + limiting_todo[1:-1] = numpy.logical_and(limiting_todo[1:-1], + numpy.logical_not(did_it_limit)) + # Get back nodal values + g.a = g.modal_to_nodal(a_modal) + + return g.a + + def riemann(self, al, ar): + """ + Riemann problem for advection -- this is simply upwinding, + but we return the flux + """ + + if self.u > 0.0: + return self.u*al + else: + return self.u*ar + + def rk_substep(self): + """ + Take a single RK substep + """ + g = self.grid + g.fill_BCs() + rhs = g.scratch_array() + + # Integrate flux over element + f = self.u * g.a + interior_f = g.S.T @ f + # Use Riemann solver to get fluxes between elements + boundary_f = self.riemann(*self.states()) + rhs = interior_f + rhs[0, 1:-1] += boundary_f[1:-1] + rhs[-1, 1:-1] -= boundary_f[2:] + + # Multiply by mass matrix (inverse). + rhs_i = 2 / g.dx * g.M_inv @ rhs + + return rhs_i + + def evolve(self, num_periods=1): + """ evolve the linear advection equation using RK3 (SSP) """ + self.t = 0.0 + g = self.grid + + tmax = num_periods*self.period() + + # main evolution loop + while self.t < tmax: + # fill the boundary conditions + g.fill_BCs() + + # get the timestep + dt = self.timestep() + + if self.t + dt > tmax: + dt = tmax - self.t + + # RK3 - SSP + # Store the data at the start of the step + a_start = g.a.copy() + k1 = dt * self.rk_substep() + g.a = a_start + k1 + g.a = self.limit() + a1 = g.a.copy() + k2 = dt * self.rk_substep() + g.a = (3 * a_start + a1 + k2) / 4 + g.a = self.limit() + a2 = g.a.copy() + k3 = dt * self.rk_substep() + g.a = (a_start + 2 * a2 + 2 * k3) / 3 + g.a = self.limit() + + self.t += dt + + def evolve_scipy(self, num_periods=1): + """ evolve the linear advection equation using scipy """ + self.t = 0.0 + g = self.grid + + def rk_substep_scipy(t, y): + local_a = numpy.reshape(y, g.a.shape) + # Periodic BCs + local_a[:, :g.ng] = local_a[:, -2*g.ng:-g.ng] + local_a[:, -g.ng:] = local_a[:, g.ng:2*g.ng] + # Integrate flux over element + f = self.u * local_a + interior_f = g.S.T @ f + # Use Riemann solver to get fluxes between elements + al = numpy.zeros(g.nx+2*g.ng) + ar = numpy.zeros(g.nx+2*g.ng) + # i is looping over interfaces, so al is the right edge of the left + # element, etc. + for i in range(g.ilo, g.ihi+2): + al[i] = local_a[-1, i-1] + ar[i] = local_a[0, i] + boundary_f = self.riemann(al, ar) + rhs = interior_f + rhs[0, 1:-1] += boundary_f[1:-1] + rhs[-1, 1:-1] -= boundary_f[2:] + + # Multiply by mass matrix (inverse). 
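# The 2/dx factor in the next line is the Jacobian of the map from the
# reference element [-1, 1] to a physical cell of width dx; multiplying by
# M_inv converts the weak-form residual into nodal time derivatives.
#
# Aside: the driver loop in evolve_scipy a few lines further down uses the
# (older) scipy.integrate.ode interface with the 'dop853' integrator. The
# bare pattern, shown on the scalar ODE y' = -y (names local to this sketch):
#
#     from scipy.integrate import ode
#     r = ode(lambda t, y: -y).set_integrator('dop853')
#     r.set_initial_value([1.0], 0.0)
#     r.integrate(0.1)
#     # r.y is now close to numpy.exp(-0.1)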
+ rhs_i = 2 / g.dx * g.M_inv @ rhs + + return numpy.ravel(rhs_i, order='C') + + tmax = num_periods*self.period() + + # main evolution loop + while self.t < tmax: + # fill the boundary conditions + g.fill_BCs() + + # get the timestep + dt = self.timestep() + + if self.t + dt > tmax: + dt = tmax - self.t + + r = ode(rk_substep_scipy).set_integrator('dop853') + r.set_initial_value(numpy.ravel(g.a), self.t) + r.integrate(r.t+dt) + g.a[:, :] = numpy.reshape(r.y, g.a.shape) + g.a = self.limit() + self.t += dt + + +if __name__ == "__main__": + + # Runs with limiter + g_sin_nolimit = Grid1d(16, 1, 0, 1, 3) + g_hat_nolimit = Grid1d(16, 1, 0, 1, 3) + g_sin_moment = Grid1d(16, 1, 0, 1, 3) + g_hat_moment = Grid1d(16, 1, 0, 1, 3) + s_sin_nolimit = Simulation(g_sin_nolimit, 1, 0.5/7, limiter=None) + s_hat_nolimit = Simulation(g_hat_nolimit, 1, 0.5/7, limiter=None) + s_sin_moment = Simulation(g_sin_moment, 1, 0.5/7, limiter="moment") + s_hat_moment = Simulation(g_hat_moment, 1, 0.5/7, limiter="moment") + for s in s_sin_nolimit, s_sin_moment: + s.init_cond("sine") + for s in s_hat_nolimit, s_hat_moment: + s.init_cond("tophat") + x_sin_start, a_sin_start = g_sin_nolimit.plotting_data() + x_hat_start, a_hat_start = g_hat_nolimit.plotting_data() + for s in s_sin_nolimit, s_sin_moment, s_hat_nolimit, s_hat_moment: + s.evolve() + fig, axes = pyplot.subplots(1, 2) + axes[0].plot(x_sin_start, a_sin_start, 'k-', label='Exact') + axes[1].plot(x_hat_start, a_hat_start, 'k-', label='Exact') + for i, g in enumerate((g_sin_nolimit, g_hat_nolimit)): + axes[i].plot(*g.plotting_data(), 'b--', label='No limiting') + axes[i].set_xlim(0, 1) + axes[i].set_xlabel(r"$x$") + axes[i].set_ylabel(r"$a$") + for i, g in enumerate((g_sin_moment, g_hat_moment)): + axes[i].plot(*g.plotting_data(), 'r:', label='Moment limiting') + axes[i].set_xlim(0, 1) + axes[i].set_xlabel(r"$x$") + axes[i].set_ylabel(r"$a$") + fig.tight_layout() + lgd = axes[1].legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) + fig.savefig('dg_limiter.pdf', + bbox_extra_artists=(lgd,), bbox_inches='tight') + pyplot.show() + + # ------------------------------------------------------------------------- + # Show the "grid" using a sine wave + + xmin = 0.0 + xmax = 1.0 + nx = 4 + ng = 1 + + u = 1.0 + + colors = "kbr" + symbols = "sox" + for m in range(1, 4): + g = Grid1d(nx, ng, xmin=xmin, xmax=xmax, m=m) + s = Simulation(g, u, C=0.5/(2*m+1)) + s.init_cond("sine") + plot_x, plot_a = g.plotting_data() + plot_x_hi, plot_a_hi = g.plotting_data_high_order() + pyplot.plot(plot_x, plot_a, f'{colors[m-1]}{symbols[m-1]}', + label=fr"Nodes, $m={{{m}}}$") + pyplot.plot(plot_x_hi, plot_a_hi, f'{colors[m-1]}:', + label=fr"Modes, $m={{{m}}}$") + pyplot.xlim(0, 1) + pyplot.vlines([0.25, 0.5, 0.75], -2, 2, linestyles='--') + pyplot.ylim(-1, 1) + pyplot.xlabel(r'$x$') + pyplot.ylabel(r'$a$') + pyplot.legend() + pyplot.savefig("dg_grid.pdf") + pyplot.show() + + # Note that the highest m (4) doesn't converge at the expected rate - + # probably limited by the time integrator. + colors = "brckgy" + symbols = "xo^ 0 + + self.ng = ng + self.nx = nx + self.m = m + + self.xmin = xmin + self.xmax = xmax + + # python is zero-based. 
Make easy intergers to know where the + # real data lives + self.ilo = ng + self.ihi = ng+nx-1 + + # physical coords -- cell-centered, left and right edges + self.dx = (xmax - xmin)/(nx) + self.x = xmin + (numpy.arange(nx+2*ng)-ng+0.5)*self.dx + self.xl = xmin + (numpy.arange(nx+2*ng)-ng)*self.dx + self.xr = xmin + (numpy.arange(nx+2*ng)-ng+1.0)*self.dx + + # storage for the solution + # These are the modes of the solution at each point, so the + # first index is the mode + # NO! These are actually the *nodal* values + self.u = numpy.zeros((self.m+1, nx+2*ng), dtype=numpy.float64) + + # Need the Gauss-Lobatto nodes and weights in the reference element + GL = quadpy.line_segment.GaussLobatto(m+1) + self.nodes = GL.points + self.weights = GL.weights + # To go from modal to nodal we need the Vandermonde matrix + self.V = legendre.legvander(self.nodes, m) + c = numpy.eye(m+1) + # Orthonormalize + for p in range(m+1): + self.V[:, p] /= numpy.sqrt(2/(2*p+1)) + c[p, p] /= numpy.sqrt(2/(2*p+1)) + self.V_inv = numpy.linalg.inv(self.V) + self.M = numpy.linalg.inv(self.V @ self.V.T) + self.M_inv = self.V @ self.V.T + # Derivatives of Legendre polynomials lead to derivatives of V + dV = legendre.legval(self.nodes, + legendre.legder(c)).T + self.D = dV @ self.V_inv + # Stiffness matrix for the interior flux + self.S = self.M @ self.D + + # Nodes in the computational coordinates + self.all_nodes = numpy.zeros((m+1)*(nx+2*ng), dtype=numpy.float64) + self.all_nodes_per_node = numpy.zeros_like(self.u) + for i in range(nx+2*ng): + self.all_nodes[(m+1)*i:(m+1)*(i+1)] = (self.x[i] + + self.nodes * self.dx / 2) + self.all_nodes_per_node[:, i] = (self.x[i] + + self.nodes * self.dx / 2) + + def modal_to_nodal(self, modal): + for i in range(self.nx+2*self.ng): + self.u[:, i] = self.V @ modal[:, i] + return self.u + + def nodal_to_modal(self): + modal = numpy.zeros_like(self.u) + for i in range(self.nx+2*self.ng): + modal[:, i] = self.V_inv @ self.u[:, i] + return modal + + def plotting_data(self): + return (self.all_nodes, + self.u.ravel(order='F')) + + def plotting_data_high_order(self, npoints=50): + assert npoints > 2 + p_nodes = numpy.zeros(npoints*(self.nx+2*self.ng), dtype=numpy.float64) + p_data = numpy.zeros_like(p_nodes) + for i in range(self.nx+2*self.ng): + p_nodes[npoints*i:npoints*(i+1)] = numpy.linspace(self.xl[i], + self.xr[i], + npoints) + modal = self.V_inv @ self.u[:, i] + for p in range(self.m+1): + modal[p] /= numpy.sqrt(2/(2*p+1)) + scaled_x = 2 * (p_nodes[npoints*i:npoints*(i+1)] - + self.x[i]) / self.dx + p_data[npoints*i:npoints*(i+1)] = legendre.legval(scaled_x, modal) + return p_nodes, p_data + + def scratch_array(self): + """ return a scratch array dimensioned for our grid """ + return numpy.zeros((self.m+1, self.nx+2*self.ng), dtype=numpy.float64) + + def fill_BCs(self): + """ fill all single ghostcell with periodic boundary conditions """ + + for n in range(self.ng): + # left boundary + self.u[:, self.ilo-1-n] = self.u[:, self.ihi-n] + # right boundary + self.u[:, self.ihi+1+n] = self.u[:, self.ilo+n] + + def norm(self, e): + """ + Return the norm of quantity e which lives on the grid. + + This is the 'broken norm': the quantity is integrated over each + individual element using Gauss-Lobatto quadrature (as we have those + nodes and weights), and the 2-norm of the result is then returned. 
+ """ + if not numpy.allclose(e.shape, self.all_nodes_per_node.shape): + return None + + # This is actually a pointwise norm, not quadrature'd + return numpy.sqrt(self.dx*numpy.sum(e[:, self.ilo:self.ihi+1]**2)) + + +@jit +def burgers_flux(u): + return u**2/2 + + +@jit +def minmod3(a1, a2, a3): + """ + Utility function that does minmod on three inputs + """ + signs1 = a1 * a2 + signs2 = a1 * a3 + signs3 = a2 * a3 + same_sign = numpy.logical_and(numpy.logical_and(signs1 > 0, + signs2 > 0), + signs3 > 0) + minmod = numpy.min(numpy.abs(numpy.vstack((a1, a2, a3))), + axis=0) * numpy.sign(a1) + + return numpy.where(same_sign, minmod, numpy.zeros_like(a1)) + + +@jit +def limit(g, limiter): + """ + After evolution, limit the slopes. + """ + + # Limiting! + + if limiter == "moment": + + # Limiting, using moment limiting (Hesthaven p 445-7) + theta = 2 + a_modal = g.nodal_to_modal() + # First, work out where limiting is needed + limiting_todo = numpy.ones(g.nx+2*g.ng, dtype=bool) + limiting_todo[:g.ng] = False + limiting_todo[-g.ng:] = False + # Get the cell average and the nodal values at the boundaries + a_zeromode = a_modal.copy() + a_zeromode[1:, :] = 0 + a_cell = (g.V @ a_zeromode)[0, :] + a_minus = g.u[0, :] + a_plus = g.u[-1, :] + # From the cell averages and boundary values we can construct + # alternate values at the boundaries + a_left = numpy.zeros(g.nx+2*g.ng) + a_right = numpy.zeros(g.nx+2*g.ng) + a_left[1:-1] = a_cell[1:-1] - minmod3(a_cell[1:-1] - a_minus[1:-1], + a_cell[1:-1] - a_cell[:-2], + a_cell[2:] - a_cell[1:-1]) + a_right[1:-1] = a_cell[1:-1] + minmod3(a_plus[1:-1] - a_cell[1:-1], + a_cell[1:-1] - a_cell[:-2], + a_cell[2:] - a_cell[1:-1]) + limiting_todo[1:-1] = numpy.logical_not( + numpy.logical_and(numpy.isclose(a_left[1:-1], + a_minus[1:-1]), + numpy.isclose(a_right[1:-1], + a_plus[1:-1]))) + # Now, do the limiting. Modify moments where needed, and as soon as + # limiting isn't needed, stop + updated_mode = numpy.zeros(g.nx+2*g.ng) + for i in range(g.m-1, -1, -1): + factor = numpy.sqrt((2*i+3)*(2*i+5)) + a1 = factor * a_modal[i+1, 1:-1] + a2 = theta * (a_modal[i, 2:] - a_modal[i, 1:-1]) + a3 = theta * (a_modal[i, 1:-1] - a_modal[i, :-2]) + updated_mode[1:-1] = minmod3(a1, a2, a3) / factor + did_it_limit = numpy.isclose(a_modal[i+1, 1:-1], + updated_mode[1:-1]) + a_modal[i+1, limiting_todo] = updated_mode[limiting_todo] + limiting_todo[1:-1] = numpy.logical_and(limiting_todo[1:-1], + numpy.logical_not(did_it_limit)) + # Get back nodal values + limited_a = g.modal_to_nodal(a_modal) + + else: + limited_a = g.u + + return limited_a + + +class Simulation(object): + + def __init__(self, grid, C=0.8, limiter=None): + self.grid = grid + self.t = 0.0 # simulation time + self.C = C # CFL number + self.limiter = limiter # What it says. 
+ + def init_cond(self, type="sine"): + """ initialize the data """ + if type == "tophat": + def init_u(x): + return numpy.where(numpy.logical_and(x >= 0.333, + x <= 0.666), + numpy.ones_like(x), + numpy.zeros_like(x)) + elif type == "sine": + def init_u(x): + return numpy.where(numpy.logical_and(x >= 0.333, + x <= 0.666), + numpy.ones_like(x) + + 0.5 * numpy.sin( + 2.0 * numpy.pi * + (x - 0.333) / 0.333), + numpy.ones_like(x)) + elif type == "smooth_sine": + def init_u(x): + return numpy.sin(2.0 * numpy.pi * x / + (self.grid.xmax - self.grid.xmin)) + elif type == "gaussian": + def init_u(x): + local_xl = x - self.grid.dx/2 + local_xr = x + self.grid.dx/2 + al = 1.0 + numpy.exp(-60.0*(local_xl - 0.5)**2) + ar = 1.0 + numpy.exp(-60.0*(local_xr - 0.5)**2) + ac = 1.0 + numpy.exp(-60.0*(x - 0.5)**2) + + return (1./6.)*(al + 4*ac + ar) + + self.grid.u = init_u(self.grid.all_nodes_per_node) + + def timestep(self): + """ return the advective timestep """ + return self.C*self.grid.dx/numpy.max(numpy.abs( + self.grid.u[:, self.grid.ilo:self.grid.ihi+1])) + + def states(self): + """ compute the left and right interface states """ + + # Evaluate the nodal values at the domain edges + g = self.grid + + # Extract nodal values + + al = numpy.zeros(g.nx+2*g.ng) + ar = numpy.zeros(g.nx+2*g.ng) + + # i is looping over interfaces, so al is the right edge of the left + # element, etc. + for i in range(g.ilo, g.ihi+2): + al[i] = g.u[-1, i-1] + ar[i] = g.u[0, i] + + return al, ar + + def limit(self): + """ + After evolution, limit the slopes. + """ + + # Evaluate the nodal values at the domain edges + g = self.grid + + # Limiting! + + if self.limiter == "moment": + + # Limiting, using moment limiting (Hesthaven p 445-7) + theta = 2 + a_modal = g.nodal_to_modal() + # First, work out where limiting is needed + limiting_todo = numpy.ones(g.nx+2*g.ng, dtype=bool) + limiting_todo[:g.ng] = False + limiting_todo[-g.ng:] = False + # Get the cell average and the nodal values at the boundaries + a_zeromode = a_modal.copy() + a_zeromode[1:, :] = 0 + a_cell = (g.V @ a_zeromode)[0, :] + a_minus = g.u[0, :] + a_plus = g.u[-1, :] + # From the cell averages and boundary values we can construct + # alternate values at the boundaries + a_left = numpy.zeros(g.nx+2*g.ng) + a_right = numpy.zeros(g.nx+2*g.ng) + a_left[1:-1] = a_cell[1:-1] - minmod3(a_cell[1:-1] - a_minus[1:-1], + a_cell[1:-1] - a_cell[:-2], + a_cell[2:] - a_cell[1:-1]) + a_right[1:-1] = a_cell[1:-1] + minmod3(a_plus[1:-1] - a_cell[1:-1], + a_cell[1:-1] - a_cell[:-2], + a_cell[2:] - a_cell[1:-1]) + limiting_todo[1:-1] = numpy.logical_not( + numpy.logical_and(numpy.isclose(a_left[1:-1], + a_minus[1:-1]), + numpy.isclose(a_right[1:-1], + a_plus[1:-1]))) + # Now, do the limiting. 
Modify moments where needed, and as soon as + # limiting isn't needed, stop + updated_mode = numpy.zeros(g.nx+2*g.ng) + for i in range(g.m-1, -1, -1): + factor = numpy.sqrt((2*i+3)*(2*i+5)) + a1 = factor * a_modal[i+1, 1:-1] + a2 = theta * (a_modal[i, 2:] - a_modal[i, 1:-1]) + a3 = theta * (a_modal[i, 1:-1] - a_modal[i, :-2]) + updated_mode[1:-1] = minmod3(a1, a2, a3) / factor + did_it_limit = numpy.isclose(a_modal[i+1, 1:-1], + updated_mode[1:-1]) + a_modal[i+1, limiting_todo] = updated_mode[limiting_todo] + limiting_todo[1:-1] = numpy.logical_and(limiting_todo[1:-1], + numpy.logical_not(did_it_limit)) + # Get back nodal values + g.u = g.modal_to_nodal(a_modal) + + return g.u + + def riemann(self, ul, ur, alpha): + """ + Riemann problem for Burgers using Lax-Friedrichs + """ + + return ((burgers_flux(ul) + burgers_flux(ur)) - (ur - ul)*alpha)/2 + + def rk_substep(self, dt): + """ + Take a single RK substep + """ + g = self.grid + g.fill_BCs() + rhs = g.scratch_array() + + # Integrate flux over element + f = burgers_flux(g.u) + interior_f = g.S.T @ f + # Use Riemann solver to get fluxes between elements + boundary_f = self.riemann(*self.states(), numpy.max(numpy.abs(g.u))) + rhs = interior_f + rhs[0, 1:-1] += boundary_f[1:-1] + rhs[-1, 1:-1] -= boundary_f[2:] + + # Multiply by mass matrix (inverse). + rhs_i = 2 / g.dx * g.M_inv @ rhs + + return rhs_i + + def evolve(self, tmax): + """ evolve Burgers equation using RK3 (SSP) """ + self.t = 0.0 + g = self.grid + + # main evolution loop +# pbar = tqdm.tqdm(total=100) + while self.t < tmax: + # pbar.update(100*self.t/tmax) + # fill the boundary conditions + g.fill_BCs() + + # get the timestep + dt = self.timestep() + + if self.t + dt > tmax: + dt = tmax - self.t + + # RK3 - SSP + # Store the data at the start of the step + a_start = g.u.copy() + k1 = dt * self.rk_substep(dt) + g.u = a_start + k1 + g.u = self.limit() + g.fill_BCs() + a1 = g.u.copy() + k2 = dt * self.rk_substep(dt) + g.u = (3 * a_start + a1 + k2) / 4 + g.u = self.limit() + g.fill_BCs() + a2 = g.u.copy() + k3 = dt * self.rk_substep(dt) + g.u = (a_start + 2 * a2 + 2 * k3) / 3 + g.u = self.limit() + g.fill_BCs() + + self.t += dt +# pbar.close() + + +if __name__ == "__main__": + + # Runs with limiter + nx = 128 + ng = 1 + xmin = 0 + xmax = 1 + ms = [1, 3, 5] + colors = 'brcy' + for i_m, m in enumerate(ms): + g = Grid1d(nx, ng, xmin, xmax, m) + s = Simulation(g, C=0.5/(2*m+1), limiter="moment") + s.init_cond("sine") + x, u = g.plotting_data() + x_start = x.copy() + u_start = u.copy() + s.evolve(0.2) + x_end, u_end = g.plotting_data() + pyplot.plot(x_end.copy(), u_end.copy(), f'{colors[i_m]}-', + label=rf'$m={m}$') + pyplot.plot(x_start, u_start, 'k--', alpha=0.5, label='Initial data') + pyplot.xlim(0, 1) + pyplot.legend() + pyplot.xlabel(r'$x$') + pyplot.ylabel(r'$u$') + pyplot.show() diff --git a/compressible/dg_euler.py b/compressible/dg_euler.py new file mode 100644 index 0000000..8fa0f96 --- /dev/null +++ b/compressible/dg_euler.py @@ -0,0 +1,606 @@ +""" +Discontinuous Galerkin for the Euler equations. + +Just doing limiting on the conserved variables directly. 
+""" + +import numpy +from numpy.polynomial import legendre +from matplotlib import pyplot +import matplotlib as mpl +import quadpy +from numba import jit +import tqdm +import riemann +import weno_euler + +mpl.rcParams['mathtext.fontset'] = 'cm' +mpl.rcParams['mathtext.rm'] = 'serif' + + +class Grid1d(object): + + def __init__(self, nx, ng, xmin=0.0, xmax=1.0, m=3): + + assert m > 0 + + self.ncons = 3 # 1d hydro, 3 conserved variables + self.ng = ng + self.nx = nx + self.m = m + + self.xmin = xmin + self.xmax = xmax + + # python is zero-based. Make easy intergers to know where the + # real data lives + self.ilo = ng + self.ihi = ng+nx-1 + + # physical coords -- cell-centered, left and right edges + self.dx = (xmax - xmin)/(nx) + self.x = xmin + (numpy.arange(nx+2*ng)-ng+0.5)*self.dx + self.xl = xmin + (numpy.arange(nx+2*ng)-ng)*self.dx + self.xr = xmin + (numpy.arange(nx+2*ng)-ng+1.0)*self.dx + + # storage for the solution + # These are the modes of the solution at each point, so the + # first index is the mode + # NO! These are actually the *nodal* values + self.u = numpy.zeros((self.ncons, self.m+1, nx+2*ng), + dtype=numpy.float64) + + # Need the Gauss-Lobatto nodes and weights in the reference element + GL = quadpy.line_segment.GaussLobatto(m+1) + self.nodes = GL.points + self.weights = GL.weights + # To go from modal to nodal we need the Vandermonde matrix + self.V = legendre.legvander(self.nodes, m) + c = numpy.eye(m+1) + # Orthonormalize + for p in range(m+1): + self.V[:, p] /= numpy.sqrt(2/(2*p+1)) + c[p, p] /= numpy.sqrt(2/(2*p+1)) + self.V_inv = numpy.linalg.inv(self.V) + self.M = numpy.linalg.inv(self.V @ self.V.T) + self.M_inv = self.V @ self.V.T + # Derivatives of Legendre polynomials lead to derivatives of V + dV = legendre.legval(self.nodes, + legendre.legder(c)).T + self.D = dV @ self.V_inv + # Stiffness matrix for the interior flux + self.S = self.M @ self.D + + # Nodes in the computational coordinates + self.all_nodes = numpy.zeros((m+1)*(nx+2*ng), dtype=numpy.float64) + self.all_nodes_per_node = numpy.zeros_like(self.u[0]) + for i in range(nx+2*ng): + self.all_nodes[(m+1)*i:(m+1)*(i+1)] = (self.x[i] + + self.nodes * self.dx / 2) + self.all_nodes_per_node[:, i] = (self.x[i] + + self.nodes * self.dx / 2) + + def modal_to_nodal(self, modal): + for n in range(self.ncons): + for i in range(self.nx+2*self.ng): + self.u[n, :, i] = self.V @ modal[n, :, i] + return self.u + + def nodal_to_modal(self): + modal = numpy.zeros_like(self.u) + for n in range(self.ncons): + for i in range(self.nx+2*self.ng): + modal[n, :, i] = self.V_inv @ self.u[n, :, i] + return modal + + def plotting_data(self): + u_plotting = numpy.zeros((self.ncons, (self.m+1)*(self.nx+2*self.ng))) + for n in range(self.ncons): + u_plotting[n, :] = self.u[n, :, :].ravel(order='F') + return (self.all_nodes, + u_plotting) + + def scratch_array(self): + """ return a scratch array dimensioned for our grid """ + return numpy.zeros((self.ncons, self.m+1, self.nx+2*self.ng), + dtype=numpy.float64) + + def fill_BCs(self): + """ fill all single ghostcell with outflow boundary conditions """ + + for n in range(self.ng): + # left boundary + self.u[:, :, self.ilo-1-n] = self.u[:, :, self.ilo] + # right boundary + self.u[:, :, self.ihi+1+n] = self.u[:, :, self.ihi] + + +@jit +def euler_flux(u, eos_gamma): + flux = numpy.zeros_like(u) + rho = u[0] + S = u[1] + E = u[2] + v = S / rho + p = (eos_gamma - 1) * (E - rho * v**2 / 2) + flux[0] = S + flux[1] = S * v + p + flux[2] = (E + p) * v + return flux + + +@jit +def minmod3(a1, a2, 
a3): + """ + Utility function that does minmod on three inputs + """ + signs1 = a1 * a2 + signs2 = a1 * a3 + signs3 = a2 * a3 + same_sign = numpy.logical_and(numpy.logical_and(signs1 > 0, + signs2 > 0), + signs3 > 0) + minmod = numpy.min(numpy.abs(numpy.vstack((a1, a2, a3))), + axis=0) * numpy.sign(a1) + + return numpy.where(same_sign, minmod, numpy.zeros_like(a1)) + + +@jit +def limit(g, limiter): + """ + After evolution, limit the slopes. + """ + + # Limiting! + + if limiter == "moment": + + # Limiting, using moment limiting (Hesthaven p 445-7) + theta = 2 + a_modal = g.nodal_to_modal() + # First, work out where limiting is needed + limiting_todo = numpy.ones(g.nx+2*g.ng, dtype=bool) + limiting_todo[:g.ng] = False + limiting_todo[-g.ng:] = False + # Get the cell average and the nodal values at the boundaries + a_zeromode = a_modal.copy() + a_zeromode[1:, :] = 0 + a_cell = (g.V @ a_zeromode)[0, :] + a_minus = g.u[0, :] + a_plus = g.u[-1, :] + # From the cell averages and boundary values we can construct + # alternate values at the boundaries + a_left = numpy.zeros(g.nx+2*g.ng) + a_right = numpy.zeros(g.nx+2*g.ng) + a_left[1:-1] = a_cell[1:-1] - minmod3(a_cell[1:-1] - a_minus[1:-1], + a_cell[1:-1] - a_cell[:-2], + a_cell[2:] - a_cell[1:-1]) + a_right[1:-1] = a_cell[1:-1] + minmod3(a_plus[1:-1] - a_cell[1:-1], + a_cell[1:-1] - a_cell[:-2], + a_cell[2:] - a_cell[1:-1]) + limiting_todo[1:-1] = numpy.logical_not( + numpy.logical_and(numpy.isclose(a_left[1:-1], + a_minus[1:-1]), + numpy.isclose(a_right[1:-1], + a_plus[1:-1]))) + # Now, do the limiting. Modify moments where needed, and as soon as + # limiting isn't needed, stop + updated_mode = numpy.zeros(g.nx+2*g.ng) + for i in range(g.m-1, -1, -1): + factor = numpy.sqrt((2*i+3)*(2*i+5)) + a1 = factor * a_modal[i+1, 1:-1] + a2 = theta * (a_modal[i, 2:] - a_modal[i, 1:-1]) + a3 = theta * (a_modal[i, 1:-1] - a_modal[i, :-2]) + updated_mode[1:-1] = minmod3(a1, a2, a3) / factor + did_it_limit = numpy.isclose(a_modal[i+1, 1:-1], + updated_mode[1:-1]) + a_modal[i+1, limiting_todo] = updated_mode[limiting_todo] + limiting_todo[1:-1] = numpy.logical_and(limiting_todo[1:-1], + numpy.logical_not(did_it_limit)) + # Get back nodal values + limited_a = g.modal_to_nodal(a_modal) + + else: + limited_a = g.u + + return limited_a + + +class Simulation(object): + + def __init__(self, grid, C=0.8, eos_gamma=1.4, limiter=None): + self.grid = grid + self.t = 0.0 # simulation time + self.C = C # CFL number + self.eos_gamma = eos_gamma + self.limiter = limiter # What it says. 
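As a quick reference for the algebra used throughout this class (init_cond, max_lambda, and the plotting helpers all switch between conserved and primitive variables for a gamma-law gas), a standalone sketch of the conversions; the helper names are illustrative only, not part of the module:

import numpy

def prim_to_cons(rho, v, p, gamma):
    # specific internal energy, then (density, momentum, total energy)
    e = p / rho / (gamma - 1)
    return numpy.array([rho, rho * v, rho * (e + v**2 / 2)])

def cons_to_prim(u, gamma):
    rho, S, E = u
    v = S / rho
    p = (gamma - 1) * (E - rho * v**2 / 2)
    return rho, v, p

# round trip on the Sod left state used in init_cond below
u_l = prim_to_cons(1.0, 0.0, 1.0, 1.4)
assert numpy.allclose(cons_to_prim(u_l, 1.4), (1.0, 0.0, 1.0))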
+ + def init_cond(self, type="sod"): + """ initialize the data """ + if type == "sod": + rho_l = 1 + rho_r = 1 / 8 + v_l = 0 + v_r = 0 + p_l = 1 + p_r = 1 / 10 + S_l = rho_l * v_l + S_r = rho_r * v_r + e_l = p_l / rho_l / (self.eos_gamma - 1) + e_r = p_r / rho_r / (self.eos_gamma - 1) + E_l = rho_l * (e_l + v_l**2 / 2) + E_r = rho_r * (e_r + v_r**2 / 2) + x = self.grid.all_nodes_per_node + self.grid.u[0] = numpy.where(x < 0, + rho_l * numpy.ones_like(x), + rho_r * numpy.ones_like(x)) + self.grid.u[1] = numpy.where(x < 0, + S_l * numpy.ones_like(x), + S_r * numpy.ones_like(x)) + self.grid.u[2] = numpy.where(x < 0, + E_l * numpy.ones_like(x), + E_r * numpy.ones_like(x)) + elif type == "advection": + x = self.grid.all_nodes_per_node + rho_0 = 1e-3 + rho_1 = 1 + sigma = 0.1 + rho = rho_0 * numpy.ones_like(x) + rho += (rho_1 - rho_0) * numpy.exp(-(x-0.5)**2/sigma**2) + v = numpy.ones_like(x) + p = 1e-6 * numpy.ones_like(x) + S = rho * v + e = p / rho / (self.eos_gamma - 1) + E = rho * (e + v**2 / 2) + self.grid.u[0, :] = rho[:] + self.grid.u[1, :] = S[:] + self.grid.u[2, :] = E[:] + elif type == "double rarefaction": + rho_l = 1 + rho_r = 1 + v_l = -2 + v_r = 2 + p_l = 0.4 + p_r = 0.4 + S_l = rho_l * v_l + S_r = rho_r * v_r + e_l = p_l / rho_l / (self.eos_gamma - 1) + e_r = p_r / rho_r / (self.eos_gamma - 1) + E_l = rho_l * (e_l + v_l**2 / 2) + E_r = rho_r * (e_r + v_r**2 / 2) + x = self.grid.all_nodes_per_node + self.grid.u[0] = numpy.where(x < 0, + rho_l * numpy.ones_like(x), + rho_r * numpy.ones_like(x)) + self.grid.u[1] = numpy.where(x < 0, + S_l * numpy.ones_like(x), + S_r * numpy.ones_like(x)) + self.grid.u[2] = numpy.where(x < 0, + E_l * numpy.ones_like(x), + E_r * numpy.ones_like(x)) + elif type == "shock-entropy": + x = self.grid.all_nodes_per_node + rho_l = 3.857143 * numpy.ones_like(x) + rho_r = 0.2 * numpy.sin(numpy.pi*x) + 1 + v_l = 2.629369 * numpy.ones_like(x) + v_r = 0 * numpy.ones_like(x) + p_l = 10.33333 * numpy.ones_like(x) + p_r = 1 * numpy.ones_like(x) + S_l = rho_l * v_l + S_r = rho_r * v_r + e_l = p_l / rho_l / (self.eos_gamma - 1) + e_r = p_r / rho_r / (self.eos_gamma - 1) + E_l = rho_l * (e_l + v_l**2 / 2) + E_r = rho_r * (e_r + v_r**2 / 2) + self.grid.u[0] = numpy.where(x < -4, + rho_l, + rho_r) + self.grid.u[1] = numpy.where(x < -4, + S_l, + S_r) + self.grid.u[2] = numpy.where(x < -4, + E_l, + E_r) + + def max_lambda(self): + rho = self.grid.u[0] + v = self.grid.u[1] / rho + p = (self.eos_gamma - 1) * (self.grid.u[2, :] - rho * v**2 / 2) + cs = numpy.sqrt(self.eos_gamma * p / rho) + return numpy.max(numpy.abs(v) + cs) + + def timestep(self): + return self.C * self.grid.dx / self.max_lambda() + + def states(self): + """ compute the left and right interface states """ + + # Evaluate the nodal values at the domain edges + g = self.grid + + # Extract nodal values + al = numpy.zeros((g.ncons, g.nx+2*g.ng)) + ar = numpy.zeros((g.ncons, g.nx+2*g.ng)) + + # i is looping over interfaces, so al is the right edge of the left + # element, etc. + for i in range(g.ilo, g.ihi+2): + al[:, i] = g.u[:, -1, i-1] + ar[:, i] = g.u[:, 0, i] + + return al, ar + + def limit(self): + """ + After evolution, limit the slopes. + """ + + # Evaluate the nodal values at the domain edges + g = self.grid + + # Limiting! 
+ + if self.limiter == "moment": + + # Limiting, using moment limiting (Hesthaven p 445-7) + theta = 2 + a_modal = g.nodal_to_modal() + # First, work out where limiting is needed + limiting_todo = numpy.ones(g.nx+2*g.ng, dtype=bool) + limiting_todo[:g.ng] = False + limiting_todo[-g.ng:] = False + # Get the cell average and the nodal values at the boundaries + a_zeromode = a_modal.copy() + a_zeromode[:, 1:, :] = 0 + for n in range(g.ncons): + a_cell = (g.V @ a_zeromode[n, :, :])[0, :] + a_minus = g.u[n, 0, :] + a_plus = g.u[n, -1, :] + # From the cell averages and boundary values we can construct + # alternate values at the boundaries + a_left = numpy.zeros(g.nx+2*g.ng) + a_right = numpy.zeros(g.nx+2*g.ng) + a_left[1:-1] = a_cell[1:-1] - minmod3(a_cell[1:-1] - a_minus[1:-1], + a_cell[1:-1] - a_cell[:-2], + a_cell[2:] - a_cell[1:-1]) + a_right[1:-1] = a_cell[1:-1] + minmod3(a_plus[1:-1] - a_cell[1:-1], + a_cell[1:-1] - a_cell[:-2], + a_cell[2:] - a_cell[1:-1]) + limiting_todo[1:-1] = numpy.logical_not( + numpy.logical_and(numpy.isclose(a_left[1:-1], + a_minus[1:-1]), + numpy.isclose(a_right[1:-1], + a_plus[1:-1]))) + # Now, do the limiting. Modify moments where needed, and as soon as + # limiting isn't needed, stop + updated_mode = numpy.zeros(g.nx+2*g.ng) + for i in range(g.m-1, -1, -1): + factor = numpy.sqrt((2*i+3)*(2*i+5)) + a1 = factor * a_modal[n, i+1, 1:-1] + a2 = theta * (a_modal[n, i, 2:] - a_modal[n, i, 1:-1]) + a3 = theta * (a_modal[n, i, 1:-1] - a_modal[n, i, :-2]) + updated_mode[1:-1] = minmod3(a1, a2, a3) / factor + did_it_limit = numpy.isclose(a_modal[n, i+1, 1:-1], + updated_mode[1:-1]) + a_modal[n, i+1, limiting_todo] = updated_mode[limiting_todo] + limiting_todo[1:-1] = numpy.logical_and(limiting_todo[1:-1], + numpy.logical_not(did_it_limit)) + # Get back nodal values + g.u = g.modal_to_nodal(a_modal) + + return g.u + + def riemann(self, ul, ur, alpha): + """ + Riemann problem for Burgers using Lax-Friedrichs + """ + fl = euler_flux(ul, self.eos_gamma) + fr = euler_flux(ur, self.eos_gamma) + return ((fl + fr) - (ur - ul)*alpha)/2 + + def rk_substep(self, dt): + """ + Take a single RK substep + """ + g = self.grid + g.fill_BCs() + rhs = g.scratch_array() + + # Integrate flux over element + f = euler_flux(g.u, self.eos_gamma) + interior_f = g.scratch_array() + for n in range(g.ncons): + interior_f[n, :, :] = g.S.T @ f[n, :, :] + # Use Riemann solver to get fluxes between elements + boundary_f = self.riemann(*self.states(), self.max_lambda()) + rhs = interior_f + rhs[:, 0, 1:-1] += boundary_f[:, 1:-1] + rhs[:, -1, 1:-1] -= boundary_f[:, 2:] + + # Multiply by mass matrix (inverse). 
+ rhs_i = g.scratch_array() + for n in range(g.ncons): + rhs_i[n, :, :] = 2 / g.dx * g.M_inv @ rhs[n, :, :] + + return rhs_i + + def evolve(self, tmax): + """ evolve Burgers equation using RK3 (SSP) """ + self.t = 0.0 + g = self.grid + + # main evolution loop + # Estimate number of timesteps + nt_estimate = int(tmax / self.timestep() * 10) + prev_it = 0 + pbar = tqdm.tqdm(total=nt_estimate) + while self.t < tmax: + curr_it = int(nt_estimate * self.t / tmax) + pbar.update(curr_it - prev_it) + prev_it = curr_it +# print(self.t) + # fill the boundary conditions + g.fill_BCs() + + # get the timestep + dt = self.timestep() + + if self.t + dt > tmax: + dt = tmax - self.t + + # RK3 - SSP + # Store the data at the start of the step + a_start = g.u.copy() + k1 = dt * self.rk_substep(dt) + g.u = a_start + k1 + g.u = self.limit() + g.fill_BCs() + a1 = g.u.copy() + k2 = dt * self.rk_substep(dt) + g.u = (3 * a_start + a1 + k2) / 4 + g.u = self.limit() + g.fill_BCs() + a2 = g.u.copy() + k3 = dt * self.rk_substep(dt) + g.u = (a_start + 2 * a2 + 2 * k3) / 3 + g.u = self.limit() + g.fill_BCs() + + self.t += dt + pbar.close() + + +def plot_sod(nx, filename=None): + # setup the problem -- Sod + left = riemann.State(p=1.0, u=0.0, rho=1.0) + right = riemann.State(p=0.1, u=0.0, rho=0.125) + + rp = riemann.RiemannProblem(left, right) + rp.find_star_state() + + x_e, rho_e, v_e, p_e = rp.sample_solution(0.2, 1024) + e_e = p_e / 0.4 / rho_e + x_e -= 0.5 + + # Runs with limiter + ng = 1 + xmin = -0.5 + xmax = 0.5 + ms = [1, 3, 5] + colors = 'bry' + fig, axes = pyplot.subplots(2, 2) + axes[0, 0].plot(x_e, rho_e, 'k-', linewidth=2) + axes[0, 1].plot(x_e, v_e, 'k-', linewidth=2, label="Exact") + axes[1, 0].plot(x_e, p_e, 'k-', linewidth=2) + axes[1, 1].plot(x_e, e_e, 'k-', linewidth=2) + for i_m, m in enumerate(ms): + g = Grid1d(nx, ng, xmin, xmax, m) + s = Simulation(g, C=0.1/(2*m+1), limiter="moment") + s.init_cond("sod") + s.evolve(0.2) + x, u = g.plotting_data() + rho = u[0, :] + v = u[1, :] / u[0, :] + e = (u[2, :] - rho * v**2 / 2) / rho + p = (s.eos_gamma - 1) * (u[2, :] - rho * v**2 / 2) + axes[0, 0].plot(x, rho, f'{colors[i_m]}--') + axes[0, 1].plot(x, v, f'{colors[i_m]}--', label=fr"$m={m}$") + axes[1, 0].plot(x, p, f'{colors[i_m]}--') + axes[1, 1].plot(x, e, f'{colors[i_m]}--') + axes[1, 0].set_xlabel(r"$x$") + axes[1, 1].set_xlabel(r"$x$") + axes[0, 0].set_ylabel(r"$\rho$") + axes[0, 1].set_ylabel(r"$v$") + axes[1, 0].set_ylabel(r"$p$") + axes[1, 1].set_ylabel(r"$e$") + axes[0, 0].set_title("Discontinuous Galerkin") + axes[0, 1].set_title(rf"$N={nx}$, Sod problem") + for ax in axes.flatten(): + ax.set_xlim(-0.5, 0.5) + fig.tight_layout() + lgd = axes[0, 1].legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) + if filename: + fig.savefig(filename, + bbox_extra_artists=(lgd,), bbox_inches='tight') + pyplot.show() + + +def weno_reference_shock_entropy(nx=512): + """ + A reference solution for the shock-entropy problem. + + Note that this is slow: 20 minutes or so using the standard parameters, + and obviously more resolution would be better. 
+ """ + order = 4 + ng = order+2 + C = 0.5 + xmin = -5 + xmax = 5 + g = weno_euler.Grid1d(nx, ng, xmin, xmax, bc="outflow") + s = weno_euler.WENOSimulation(g, C, order) + s.init_cond("shock-entropy") + s.evolve(1.8, reconstruction='characteristic') + return g.x, g.q + +def plot_shock_entropy(nx, m, x_ref, u_ref, filename=None): + # Runs with limiter + ng = 1 + xmin = -5 + xmax = 5 + fig, axes = pyplot.subplots(2, 2) + rho_ref = u_ref[0, :] + v_ref = u_ref[1, :] / u_ref[0, :] + e_ref = (u_ref[2, :] - rho_ref * v_ref**2 / 2) / rho_ref + p_ref = 0.4 * (u_ref[2, :] - rho_ref * v_ref**2 / 2) + axes[0, 0].plot(x_ref, rho_ref, 'k--') + axes[0, 1].plot(x_ref, v_ref, 'k--', label="Reference") + axes[1, 0].plot(x_ref, p_ref, 'k--') + axes[1, 1].plot(x_ref, e_ref, 'k--') + # Evolve DG + g = Grid1d(nx, ng, xmin, xmax, m) + s = Simulation(g, C=0.1/(2*m+1), limiter="moment") + s.init_cond("shock-entropy") + s.evolve(1.8) + x, u = g.plotting_data() + rho = u[0, :] + v = u[1, :] / u[0, :] + e = (u[2, :] - rho * v**2 / 2) / rho + p = (s.eos_gamma - 1) * (u[2, :] - rho * v**2 / 2) + axes[0, 0].plot(x, rho, 'b-') + axes[0, 1].plot(x, v, 'b-', label=fr"$m={m}$") + axes[1, 0].plot(x, p, 'b-') + axes[1, 1].plot(x, e, 'b-') + axes[1, 0].set_xlabel(r"$x$") + axes[1, 1].set_xlabel(r"$x$") + axes[0, 0].set_ylabel(r"$\rho$") + axes[0, 1].set_ylabel(r"$v$") + axes[1, 0].set_ylabel(r"$p$") + axes[1, 1].set_ylabel(r"$e$") + axes[0, 0].set_title("Discontinuous Galerkin") + axes[0, 1].set_title(rf"$N={nx}$, shock-entropy problem") + for ax in axes.flatten(): + ax.set_xlim(xmin, xmax) + fig.tight_layout() + lgd = axes[0, 1].legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) + if filename: + fig.savefig(filename, + bbox_extra_artists=(lgd,), bbox_inches='tight') + pyplot.show() + + +if __name__ == "__main__": + + # Runs with limiter +# for nx in [32, 128]: +# plot_sod(nx, f'dg_sod_{nx}.pdf') + + # Reference solution for shock-entropy using WENO + x_ref, u_ref = weno_reference_shock_entropy() + + for m in [1, 3, 5]: + for nx in [128, 256]: + plot_shock_entropy(nx, m, x_ref, u_ref, + f'dg_shock_entropy_m{m}_N{nx}.pdf') + diff --git a/compressible/weno_euler.py b/compressible/weno_euler.py index fd7575d..c33307e 100644 --- a/compressible/weno_euler.py +++ b/compressible/weno_euler.py @@ -3,6 +3,7 @@ from matplotlib import pyplot import weno_coefficients import riemann +import tqdm class Grid1d(object): @@ -172,6 +173,29 @@ def init_cond(self, type="sod"): self.grid.q[2] = numpy.where(self.grid.x < 0, E_l * numpy.ones_like(self.grid.x), E_r * numpy.ones_like(self.grid.x)) + elif type == "shock-entropy": + x = self.grid.x + rho_l = 3.857143 * numpy.ones_like(x) + rho_r = 0.2 * numpy.sin(numpy.pi*x) + 1 + v_l = 2.629369 * numpy.ones_like(x) + v_r = 0 * numpy.ones_like(x) + p_l = 10.33333 * numpy.ones_like(x) + p_r = 1 * numpy.ones_like(x) + S_l = rho_l * v_l + S_r = rho_r * v_r + e_l = p_l / rho_l / (self.eos_gamma - 1) + e_r = p_r / rho_r / (self.eos_gamma - 1) + E_l = rho_l * (e_l + v_l**2 / 2) + E_r = rho_r * (e_r + v_r**2 / 2) + self.grid.q[0] = numpy.where(x < -4, + rho_l, + rho_r) + self.grid.q[1] = numpy.where(x < -4, + S_l, + S_r) + self.grid.q[2] = numpy.where(x < -4, + E_l, + E_r) def max_lambda(self): @@ -297,7 +321,14 @@ def evolve(self, tmax, reconstruction = 'componentwise'): stepper = self.rk_substep_characteristic # main evolution loop + # Estimate number of timesteps + nt_estimate = int(tmax / self.timestep() * 10) + prev_it = 0 + pbar = tqdm.tqdm(total=nt_estimate) while self.t < tmax: + curr_it = 
int(nt_estimate * self.t / tmax) + pbar.update(curr_it - prev_it) + prev_it = curr_it # fill the boundary conditions g.fill_BCs() @@ -331,7 +362,7 @@ def evolve(self, tmax, reconstruction = 'componentwise'): self.t += dt # print("t=", self.t) - + pbar.close() if __name__ == "__main__":
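The progress-bar bookkeeping added to the two evolve() loops above follows one pattern: estimate a step count from the initial timestep, then advance the bar in proportion to t/tmax. A minimal standalone illustration with a toy fixed-step loop (the total is deliberately a rough over-estimate, so the bar is indicative rather than exact):

import tqdm

tmax, dt, t = 1.0, 0.01, 0.0
nt_estimate = int(tmax / dt * 10)   # generous estimate of the step count
prev_it = 0
pbar = tqdm.tqdm(total=nt_estimate)
while t < tmax:
    curr_it = int(nt_estimate * t / tmax)
    pbar.update(curr_it - prev_it)
    prev_it = curr_it
    t += dt
pbar.close()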