Skip to content

PBM bug report #1

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 1 commit into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
171 changes: 171 additions & 0 deletions code/HIGG_ZN_BATCH.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,171 @@
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 7 13:22:49 2023

@author: User
"""
import os
from pathlib import Path
import sys

root_dir = Path('../../').resolve()
sys.path.append(root_dir.as_posix())

import numpy as np
from create_Batch_model import *
import matplotlib.pyplot as plt

from create_Batch_model import create_model, update_nex
from CADETProcess.comparison import calculate_sse
from CADETProcess.simulator import Cadet
from CADETProcess import settings


from CADETProcess.optimization import OptimizationProblem
from CADETProcess.optimization import U_NSGA3

def setup_optimization_problem():
    """Build the optimizer and optimization problem for fitting batch
    crystallization kinetics (growth + primary nucleation) against
    experimental chord-length distributions.

    Returns
    -------
    tuple
        ``(optimizer, optimization_problem)`` ready to be passed to
        ``optimizer.optimize(optimization_problem, ...)``.
    """
    # Crystal phase discretization: total number of components,
    # c_feed being the first, c_eq being the last.
    n_x = 100 + 2
    x_c = 1e-6      # smallest crystal size, m
    x_max = 900e-6  # largest crystal size, m

    # Logarithmic size grid and its bin centers.
    x_grid_n = np.logspace(np.log10(x_c), np.log10(x_max), n_x - 1)
    x_ct_n = np.asarray(
        [(x_grid_n[p] + x_grid_n[p - 1]) / 2 for p in range(1, n_x - 1)]
    )

    # First time index of the experimental data compared to the simulation.
    t_int = 2

    # Intrinsic distribution implementation, HR in z.
    # NOTE(review): hard-coded absolute path — adjust per machine.
    simulator = Cadet(r"C:\Users\User\cadet\cadet3\bin\cadet-cli.exe")

    optimization_problem = OptimizationProblem('HIGG_ZN_BATCH')

    # NOTE: there is an upper-bound limit, possibly in pymoo, approximately 1e16.
    optimization_problem.add_variable('growth rate constant', lb=1e-9, ub=1e-5)
    optimization_problem.add_variable('growth rate exponent', lb=0.1, ub=2.0)
    optimization_problem.add_variable('nucleation rate', lb=1e12, ub=5e15)
    optimization_problem.add_variable('nucleation exponent', lb=0.5, ub=4)
    optimization_problem.add_variable('fitted ex particle count', lb=1, ub=1000)

    def objective(x):
        """Sum of squared errors between simulated and experimental CLDs.

        x = [growth rate constant, growth rate exponent, nucleation rate,
             nucleation exponent, fitted experimental particle count]
        """
        model = create_model()

        # growth kinetics
        model.root.input.model.unit_001.reaction_bulk.cry_growth_rate_constant = x[0]
        model.root.input.model.unit_001.reaction_bulk.cry_g = x[1]

        # nucleation kinetics
        model.root.input.model.unit_001.reaction_bulk.cry_primary_nucleation_rate = x[2]
        model.root.input.model.unit_001.reaction_bulk.cry_u = x[3]

        # experimental distributions rescaled to the fitted particle count
        n = update_nex(x[4])

        filename = simulator.get_tempfile_name()
        model.filename = filename
        model.save()
        model = simulator.run_h5(filename)

        # Residual: SSE over 11 consecutive time points; both sides are
        # scaled by 1e14 to keep the objective numerically O(1).
        residual = 0.0
        try:
            for i in range(t_int, t_int + 11):
                n_x_t = model.root.output.solution.unit_001.solution_outlet[i, 1:-1]
                residual += calculate_sse(n_x_t / 1e14, n[i] / 1e14)
        except IndexError:
            # The simulation produced fewer outputs than expected
            # (e.g. solver failure) — return a large penalty instead.
            return 1e10

        # Diagnostic plot: simulated (left) vs experimental (right) CLDs.
        fig = plt.figure(figsize=(9, 4.5))
        plt.suptitle(
            f"kg:{np.format_float_scientific(x[0], precision=2)}, "
            f"g:{np.format_float_scientific(x[1], precision=2)}, "
            f"kb:{np.format_float_scientific(x[2], precision=2)}, "
            f"b:{np.format_float_scientific(x[3], precision=2)}, "
            f"par:{np.format_float_scientific(x[4], precision=2)}",
            size="xx-small",
        )

        ax = fig.add_subplot(1, 2, 1)
        for i in range(t_int, t_int + 11):
            n_x_t = model.root.output.solution.unit_001.solution_outlet[i, 1:-1]
            ax.plot(x_ct_n * 1e6, n_x_t)
        ax.set_xscale("log")
        # BUG FIX: raw strings (\m is an invalid escape sequence) and the
        # stray '}' removed — it broke matplotlib's mathtext parsing.
        ax.set_xlabel(r'$chord~length~/~\mu m$')
        ax.set_ylabel(r'$n~/~(1/m/m^3)$')

        ax = fig.add_subplot(1, 2, 2)
        for i in range(t_int, t_int + 11):
            ax.plot(x_ct_n * 1e6, n[i])
        ax.set_xscale("log")
        ax.set_xlabel(r'$chord~length~/~\mu m$')

        plt.savefig(f'{settings.working_directory}/{residual}.png', dpi=80)
        plt.close(fig)

        # Remove the temporary .h5 simulation file.
        os.remove(model.filename)

        return residual

    optimizer = U_NSGA3()
    optimizer.n_cores = 7
    optimizer.pop_size = 21  # better: 100-200
    optimizer.n_max_gen = 5

    settings.working_directory = f'{optimization_problem.name}'

    optimization_problem.add_objective(objective)
    optimization_problem.parallelization_backend = "joblib"

    return optimizer, optimization_problem


if __name__ == '__main__':
    # Moved inside the guard so importing this module has no side effects.
    print("started optimization")

    optimizer, optimization_problem = setup_optimization_problem()

    optimization_results = optimizer.optimize(
        optimization_problem,
        use_checkpoint=False,
    )
    print("ended optimization")

    print(f"the best parameters are {optimization_results.x[0]}")

    # ---- re-simulate and plot the best parameter set ----
    # BUG FIX(review): `simulator`, `t_int` and `x_ct_n` were locals of
    # setup_optimization_problem() and therefore NameErrors at module
    # scope; recreate them here with the same values used during the
    # optimization.
    n_x = 100 + 2
    x_grid_n = np.logspace(np.log10(1e-6), np.log10(900e-6), n_x - 1)
    x_ct_n = np.asarray(
        [(x_grid_n[p] + x_grid_n[p - 1]) / 2 for p in range(1, n_x - 1)]
    )
    t_int = 2
    simulator = Cadet(r"C:\Users\User\cadet\cadet3\bin\cadet-cli.exe")

    model = create_model()
    # growth kinetics
    model.root.input.model.unit_001.reaction_bulk.cry_growth_rate_constant = optimization_results.x[0, 0]
    model.root.input.model.unit_001.reaction_bulk.cry_g = optimization_results.x[0, 1]

    # nucleation kinetics
    model.root.input.model.unit_001.reaction_bulk.cry_primary_nucleation_rate = optimization_results.x[0, 2]
    model.root.input.model.unit_001.reaction_bulk.cry_u = optimization_results.x[0, 3]

    # experimental distributions rescaled to the fitted particle count
    n = update_nex(optimization_results.x[0, 4])

    filename = simulator.get_tempfile_name()
    model.filename = filename
    model.save()
    model = simulator.run_h5(filename)
    os.remove(model.filename)

    # Simulated (left) vs experimental (right) CLDs for the best fit.
    fig = plt.figure(figsize=(9, 4.5))
    ax = fig.add_subplot(1, 2, 1)
    for i in range(t_int, t_int + 11):
        n_x_t = model.root.output.solution.unit_001.solution_outlet[i, 1:-1]
        ax.plot(x_ct_n * 1e6, n_x_t)
    ax.set_xscale("log")
    # BUG FIX: raw strings and stray '}' removed (broken mathtext).
    ax.set_xlabel(r'$chord~length~/~\mu m$')
    ax.set_ylabel(r'$n~/~(1/m/m^3)$')
    ax.set_title("Simulated")

    ax = fig.add_subplot(1, 2, 2)
    for i in range(t_int, t_int + 11):
        ax.plot(x_ct_n * 1e6, n[i])
    ax.set_xscale("log")
    ax.set_xlabel(r'$chord~length~/~\mu m$')
    ax.set_title("Experimental")

    plt.show()
114 changes: 114 additions & 0 deletions code/cld_psd_utils.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,114 @@
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 7 13:24:53 2023

@author: User
"""

import numpy as np
from scipy.optimize import nnls
from scipy.signal import sosfiltfilt, butter
from numpy import tan as tan

## matrix A. it only depends on the grid and particle structure, independent on the cld.
def get_A(grid, cir):
    """Build the matrix ``A`` that maps a particle size distribution (PSD)
    onto a chord length distribution (CLD).

    ``A`` depends only on the grid and the particle shape (aspect ratio),
    not on the measured CLD itself.

    Parameters
    ----------
    grid : array_like
        Monotonically increasing bin edges.  The same grid is used for
        both the chord-length bins and the particle-size bins (this
        assumption can be relaxed later).
    cir : float
        Particle aspect ratio with ``0 < cir <= 1``.  ``cir == 1``
        treats particles as spheres, ``cir < 1`` as ellipses.

    Returns
    -------
    numpy.ndarray
        Array of shape ``(M, N)`` with ``M = N = len(grid) - 1``.

    Raises
    ------
    ValueError
        If ``cir`` is outside ``(0, 1]``.
    """

    def pdf(L_l, L_u, a_i):
        # Probability that a random chord through a sphere of radius a_i
        # falls into the length interval [L_l, L_u).
        if L_u <= L_l:
            raise ValueError("the upper bound must be larger than the lower bound")
        if L_u <= 2.0 * a_i:
            return np.sqrt(1.0 - (L_l / 2.0 / a_i) ** 2) - np.sqrt(1.0 - (L_u / 2.0 / a_i) ** 2)
        if L_l > 2.0 * a_i:
            # chords longer than the diameter are impossible
            return 0
        return np.sqrt(1.0 - (L_l / 2.0 / a_i) ** 2)

    def pdf_ellip(L_l, L_u, a, a_i, cir):
        # Chord-length probability for an ellipse (semi-axis a_i, aspect
        # ratio cir) cut at rotation angle a.
        if L_u <= L_l:
            raise ValueError("the upper bound must be larger than the lower bound")

        if (a == np.pi) or (a == 0.0):
            # chord aligned with the major axis: circle of radius a_i
            if L_u <= 2.0 * a_i:
                return np.sqrt(1.0 - (L_l / 2.0 / a_i) ** 2) - np.sqrt(1.0 - (L_u / 2.0 / a_i) ** 2)
            if L_l > 2.0 * a_i:
                return 0
            return np.sqrt(1.0 - (L_l / 2.0 / a_i) ** 2)

        if (a == np.pi * 0.5) or (a == np.pi * 1.5):
            # chord aligned with the minor axis: circle of radius cir*a_i
            if L_u <= 2.0 * cir * a_i:
                return np.sqrt(1.0 - (L_l / 2.0 / a_i / cir) ** 2) - np.sqrt(1.0 - (L_u / 2.0 / a_i / cir) ** 2)
            if L_l > 2.0 * cir * a_i:
                return 0
            return np.sqrt(1.0 - (L_l / 2.0 / a_i / cir) ** 2)

        # general angle: the maximum chord length at angle a
        L_max = 2.0 * cir * a_i * np.sqrt((1 + tan(a) ** 2) / (cir ** 2 + tan(a) ** 2))
        if L_max >= L_u:
            return (np.sqrt(1.0 - (cir ** 2 + tan(a) ** 2) / (1.0 + tan(a) ** 2) * (L_l / 2.0 / cir / a_i) ** 2)
                    - np.sqrt(1.0 - (cir ** 2 + tan(a) ** 2) / (1.0 + tan(a) ** 2) * (L_u / 2.0 / cir / a_i) ** 2))
        if L_max < L_l:
            return 0
        return np.sqrt(1.0 - (cir ** 2 + tan(a) ** 2) / (1.0 + tan(a) ** 2) * (L_l / 2.0 / cir / a_i) ** 2)

    def get_centers(grid):
        # midpoints of consecutive grid edges
        return np.asarray([(grid[p] + grid[p - 1]) / 2 for p in range(1, len(grid))])

    # Number of chord bins M and particle-size bins N; both use `grid`
    # for now, but they could be decoupled later.
    M = len(grid) - 1
    N = len(grid) - 1

    l_grid = grid            # chord length grid (edges)
    x_grid = grid            # particle size grid (edges)
    x_ct = get_centers(grid) # particle size bin centers

    if cir == 1.0:
        # Spherical particles: closed-form chord probability per bin.
        A = np.asarray(
            [[pdf(l_grid[j], l_grid[j + 1], x_ct[i] / 2.0) for i in range(N)]
             for j in range(M)]
        )
    elif 0.0 < cir < 1.0:
        # Elliptical particles: average the angle-dependent probability
        # over orientation by numerical integration.
        itg_res = 100  # angular resolution; usually 100 is enough
        pi_space = np.linspace(0, 2.0 * np.pi, itg_res)
        A = np.empty((M, N))
        for j in range(M):
            for i in range(N):
                area_wrapper = [
                    pdf_ellip(x_grid[j], x_grid[j + 1], a, x_ct[i] / 2.0 / np.sqrt(cir), cir)
                    for a in pi_space
                ]
                A[j, i] = np.trapz(area_wrapper, pi_space) / 2.0 / np.pi
    else:
        raise ValueError("the cir must be between 0 and 1.")

    return A


## cld to psd
def cld_to_psd(A, cld, fre=0.35, order=1):
    """Invert a normalized CLD into a normalized, smoothed PSD.

    Parameters
    ----------
    A : numpy.ndarray
        Transformation matrix of shape ``(M, N)`` from :func:`get_A`.
    cld : array_like
        Normalized chord length distribution, length ``M``.
    fre : float, optional
        Critical frequency of the Butterworth low-pass filter (default 0.35).
    order : int, optional
        Order of the Butterworth low-pass filter (default 1).

    Returns
    -------
    filtd_X : numpy.ndarray
        Filtered (smoothed) particle size distribution.
    residuals : float
        2-norm of the NNLS residual ``||A @ x - cld||``.
    """
    # Non-negative least squares keeps the inverted PSD physically
    # meaningful (number densities cannot be negative).
    X_inverse, residuals = nnls(A, cld)

    # Zero-phase Butterworth low-pass to suppress inversion noise;
    # order and frequency can be adjusted by the caller.
    sos = butter(order, fre, output='sos')  # default is lowpass
    filtd_X = sosfiltfilt(sos, X_inverse)

    return filtd_X, residuals
Loading