diff --git a/PlatEMO/Algorithms/Multi-objective optimization/MOEA-D-EGO/EGOSelect.m b/PlatEMO/Algorithms/Multi-objective optimization/MOEA-D-EGO/EGOSelect.m deleted file mode 100644 index 14771de16..000000000 --- a/PlatEMO/Algorithms/Multi-objective optimization/MOEA-D-EGO/EGOSelect.m +++ /dev/null @@ -1,124 +0,0 @@ -function PopDec = EGOSelect(Problem,Population,L1,L2,Ke,delta,nr) -% Selecting Points for Function Evaluation with the assistance of the -% Gaussian models - -%------------------------------- Copyright -------------------------------- -% Copyright (c) 2024 BIMK Group. You are free to use the PlatEMO for -% research purposes. All publications which use this platform or any code -% in the platform should acknowledge the use of "PlatEMO" and reference "Ye -% Tian, Ran Cheng, Xingyi Zhang, and Yaochu Jin, PlatEMO: A MATLAB platform -% for evolutionary multi-objective optimization [educational forum], IEEE -% Computational Intelligence Magazine, 2017, 12(4): 73-87". -%-------------------------------------------------------------------------- - -% This function is written by Cheng He - - %% Fuzzy clustering the solutions - [model,centers] = FCMmodel(Problem,Population,L1,L2); - - %% MOEA/D-DE optimization, where the popsize is set to N, the maximum evaluations is maxeva - Z = min(Population.objs,[],1); - maxeva = 20*(11*Problem.D-1); - - %% Generate the weight vectors - [W,N] = UniformPoint(300,Problem.M); - T = ceil(N/10); - B = pdist2(W,W); - [~,B] = sort(B,2); - B = B(:,1:T); - duplicated = randi(length(Population),N,1); - NewPop = Population(duplicated); % Sample N individuals from the initial population - PopDec = NewPop.decs; PopObj = NewPop.objs; - gmin = inf; - while (maxeva>0) - for i = 1 : N - if rand < delta - P = B(i,randperm(size(B,2))); - else - P = randperm(N); - end - OffDec = OperatorDE(Problem,PopDec(i,:),PopDec(P(1),:),PopDec(P(2),:)); - OffObj = Evaluate(Problem,OffDec,model,centers); - Z = min(Z,OffObj); - g_old = max(abs(PopObj(P,:) - repmat(Z,length(P),1)).*W(P,:),[],2); - g_new = max(repmat(abs(OffObj-Z),length(P),1).*W(P,:),[],2); - gmin = min([gmin,min(g_old),min(g_new)]); - offindex = P(find(g_old>g_new,nr)); - if ~isempty(offindex) - PopDec(offindex,:) = repmat(OffDec,length(offindex),1); - PopObj(offindex,:) = repmat(OffObj,length(offindex),1); - end - end - maxeva = maxeva - N; - end - - %% Select the unsimilar candidate solutions - Q = []; temp = Population.decs; - for i = 1 : N - if min(pdist2(real(PopDec(i,:)),real(temp))) > 1e-5 - Q = [Q,i]; - temp = [temp;PopDec(i,:)]; - end - end - PopDec = PopDec(Q,:); - - %% Kmeans cluster the solutions into Ke clusters and select the solutions with the maximum EI in each cluster - cindex = kmeans(real(PopDec),Ke); - Q = []; - for i = 1 : Ke - index = find(cindex == i); - temp = PopDec(index,:); - [tempObj,tempMSE] = Evaluate(Problem,temp,model,centers); - K = length(index); - EI = zeros(K,1); - for j = 1 : K - [~,r] = min(max(abs(repmat(tempObj(j,:)-Z,size(W,1),1)).*W,[],2)); - EI(j) = EICal(real(tempObj(j,:)),Z,W(r,:),abs(tempMSE(j,:)),gmin); - end - [~,best] = max(EI); - Q = [Q,index(best)]; - end - PopDec = PopDec(Q,:); -end - -function EI = EICal(Obj,Z,lamda,MSE,Gbest) -% Calculate the expected improvement - - M = size(Obj,2); - u = lamda.*(Obj - Z); - sigma2 = MSE; - lamda0 = lamda(1:2); mu0 = u(1:2); sig20 = sigma2(1:2); - [y,x] = GPcal(lamda0,mu0,abs(sig20)); - if M >= 3 - for i = 3 : M - lamda0 = [1, lamda(i)]; mu0 = [y,u(i)]; sig20 = [x,sigma2(i)]; - [y,x] = GPcal(lamda0,mu0,abs(sig20)); - end - end - 
EI = (Gbest-y)*normcdf((Gbest-y)/sqrt(abs(x))) + sqrt(abs(x))*normpdf((Gbest-y)/sqrt(abs(x))); -end - -function [y,x] = GPcal(lamda,mu,sig2) -% Calculate the mu (y) and sigma^2 (x) of the aggregation function - - tao = sqrt(lamda(1)^2*sig2(1)+lamda(2)^2*sig2(2)); - alpha = (mu(1)-mu(2))/tao; - y = mu(1)*normcdf(alpha) + mu(2)*normcdf(-alpha) + tao*normpdf(alpha); - x = (mu(1)^2+lamda(1)^2*sig2(1))*normcdf(alpha) + (mu(2)^2+lamda(2)^2*sig2(2))*normcdf(-alpha) + sum(mu)*normpdf(alpha) - y.^2; -end - -function [PopObj,MSE] = Evaluate(Problem,PopDec,model,centers) -% Predict the objective vector of the candidate solutions accodring to the -% Euclidean distance from each candidate solution to evaluated solutions - - D = pdist2(real(PopDec),real(centers)); - [~,index] = min(D,[],2); - N = size(PopDec,1); - PopObj = zeros(N,Problem.M); - MSE = zeros(N,Problem.M); - for i = 1 : N - for j = 1 : Problem.M - [PopObj(i,j),~,MSE(i,j)] = predictor(PopDec(i,:),model{index(i),j}); - end - end -end \ No newline at end of file diff --git a/PlatEMO/Algorithms/Multi-objective optimization/MOEA-D-EGO/FCMmodel.m b/PlatEMO/Algorithms/Multi-objective optimization/MOEA-D-EGO/FCMmodel.m deleted file mode 100644 index 871f60ac4..000000000 --- a/PlatEMO/Algorithms/Multi-objective optimization/MOEA-D-EGO/FCMmodel.m +++ /dev/null @@ -1,40 +0,0 @@ -function [model,centers] = FCMmodel(Problem,Population,L1,L2) -% Fuzzy clustering-based method for modeling c_size* M models, where c_size -% is the number of clusters and M the number of objectives. - -%------------------------------- Copyright -------------------------------- -% Copyright (c) 2024 BIMK Group. You are free to use the PlatEMO for -% research purposes. All publications which use this platform or any code -% in the platform should acknowledge the use of "PlatEMO" and reference "Ye -% Tian, Ran Cheng, Xingyi Zhang, and Yaochu Jin, PlatEMO: A MATLAB platform -% for evolutionary multi-objective optimization [educational forum], IEEE -% Computational Intelligence Magazine, 2017, 12(4): 73-87". -%-------------------------------------------------------------------------- - -% This function is written by Cheng He - - PopDec = Population.decs; - csize = 1 + ceil((length(Population)-L1)/L2); - [centers,~] = fcm(PopDec,csize,[2 NaN 0.05 false]); - dis = pdist2(PopDec,centers); - [~,index] = sort(-dis); - group = index(1:L1,:); - - %% Build GP model of each objective for each cluster - model = cell(csize,Problem.M); - THETA = 5.*ones(csize,Problem.M,Problem.D); - for i = 1 : csize - temp = Population(group(:,i)); - PopDec = temp.decs; - PopObj = temp.objs; - for j = 1 : Problem.M - dmodel = dacefit(PopDec,PopObj(:,j),... - 'regpoly0','corrgauss',... - squeeze(THETA(i,j,:)),... - 1e-5.*ones(1,Problem.D),... - 100.*ones(1,Problem.D)); - model{i,j} = dmodel; - THETA(i,j,:) = dmodel.theta; - end - end -end \ No newline at end of file diff --git a/PlatEMO/Algorithms/Multi-objective optimization/MOEA-D-EGO/GPmodelFCM.m b/PlatEMO/Algorithms/Multi-objective optimization/MOEA-D-EGO/GPmodelFCM.m new file mode 100644 index 000000000..43f7858fb --- /dev/null +++ b/PlatEMO/Algorithms/Multi-objective optimization/MOEA-D-EGO/GPmodelFCM.m @@ -0,0 +1,218 @@ +function [model,centers] = GPmodelFCM(train_x,train_y,L1,L2) +% Fuzzy clustering-based method for modeling c_size* M models, where c_size +% is the number of clusters and M the number of objectives. + +%------------------------------- Copyright -------------------------------- +% Copyright (c) 2024 BIMK Group. 
You are free to use the PlatEMO for +% research purposes. All publications which use this platform or any code +% in the platform should acknowledge the use of "PlatEMO" and reference "Ye +% Tian, Ran Cheng, Xingyi Zhang, and Yaochu Jin, PlatEMO: A MATLAB platform +% for evolutionary multi-objective optimization [educational forum], IEEE +% Computational Intelligence Magazine, 2017, 12(4): 73-87". +%-------------------------------------------------------------------------- + +% This function was written by Liang Zhao (liazhao5-c@my.cityu.edu.hk). + + [K,M] = size(train_y); + D = size(train_x,2); + if K <= L1 + % all the K evaluated points are directly used for building GP model + csize = 1; + [centers,~] = FCM(train_x,csize,[2 NaN 0.05 false]); + model = cell(1,M); + theta = (K ^ (-1 ./ K)) .* ones(1, D); + for j = 1 : M + model{1,j}= Dacefit(train_x,train_y(:,j),'regpoly0','corrgauss',theta,1e-6*ones(1,D),20*ones(1,D)); + end + else + % FuzzyCM + csize = 1 + ceil((K-L1)/L2); + [centers,~] = FCM(train_x,csize,[2 NaN 0.05 false]); + dis = pdist2(train_x,centers); + [~,index] = sort(dis); + theta = (L1 ^ (-1 ./ L1)) .* ones(1, D); + + %% Build GP model of each objective for each cluster + model = cell(csize, M); + for i = 1 : csize + for j = 1 : M + temp_index = index(1:L1,i); + model{i,j} = Dacefit(train_x(temp_index,:),train_y(temp_index,j), 'regpoly0','corrgauss', theta, 1e-6.*ones(1,D), 20.*ones(1,D)); + end + end + end + +end +% >>>>>>>>>>>>>>>> Auxiliary functions ==================== +function [center, U, obj_fcn] = FCM(data, cluster_n, options) + %FCM Data set clustering using fuzzy c-means clustering. + % + % [CENTER, U, OBJ_FCN] = FCM(DATA, N_CLUSTER) finds N_CLUSTER number of + % clusters in the data set DATA. DATA is size M-by-N, where M is the number of + % data points and N is the number of coordinates for each data point. The + % coordinates for each cluster center are returned in the rows of the matrix + % CENTER. The membership function matrix U contains the grade of membership of + % each DATA point in each cluster. The values 0 and 1 indicate no membership + % and full membership respectively. Grades between 0 and 1 indicate that the + % data point has partial membership in a cluster. At each iteration, an + % objective function is minimized to find the best location for the clusters + % and its values are returned in OBJ_FCN. + % + % [CENTER, ...] = FCM(DATA,N_CLUSTER,OPTIONS) specifies a vector of options + % for the clustering process: + % OPTIONS(1): exponent for the matrix U (default: 2.0) + % OPTIONS(2): maximum number of iterations (default: 100) + % OPTIONS(3): minimum amount of improvement (default: 1e-5) + % OPTIONS(4): info display during iteration (default: 1) + % The clustering process stops when the maximum number of iterations + % is reached, or when the objective function improvement between two + % consecutive iterations is less than the minimum amount of improvement + % specified. Use NaN to select the default value. 
+ % + % Example + % data = rand(100,2); + % [center,U,obj_fcn] = fcm(data,2); + % plot(data(:,1), data(:,2),'o'); + % hold on; + % maxU = max(U); + % % Find the data points with highest grade of membership in cluster 1 + % index1 = find(U(1,:) == maxU); + % % Find the data points with highest grade of membership in cluster 2 + % index2 = find(U(2,:) == maxU); + % line(data(index1,1),data(index1,2),'marker','*','color','g'); + % line(data(index2,1),data(index2,2),'marker','*','color','r'); + % % Plot the cluster centers + % plot([center([1 2],1)],[center([1 2],2)],'*','color','k') + % hold off; + % + % See also FCMDEMO, INITFCM, IRISFCM, DISTFCM, STEPFCM. + + % Roger Jang, 12-13-94, N. Hickey 04-16-01 + % Copyright 1994-2018 The MathWorks, Inc. + + if nargin ~= 2 && nargin ~= 3 + error(message("fuzzy:general:errFLT_incorrectNumInputArguments")) + end + + data_n = size(data, 1); + + % Change the following to set default options + default_options = [2; % exponent for the partition matrix U + 100; % max. number of iteration + 1e-5; % min. amount of improvement + 1]; % info display during iteration + + if nargin == 2 + options = default_options; + else + % If "options" is not fully specified, pad it with default values. + if length(options) < 4 + tmp = default_options; + tmp(1:length(options)) = options; + options = tmp; + end + % If some entries of "options" are nan's, replace them with defaults. + nan_index = find(isnan(options)==1); + options(nan_index) = default_options(nan_index); + if options(1) <= 1 + error(message("fuzzy:general:errFcm_expMustBeGtOne")) + end + end + + expo = options(1); % Exponent for U + max_iter = options(2); % Max. iteration + min_impro = options(3); % Min. improvement + display = options(4); % Display info or not + + obj_fcn = zeros(max_iter, 1); % Array for objective function + + U = initfcm(cluster_n, data_n); % Initial fuzzy partition + % Main loop + for i = 1:max_iter + [U, center, obj_fcn(i)] = stepfcm(data, U, cluster_n, expo); + if display + fprintf('Iteration count = %d, obj. fcn = %f\n', i, obj_fcn(i)); + end + % check termination condition + if i > 1 + if abs(obj_fcn(i) - obj_fcn(i-1)) < min_impro, break; end + end + end + + iter_n = i; % Actual number of iterations + obj_fcn(iter_n+1:max_iter) = []; +end + +function out = distfcm(center, data) + %DISTFCM Distance measure in fuzzy c-mean clustering. + % OUT = DISTFCM(CENTER, DATA) calculates the Euclidean distance + % between each row in CENTER and each row in DATA, and returns a + % distance matrix OUT of size M by N, where M and N are row + % dimensions of CENTER and DATA, respectively, and OUT(I, J) is + % the distance between CENTER(I,:) and DATA(J,:). + % + % See also FCMDEMO, INITFCM, IRISFCM, STEPFCM, and FCM. + + % Roger Jang, 11-22-94, 6-27-95. + % Copyright 1994-2016 The MathWorks, Inc. + + out = zeros(size(center, 1), size(data, 1)); + + % fill the output matrix + + if size(center, 2) > 1 + for k = 1:size(center, 1) + out(k, :) = sqrt(sum(((data-ones(size(data, 1), 1)*center(k, :)).^2), 2)); + end + else % 1-D data + for k = 1:size(center, 1) + out(k, :) = abs(center(k)-data)'; + end + end +end +function U = initfcm(cluster_n, data_n) + %INITFCM Generate initial fuzzy partition matrix for fuzzy c-means clustering. + % U = INITFCM(CLUSTER_N, DATA_N) randomly generates a fuzzy partition + % matrix U that is CLUSTER_N by DATA_N, where CLUSTER_N is number of + % clusters and DATA_N is number of data points. 
The summation of each + % column of the generated U is equal to unity, as required by fuzzy + % c-means clustering. + % + % See also DISTFCM, FCMDEMO, IRISFCM, STEPFCM, FCM. + + % Roger Jang, 12-1-94. + % Copyright 1994-2002 The MathWorks, Inc. + + U = rand(cluster_n, data_n); + col_sum = sum(U); + U = U./col_sum(ones(cluster_n, 1), :); +end + +function [U_new, center, obj_fcn] = stepfcm(data, U, cluster_n, expo) + %STEPFCM One step in fuzzy c-mean clustering. + % [U_NEW, CENTER, ERR] = STEPFCM(DATA, U, CLUSTER_N, EXPO) + % performs one iteration of fuzzy c-mean clustering, where + % + % DATA: matrix of data to be clustered. (Each row is a data point.) + % U: partition matrix. (U(i,j) is the MF value of data j in cluster j.) + % CLUSTER_N: number of clusters. + % EXPO: exponent (> 1) for the partition matrix. + % U_NEW: new partition matrix. + % CENTER: center of clusters. (Each row is a center.) + % ERR: objective function for partition U. + % + % Note that the situation of "singularity" (one of the data points is + % exactly the same as one of the cluster centers) is not checked. + % However, it hardly occurs in practice. + % + % See also DISTFCM, INITFCM, IRISFCM, FCMDEMO, FCM. + + % Copyright 1994-2014 The MathWorks, Inc. + + mf = U.^expo; % MF matrix after exponential modification + center = mf*data./(sum(mf,2)*ones(1,size(data,2))); %new center + dist = distfcm(center, data); % fill the distance matrix + obj_fcn = sum(sum((dist.^2).*mf)); % objective function + tmp = dist.^(-2/(expo-1)); % calculate new U, suppose expo != 1 + U_new = tmp./(ones(cluster_n, 1)*sum(tmp)); +end \ No newline at end of file diff --git a/PlatEMO/Algorithms/Multi-objective optimization/MOEA-D-EGO/MOEADEGO.m b/PlatEMO/Algorithms/Multi-objective optimization/MOEA-D-EGO/MOEADEGO.m index d9e730305..c3791e55d 100644 --- a/PlatEMO/Algorithms/Multi-objective optimization/MOEA-D-EGO/MOEADEGO.m +++ b/PlatEMO/Algorithms/Multi-objective optimization/MOEA-D-EGO/MOEADEGO.m @@ -1,11 +1,7 @@ classdef MOEADEGO < ALGORITHM -% +% % MOEA/D with efficient global optimization -% Ke --- 5 --- The number of function evaluations at each generation -% delta --- 0.9 --- The probability of choosing parents locally -% nr --- 2 --- Maximum number of solutions replaced by each offspring -% L1 --- 80 --- The maximal number of points used for building a local model -% L2 --- 20 --- The maximal number of points used for building a local model +% batch_size --- 5 --- Number of true function evaluations per iteration %------------------------------- Reference -------------------------------- % Q. Zhang, W. Liu, E. Tsang, and B. Virginas, Expensive multiobjective @@ -20,24 +16,34 @@ % Computational Intelligence Magazine, 2017, 12(4): 73-87". %-------------------------------------------------------------------------- -% This function is written by Cheng He +% This function was written by Liang Zhao (liazhao5-c@my.cityu.edu.hk). 
+% - The Java code of MOEA/D-EGO (written by Wudong Liu) is available at
+%   https://sites.google.com/view/moead/resources
+% - The MATLAB code of MOEA/D-EGO without FuzzyCM (written by Liang ZHAO) is
+%   available at https://github.com/mobo-d/MOEAD-EGO

     methods
         function main(Algorithm,Problem)
-            %% Parameter setting
-            [Ke,delta,nr,L1,L2] = Algorithm.ParameterSet(5,0.9,2,80,20);
-
-            %% Generate random population
-            NI = 11*Problem.D-1;
-            P  = UniformPoint(NI,Problem.D,'Latin');
-            Population = Problem.Evaluation(repmat(Problem.upper-Problem.lower,NI,1).*P+repmat(Problem.lower,NI,1));
-            L1 = min(L1,length(Population));
-
+            %% Parameter setting
+            batch_size = Algorithm.ParameterSet(5);
+
+            %% Generate initial design using LHS or other DOE methods
+            x_lhs  = lhsdesign(11*Problem.D-1, Problem.D,'criterion','maximin','iterations',1000);
+            x_init = Problem.lower + (Problem.upper - Problem.lower).*x_lhs;
+            Archive = Problem.Evaluation(x_init);
+            % find non-dominated solutions
+            FrontNo = NDSort(Archive.objs,1);
+
             %% Optimization
-            while Algorithm.NotTerminated(Population)
-                PopDec     = EGOSelect(Problem,Population,L1,L2,Ke,delta,nr);
-                Offspring  = Problem.Evaluation(PopDec);
-                Population = [Population,Offspring];
+            while Algorithm.NotTerminated(Archive(FrontNo==1))
+                %% Maximize ETI using MOEA/D and select q candidate points
+                Batch_size = min(Problem.maxFE - Problem.FE,batch_size); % the total budget is Problem.maxFE
+                train_x = Archive.decs; train_y = Archive.objs;
+                new_x = Opt_ETI_FCM(Problem.M,Problem.D,Problem.lower,Problem.upper,Batch_size,train_x,train_y);
+
+                %% Expensive Evaluation
+                Archive = [Archive,Problem.Evaluation(new_x)];
+                FrontNo = NDSort(Archive.objs,1);
             end
         end
     end
diff --git a/PlatEMO/Algorithms/Multi-objective optimization/MOEA-D-EGO/Opt_ETI_FCM.m b/PlatEMO/Algorithms/Multi-objective optimization/MOEA-D-EGO/Opt_ETI_FCM.m
new file mode 100644
index 000000000..d26be3278
--- /dev/null
+++ b/PlatEMO/Algorithms/Multi-objective optimization/MOEA-D-EGO/Opt_ETI_FCM.m
@@ -0,0 +1,303 @@
+function new_x = Opt_ETI_FCM(M,D,xlower,xupper,Batch_size,train_x,train_y)
+% Maximizing N subproblems and selecting a batch of points based on the
+% Expected Tchebycheff Improvement (ETI)
+
+%------------------------------- Copyright --------------------------------
+% Copyright (c) 2024 BIMK Group. You are free to use the PlatEMO for
+% research purposes. All publications which use this platform or any code
+% in the platform should acknowledge the use of "PlatEMO" and reference "Ye
+% Tian, Ran Cheng, Xingyi Zhang, and Yaochu Jin, PlatEMO: A MATLAB platform
+% for evolutionary multi-objective optimization [educational forum], IEEE
+% Computational Intelligence Magazine, 2017, 12(4): 73-87".
+%--------------------------------------------------------------------------
+
+% This function was written by Liang Zhao (liazhao5-c@my.cityu.edu.hk).
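For orientation, a minimal stand-alone sketch of calling the new batch-selection routine, assuming PlatEMO (UniformPoint, NDSort, and the DACE files further down in this diff) is on the MATLAB path; the toy objectives and problem sizes are illustrative assumptions, only the Opt_ETI_FCM signature is taken from the patch:

% Illustrative call on a cheap 2-objective toy problem (not part of the patch)
D = 2; M = 2;
xlower  = zeros(1,D); xupper = ones(1,D);
train_x = xlower + (xupper-xlower).*lhsdesign(11*D-1,D);    % initial design, as in MOEADEGO.m
train_y = [sum(train_x.^2,2), sum((train_x-1).^2,2)];       % stand-in objectives (assumed, for illustration)
new_x   = Opt_ETI_FCM(M,D,xlower,xupper,5,train_x,train_y); % batch of 5 points to evaluate expensively next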
+ + %% Fuzzy clustering the solutions + L1 = 80; + L2 = 20; + [GPModels,centers] = GPmodelFCM(train_x,train_y,L1,L2); + + %% Generate the initial weight vectors + % # of weight vectors:M = 2, 3, 4, 5, 6 + num_weights = [200,210,295,456,462]; + if M <= 3 + [ref_vecs, ~] = UniformPoint(num_weights(M-1),M); % simplex-lattice design + elseif M <= 6 + [ref_vecs, ~] = UniformPoint(num_weights(M-1),M,'ILD'); % incremental lattice design + else + [ref_vecs, ~] = UniformPoint(500,M); % Two-layered SLD + end + + %% Estimate the Utopian point z + z = get_estimation_z(D,xlower,xupper,GPModels,centers,ref_vecs,min(train_y,[],1)); + gmin = get_gmin(train_y,ref_vecs,z); + + %% Using MOEA/D-DE to Maximize ETI + [pop_ETI,candidate_x,~,~] = MOEAD_ETI(D,xlower,xupper,GPModels,centers,ref_vecs,gmin,z); + + %% Select the unsimilar candidate solutions and build candidate pool Q + Q = []; Q_ETI = []; temp = train_x; + for i = 1 : size(candidate_x,1) + if min(pdist2(real(candidate_x(i,:)),real(temp))) > 1e-5 + if pop_ETI(i) > 0 + Q = [Q;candidate_x(i,:)]; Q_ETI = [Q_ETI;pop_ETI(i)]; + temp = [temp;candidate_x(i,:)]; + end + end + end + + new_x = K_means_Batch_Select(Q,Batch_size,candidate_x,Q_ETI) ; +end + +% >>>>>>>>>>>>>>>> Auxiliary functions ==================== +function gmin = get_gmin(D_objs,ref_vecs,z) +% calculate the minimum of Tch for each ref_vec +% g(x|w,z) = max{w1(f1-z1),w2(f2-z2),...} + Objs_translated = D_objs-z; % n*M + G = ref_vecs(:,1)*Objs_translated(:,1)'; % N*n, f1 + for j = 2:size(ref_vecs,2) + G = max(G,ref_vecs(:,j)*Objs_translated(:,j)'); % N*n, max(fi,fj) + end + gmin = min(G,[],2); % N*1 one for each weight vector +end + +function [pop_ETI,pop_x,pop_mean,pop_std] = MOEAD_ETI(D,xlower,xupper,GPModels,centers,ref_vecs,gmin,z) +%% using MOEA/D-DE to solve subproblems + % In order to find the maximum value of ETI for each sub-problem, + % it is recommended to set the maximum number of iterations to at least 50. 
+    %% Parameter setting for MOEA/D-DE
+    delta = 0.9; nr = 2; maxIter = 50;
+    pop_size = size(ref_vecs,1);
+
+    %% neighbourhood
+    T = ceil(pop_size/10); % size of neighbourhood
+    B = pdist2(ref_vecs,ref_vecs);
+    [~,B] = sort(B,2);
+    B = B(:,1:T);
+
+    %% the initial population for MOEA/D
+    pop_x = (xupper-xlower).*lhsdesign(pop_size, D) + xlower;
+    [pop_mean,pop_std] = GPEvaluate_FCM(pop_x,GPModels,centers);
+    pop_ETI = get_ETI(pop_mean,pop_std,ref_vecs,gmin,z);
+
+    for gen = 1 : maxIter-1
+        for i = 1 : pop_size
+            if rand < delta
+                P = B(i,randperm(size(B,2)));
+            else
+                P = randperm(pop_size);
+            end
+            %% generate an offspring and calculate its predictive mean and std
+            off_x = operator_DE(pop_x(i,:),pop_x(P(1),:),pop_x(P(2),:), xlower,xupper);
+            [off_mean,off_std] = GPEvaluate_FCM(off_x,GPModels,centers);
+
+            ETI_new = get_ETI(repmat(off_mean,length(P),1),repmat(off_std,length(P),1),ref_vecs(P,:),gmin(P),z);
+
+            % replace at most nr neighbors whose current ETI is worse than that of the offspring
+            idx      = find(pop_ETI(P)<ETI_new,nr);
+            offindex = P(idx);
+            if ~isempty(offindex)
+                pop_x(offindex,:)    = repmat(off_x,length(offindex),1);
+                pop_mean(offindex,:) = repmat(off_mean,length(offindex),1);
+                pop_std(offindex,:)  = repmat(off_std,length(offindex),1);
+                pop_ETI(offindex)    = ETI_new(idx);
+            end
+        end
+    end
+end
+function SelectDecs = K_means_Batch_Select(Q,Batch_size,candidate_x,Q_ETI)
+    batch_size = min(Batch_size,size(Q,1)); % in case Q is smaller than Batch size
+
+    if batch_size == 0
+        Qb = randperm(size(candidate_x,1),Batch_size);
+        SelectDecs = candidate_x(Qb,:);
+    else
+        cindex = kmeans(Q,batch_size);
+        Qb = [];
+        for i = 1 : batch_size
+            index = find(cindex == i);
+            [~,best] = max(Q_ETI(index));
+            Qb = [Qb,index(best)];
+        end
+        SelectDecs = Q(Qb,:);
+    end
+    % when Q is smaller than batch size
+    if size(SelectDecs,1) < Batch_size
+        Qb = randperm(size(candidate_x,1), Batch_size - size(SelectDecs,1));
+        SelectDecs = [SelectDecs;candidate_x(Qb,:)];
+    end
+end
+% >>>>>>>>>>>>>>>> functions in PlatEMO ====================
+function Offspring = operator_DE(Parent1,Parent2,Parent3, xlower,xupper)
+%OperatorDE - The operator of differential evolution.
+%
+%   Off = OperatorDE(P1,P2,P3, xlower,xupper) uses the operator of differential
+%   evolution to generate offsprings for problem Pro based on parents P1,
+%   P2, and P3. If P1, P2, and P3 are
+%   matrices of decision variables, then Off is also a matrix of decision
+%   variables, i.e., the offsprings are not evaluated. Each object or row
+%   of P1, P2, and P3 is used to generate one offspring by P1 + 0.5*(P2-P3)
+%   and polynomial mutation.
+%
+%------------------------------- Reference --------------------------------
+% H. Li and Q. Zhang, Multiobjective optimization problems with complicated
+% Pareto sets, MOEA/D and NSGA-II, IEEE Transactions on Evolutionary
+% Computation, 2009, 13(2): 284-302.
+%------------------------------- Copyright --------------------------------
+% Copyright (c) 2024 BIMK Group. You are free to use the PlatEMO for
+% research purposes. All publications which use this platform or any code
+% in the platform should acknowledge the use of "PlatEMO" and reference "Ye
+% Tian, Ran Cheng, Xingyi Zhang, and Yaochu Jin, PlatEMO: A MATLAB platform
+% for evolutionary multi-objective optimization [educational forum], IEEE
+% Computational Intelligence Magazine, 2017, 12(4): 73-87".
+%-------------------------------------------------------------------------- + + %% Parameter setting + [CR,F,proM,disM] = deal(1,0.5,1,20); + [N,D] = size(Parent1); + + %% Differental evolution + Site = rand(N,D) < CR; + Offspring = Parent1; + Offspring(Site) = Offspring(Site) + F*(Parent2(Site)-Parent3(Site)); + + %% Polynomial mutation + Lower = repmat(xlower,N,1); + Upper = repmat(xupper,N,1); + Site = rand(N,D) < proM/D; + mu = rand(N,D); + temp = Site & mu<=0.5; + Offspring = min(max(Offspring,Lower),Upper); + Offspring(temp) = Offspring(temp)+(Upper(temp)-Lower(temp)).*((2.*mu(temp)+(1-2.*mu(temp)).*... + (1-(Offspring(temp)-Lower(temp))./(Upper(temp)-Lower(temp))).^(disM+1)).^(1/(disM+1))-1); + temp = Site & mu>0.5; + Offspring(temp) = Offspring(temp)+(Upper(temp)-Lower(temp)).*(1-(2.*(1-mu(temp))+2.*(mu(temp)-0.5).*... + (1-(Upper(temp)-Offspring(temp))./(Upper(temp)-Lower(temp))).^(disM+1)).^(1/(disM+1))); +end \ No newline at end of file diff --git a/PlatEMO/Algorithms/Multi-objective optimization/MOEA-D-EGO/dacefit.m b/PlatEMO/Algorithms/Multi-objective optimization/MOEA-D-EGO/dacefit.m deleted file mode 100644 index 09bd2376c..000000000 --- a/PlatEMO/Algorithms/Multi-objective optimization/MOEA-D-EGO/dacefit.m +++ /dev/null @@ -1,358 +0,0 @@ -function [dmodel,perf] = dacefit(S,Y,regr,corr,theta0,lob,upb) -%dacefit - Constrained non-linear least-squares fit of a given correlation -%model to the provided data set and regression model -% -% Call -% [dmodel, perf] = dacefit(S, Y, regr, corr, theta0) -% [dmodel, perf] = dacefit(S, Y, regr, corr, theta0, lob, upb) -% -% Input -% S, Y : Data points (S(i,:), Y(i,:)), i = 1,...,m -% regr : Function handle to a regression model -% corr : Function handle to a correlation function -% theta0 : Initial guess on theta, the correlation function parameters -% lob,upb : If present, then lower and upper bounds on theta -% Otherwise, theta0 is used for theta -% -% Output -% dmodel : DACE model: a struct with the elements -% regr : function handle to the regression model -% corr : function handle to the correlation function -% theta : correlation function parameters -% beta : generalized least squares estimate -% gamma : correlation factors -% sigma2 : maximum likelihood estimate of the process variance -% S : scaled design sites -% Ssc : scaling factors for design arguments -% Ysc : scaling factors for design ordinates -% C : Cholesky factor of correlation matrix -% Ft : Decorrelated regression matrix -% G : From QR factorization: Ft = Q*G' . -% perf : struct with performance information. 
Elements -% nv : Number of evaluations of objective function -% perf : (q+2)*nv array, where q is the number of elements -% in theta, and the columns hold current values of -% [theta; psi(theta); type] -% |type| = 1, 2 or 3, indicate 'start', 'explore' or 'move' -% A negative value for type indicates an uphill step - -% hbn@imm.dtu.dk -% Last update September 3, 2002 - - % Check design points - [m,n] = size(S); % number of design sites and their dimension - sY = size(Y); - if min(sY) == 1 - Y = Y(:); - lY = max(sY); - else - lY = sY(1); - end - if m ~= lY - error('S and Y must have the same number of rows') - end - % Check correlation parameters if it is given - lth = length(theta0); - if nargin > 5 % optimization case - if length(lob) ~= lth || length(upb) ~= lth - error('theta0, lob and upb must have the same length') - end - if any(lob <= 0) || any(upb < lob) - error('The bounds must satisfy 0 < lob <= upb') - end - else % given theta - if any(theta0 <= 0) - error('theta0 must be strictly positive') - end - end - % Normalize data - mS = mean(S); sS = std(S); - mY = mean(Y); sY = std(Y); - % 02.08.27: Check for 'missing dimension' - j = find(sS == 0); - if ~isempty(j) - sS(j) = 1; - end - j = find(sY == 0); - if ~isempty(j) - sY(j) = 1; - end - S = (S - repmat(mS,m,1)) ./ repmat(sS,m,1); - Y = (Y - repmat(mY,m,1)) ./ repmat(sY,m,1); - % Calculate distances D between points - mzmax = m*(m-1) / 2; % number of non-zero distances - ij = zeros(mzmax, 2); % initialize matrix with indices - D = zeros(mzmax, n); % initialize matrix with distances - LL = 0; - for k = 1 : m-1 - LL = LL(end) + (1 : m-k); - ij(LL,:) = [repmat(k,m-k,1) (k+1:m)']; % indices for sparse matrix - D(LL,:) = repmat(S(k,:),m-k,1)-S(k+1:m,:); % differences between points - end - if min(sum(abs(D),2) ) == 0 - error('Multiple design sites are not allowed') - end - % Regression matrix - F = feval(regr, S); - [mF,p] = size(F); - if mF ~= m - error('number of rows in F and S do not match') - end - if p > mF - error('least squares problem is underdetermined') - end - % parameters for objective function - par = struct('corr',corr,'regr',regr,'y',Y,'F',F,'D',D,'ij',ij,'scS',sS); - % Determine theta - if nargin > 5 - % Bound constrained non-linear optimization - [theta, f, fit, perf] = boxmin(theta0, lob, upb, par); - if isinf(f) - error('Bad parameter region. Try increasing upb') - end - else - % Given theta - theta = theta0(:); - [f,fit] = objfunc(theta, par); - perf = struct('perf',[theta; f; 1], 'nv',1); - if isinf(f) - error('Bad point. Try increasing theta0') - end - end - % Return values - dmodel = struct('regr',regr,'corr',corr,'theta',theta.','beta',fit.beta,... - 'gamma',fit.gamma,'sigma2',sY.^2.*fit.sigma2,'S',S,'Ssc',[mS; sS],... - 'Ysc',[mY; sY],'C',fit.C,'Ft',fit.Ft,'G',fit.G); -end - -function [obj, fit] = objfunc(theta, par) - % Initialize - obj = inf; - fit = struct('sigma2',NaN,'beta',NaN,'gamma',NaN,'C',NaN,'Ft',NaN,'G',NaN); - m = size(par.F,1); - % Set up R - r = feval(par.corr, theta, par.D); - idx = find(r > 0); o = (1 : m)'; - mu = (10+m)*eps; - R = sparse([par.ij(idx,1); o],[par.ij(idx,2); o],[r(idx); ones(m,1)+mu]); - % Cholesky factorization with check for pos. def. 
- [C,rd] = chol(R); - if rd - return; - end - % Get least squares solution - C = C'; - Ft = C \ par.F; - [Q,G] = qr(Ft,0); - if rcond(G) < 1e-10 - % Check F - if cond(par.F) > 1e15 - error('F is too ill conditioned\nPoor combination of regression model and design sites') - else % Matrix Ft is too ill conditioned - return - end - end - Yt = C \ par.y; - beta = G \ (Q'*Yt); - rho = Yt - Ft*beta; sigma2 = sum(rho.^2)/m; - detR = prod( full(diag(C)) .^ (2/m) ); - obj = sum(sigma2) * detR; - if nargout > 1 - fit = struct('sigma2',sigma2,'beta',beta,'gamma',rho'/C,'C',C,'Ft',Ft,'G',G'); - end -end - -function [t,f,fit,perf] = boxmin(t0,lo,up,par) -%BOXMIN Minimize with positive box constraints - - % Initialize - [t, f, fit, itpar] = start(t0, lo, up, par); - if ~isinf(f) - % Iterate - p = length(t); - if p <= 2 - kmax = 2; - else - kmax = min(p,4); - end - for k = 1 : kmax - th = t; - [t, f, fit, itpar] = explore(t, f, fit, itpar, par); - [t, f, fit, itpar] = move(th, t, f, fit, itpar, par); - end - end - perf = struct('nv',itpar.nv, 'perf',itpar.perf(:,1:itpar.nv)); -end - -function [t,f,fit,itpar] = start(t0,lo,up,par) -% Get starting point and iteration parameters - - % Initialize - t = t0(:); - lo = lo(:); - up = up(:); - p = length(t); - D = 2 .^((1:p)'/(p+2)); - ee = find(up == lo); % Equality constraints - if ~isempty(ee) - D(ee) = ones(length(ee),1); - t(ee) = up(ee); - end - ng = find(t < lo | up < t); % Free starting values - if ~isempty(ng) - t(ng) = (lo(ng) .* up(ng).^7).^(1/8); % Starting point - end - ne = find(D ~= 1); - % Check starting point and initialize performance info - [f,fit] = objfunc(t,par); - nv = 1; - itpar = struct('D',D,'ne',ne,'lo',lo,'up',up,'perf',zeros(p+2,200*p),'nv',1); - itpar.perf(:,1) = [t; f; 1]; - if isinf(f) % Bad parameter region - return - end - if length(ng) > 1 % Try to improve starting guess - d0 = 16; d1 = 2; q = length(ng); - th = t; fh = f; jdom = ng(1); - for k = 1 : q - j = ng(k); - fk = fh; - tk = th; - DD = ones(p,1); DD(ng) = repmat(1/d1,q,1); DD(j) = 1/d0; - alpha = min(log(lo(ng) ./ th(ng)) ./ log(DD(ng))) / 5; - v = DD .^ alpha; - for rept = 1 : 4 - tt = tk .* v; - [ff, fitt] = objfunc(tt,par); nv = nv+1; - itpar.perf(:,nv) = [tt; ff; 1]; - if ff <= fk - tk = tt; - fk = ff; - if ff <= f - t = tt; - f = ff; - fit = fitt; - jdom = j; - end - else - itpar.perf(end,nv) = -1; - break - end - end - end % improve - % Update Delta - if jdom > 1 - D([1 jdom]) = D([jdom 1]); - itpar.D = D; - end - end % free variables - itpar.nv = nv; -end - -function [t,f,fit,itpar] = explore(t,f,fit,itpar,par) -% Explore step - - nv = itpar.nv; - ne = itpar.ne; - for k = 1 : length(ne) - j = ne(k); - tt = t; - DD = itpar.D(j); - if t(j) == itpar.up(j) - atbd = 1; - tt(j) = t(j) / sqrt(DD); - elseif t(j) == itpar.lo(j) - atbd = 1; - tt(j) = t(j) * sqrt(DD); - else - atbd = 0; - tt(j) = min(itpar.up(j), t(j)*DD); - end - [ff,fitt] = objfunc(tt,par); - nv = nv+1; - itpar.perf(:,nv) = [tt; ff; 2]; - if ff < f - t = tt; - f = ff; - fit = fitt; - else - itpar.perf(end,nv) = -2; - if ~atbd % try decrease - tt(j) = max(itpar.lo(j), t(j)/DD); - [ff,fitt] = objfunc(tt,par); - nv = nv+1; - itpar.perf(:,nv) = [tt; ff; 2]; - if ff < f - t = tt; - f = ff; - fit = fitt; - else - itpar.perf(end,nv) = -2; - end - end - end - end - itpar.nv = nv; -end - -function [t,f,fit,itpar] = move(th,t,f,fit,itpar,par) -% Pattern move - - nv = itpar.nv; - p = length(t); - v = t ./ th; - if all(v == 1) - itpar.D = itpar.D([2:p 1]).^.2; - return; - end - % Proper move - rept = 1; - while rept - 
tt = min(itpar.up, max(itpar.lo, t .* v)); - [ff,fitt] = objfunc(tt,par); - nv = nv+1; - itpar.perf(:,nv) = [tt; ff; 3]; - if ff < f - t = tt; - f = ff; - fit = fitt; - v = v .^ 2; - else - itpar.perf(end,nv) = -3; - rept = 0; - end - if any(tt == itpar.lo | tt == itpar.up) - rept = 0; - end - end - itpar.nv = nv; - itpar.D = itpar.D([2:p 1]).^.25; -end - -function [r,dr] = corrgauss(theta,d) -%CORRGAUSS Gaussian correlation function, - - [m,n] = size(d); % number of differences and dimension of data - if length(theta) == 1 - theta = repmat(theta,1,n); - elseif length(theta) ~= n - error('Length of theta must be 1 or %d',n) - end - td = d.^2 .* repmat(-theta(:).',m,1); - r = exp(sum(td, 2)); - dr = repmat(-2*theta(:).',m,1) .* d .* repmat(r,1,n); -end - -function [f,df] = regpoly0(S) -%REGPOLY0 Zero order polynomial regression function - - f = ones(size(S,1),1); - df = zeros(size(S,2),1); -end - -function [f,df] = regpoly1(S) -%REGPOLY1 First order polynomial regression function - - f = [ones(size(S,1),1),S]; - df = [zeros(size(S,2),1),eye(size(S,2))]; -end \ No newline at end of file diff --git a/PlatEMO/Algorithms/Multi-objective optimization/MOEA-D-EGO/distfcm.m b/PlatEMO/Algorithms/Multi-objective optimization/MOEA-D-EGO/distfcm.m deleted file mode 100644 index ea35b6a7c..000000000 --- a/PlatEMO/Algorithms/Multi-objective optimization/MOEA-D-EGO/distfcm.m +++ /dev/null @@ -1,26 +0,0 @@ -function out = distfcm(center, data) -%DISTFCM Distance measure in fuzzy c-mean clustering. -% OUT = DISTFCM(CENTER, DATA) calculates the Euclidean distance -% between each row in CENTER and each row in DATA, and returns a -% distance matrix OUT of size M by N, where M and N are row -% dimensions of CENTER and DATA, respectively, and OUT(I, J) is -% the distance between CENTER(I,:) and DATA(J,:). -% -% See also FCMDEMO, INITFCM, IRISFCM, STEPFCM, and FCM. - -% Roger Jang, 11-22-94, 6-27-95. -% Copyright 1994-2016 The MathWorks, Inc. - -out = zeros(size(center, 1), size(data, 1)); - -% fill the output matrix - -if size(center, 2) > 1 - for k = 1:size(center, 1) - out(k, :) = sqrt(sum(((data-ones(size(data, 1), 1)*center(k, :)).^2), 2)); - end -else % 1-D data - for k = 1:size(center, 1) - out(k, :) = abs(center(k)-data)'; - end -end diff --git a/PlatEMO/Algorithms/Multi-objective optimization/MOEA-D-EGO/fcm.m b/PlatEMO/Algorithms/Multi-objective optimization/MOEA-D-EGO/fcm.m deleted file mode 100644 index a648c2b82..000000000 --- a/PlatEMO/Algorithms/Multi-objective optimization/MOEA-D-EGO/fcm.m +++ /dev/null @@ -1,97 +0,0 @@ -function [center, U, obj_fcn] = fcm(data, cluster_n, options) -%FCM Data set clustering using fuzzy c-means clustering. -% -% [CENTER, U, OBJ_FCN] = FCM(DATA, N_CLUSTER) finds N_CLUSTER number of -% clusters in the data set DATA. DATA is size M-by-N, where M is the number of -% data points and N is the number of coordinates for each data point. The -% coordinates for each cluster center are returned in the rows of the matrix -% CENTER. The membership function matrix U contains the grade of membership of -% each DATA point in each cluster. The values 0 and 1 indicate no membership -% and full membership respectively. Grades between 0 and 1 indicate that the -% data point has partial membership in a cluster. At each iteration, an -% objective function is minimized to find the best location for the clusters -% and its values are returned in OBJ_FCN. -% -% [CENTER, ...] 
= FCM(DATA,N_CLUSTER,OPTIONS) specifies a vector of options -% for the clustering process: -% OPTIONS(1): exponent for the matrix U (default: 2.0) -% OPTIONS(2): maximum number of iterations (default: 100) -% OPTIONS(3): minimum amount of improvement (default: 1e-5) -% OPTIONS(4): info display during iteration (default: 1) -% The clustering process stops when the maximum number of iterations -% is reached, or when the objective function improvement between two -% consecutive iterations is less than the minimum amount of improvement -% specified. Use NaN to select the default value. -% -% Example -% data = rand(100,2); -% [center,U,obj_fcn] = fcm(data,2); -% plot(data(:,1), data(:,2),'o'); -% hold on; -% maxU = max(U); -% % Find the data points with highest grade of membership in cluster 1 -% index1 = find(U(1,:) == maxU); -% % Find the data points with highest grade of membership in cluster 2 -% index2 = find(U(2,:) == maxU); -% line(data(index1,1),data(index1,2),'marker','*','color','g'); -% line(data(index2,1),data(index2,2),'marker','*','color','r'); -% % Plot the cluster centers -% plot([center([1 2],1)],[center([1 2],2)],'*','color','k') -% hold off; -% -% See also FCMDEMO, INITFCM, IRISFCM, DISTFCM, STEPFCM. - -% Roger Jang, 12-13-94, N. Hickey 04-16-01 -% Copyright 1994-2018 The MathWorks, Inc. - -if nargin ~= 2 && nargin ~= 3 - error(message("fuzzy:general:errFLT_incorrectNumInputArguments")) -end - -data_n = size(data, 1); - -% Change the following to set default options -default_options = [2; % exponent for the partition matrix U - 100; % max. number of iteration - 1e-5; % min. amount of improvement - 1]; % info display during iteration - -if nargin == 2 - options = default_options; -else - % If "options" is not fully specified, pad it with default values. - if length(options) < 4 - tmp = default_options; - tmp(1:length(options)) = options; - options = tmp; - end - % If some entries of "options" are nan's, replace them with defaults. - nan_index = find(isnan(options)==1); - options(nan_index) = default_options(nan_index); - if options(1) <= 1 - error(message("fuzzy:general:errFcm_expMustBeGtOne")) - end -end - -expo = options(1); % Exponent for U -max_iter = options(2); % Max. iteration -min_impro = options(3); % Min. improvement -display = options(4); % Display info or not - -obj_fcn = zeros(max_iter, 1); % Array for objective function - -U = initfcm(cluster_n, data_n); % Initial fuzzy partition -% Main loop -for i = 1:max_iter - [U, center, obj_fcn(i)] = stepfcm(data, U, cluster_n, expo); - if display - fprintf('Iteration count = %d, obj. fcn = %f\n', i, obj_fcn(i)); - end - % check termination condition - if i > 1 - if abs(obj_fcn(i) - obj_fcn(i-1)) < min_impro, break; end - end -end - -iter_n = i; % Actual number of iterations -obj_fcn(iter_n+1:max_iter) = []; diff --git a/PlatEMO/Algorithms/Multi-objective optimization/MOEA-D-EGO/initfcm.m b/PlatEMO/Algorithms/Multi-objective optimization/MOEA-D-EGO/initfcm.m deleted file mode 100644 index 3b9cefb5d..000000000 --- a/PlatEMO/Algorithms/Multi-objective optimization/MOEA-D-EGO/initfcm.m +++ /dev/null @@ -1,16 +0,0 @@ -function U = initfcm(cluster_n, data_n) -%INITFCM Generate initial fuzzy partition matrix for fuzzy c-means clustering. -% U = INITFCM(CLUSTER_N, DATA_N) randomly generates a fuzzy partition -% matrix U that is CLUSTER_N by DATA_N, where CLUSTER_N is number of -% clusters and DATA_N is number of data points. 
The summation of each -% column of the generated U is equal to unity, as required by fuzzy -% c-means clustering. -% -% See also DISTFCM, FCMDEMO, IRISFCM, STEPFCM, FCM. - -% Roger Jang, 12-1-94. -% Copyright 1994-2002 The MathWorks, Inc. - -U = rand(cluster_n, data_n); -col_sum = sum(U); -U = U./col_sum(ones(cluster_n, 1), :); diff --git a/PlatEMO/Algorithms/Multi-objective optimization/MOEA-D-EGO/predictor.m b/PlatEMO/Algorithms/Multi-objective optimization/MOEA-D-EGO/predictor.m deleted file mode 100644 index d167ece53..000000000 --- a/PlatEMO/Algorithms/Multi-objective optimization/MOEA-D-EGO/predictor.m +++ /dev/null @@ -1,143 +0,0 @@ -function [y,or1,or2,dmse] = predictor(x,dmodel) -%PREDICTOR Predictor for y(x) using the given DACE model. -% -% Call: y = predictor(x, dmodel) -% [y, or] = predictor(x, dmodel) -% [y, dy, mse] = predictor(x, dmodel) -% [y, dy, mse, dmse] = predictor(x, dmodel) -% -% Input -% x : trial design sites with n dimensions. -% For mx trial sites x: -% If mx = 1, then both a row and a column vector is accepted, -% otherwise, x must be an mx*n matrix with the sites stored -% rowwise. -% dmodel : Struct with DACE model; see DACEFIT -% -% Output -% y : predicted response at x. -% or : If mx = 1, then or = gradient vector/Jacobian matrix of predictor -% otherwise, or is an vector with mx rows containing the estimated -% mean squared error of the predictor -% Three or four results are allowed only when mx = 1, -% dy : Gradient of predictor; column vector with n elements -% mse : Estimated mean squared error of the predictor; -% dmse : Gradient vector/Jacobian matrix of mse - -% hbn@imm.dtu.dk -% Last update August 26, 2002 - - or1 = NaN; or2 = NaN; dmse = NaN; % Default return values - if isnan(dmodel.beta) - error('DMODEL has not been found') - end - [m,n] = size(dmodel.S); % number of design sites and number of dimensions - sx = size(x); % number of trial sites and their dimension - if min(sx) == 1 && n > 1 % Single trial point - nx = max(sx); - if nx == n - mx = 1; - x = x(:).'; - end - else - mx = sx(1); - nx = sx(2); - end - if nx ~= n - error('Dimension of trial sites should be %d',n) - end - % Normalize trial sites - x = (x - repmat(dmodel.Ssc(1,:),mx,1)) ./ repmat(dmodel.Ssc(2,:),mx,1); - q = size(dmodel.Ysc,2); % number of response functions - if mx == 1 % one site only - dx = repmat(x,m,1) - dmodel.S; % distances to design sites - if nargout > 1 % gradient/Jacobian wanted - [f,df] = feval(dmodel.regr, x); - [r,dr] = feval(dmodel.corr, dmodel.theta, dx); - % Scaled Jacobian - dy = (df * dmodel.beta).' + dmodel.gamma * dr; - % Unscaled Jacobian - or1 = dy .* repmat(dmodel.Ysc(2, :)', 1, nx) ./ repmat(dmodel.Ssc(2,:), q, 1); - if q == 1 - % Gradient as a column vector - or1 = or1'; - end - if nargout > 2 % MSE wanted - rt = dmodel.C \ r; - u = dmodel.Ft.' 
* rt - f.'; - v = dmodel.G \ u; - or2 = repmat(dmodel.sigma2,mx,1) .* repmat((1 + sum(v.^2) - sum(rt.^2))',1,q); - if nargout > 3 % gradient/Jacobian of MSE wanted - % Scaled gradient as a row vector - Gv = dmodel.G' \ v; - g = (dmodel.Ft * Gv - rt)' * (dmodel.C \ dr) - (df * Gv)'; - % Unscaled Jacobian - dmse = repmat(2 * dmodel.sigma2',1,nx) .* repmat(g ./ dmodel.Ssc(2,:),q,1); - if q == 1 - % Gradient as a column vector - dmse = dmse'; - end - end - end - else % predictor only - f = feval(dmodel.regr, x); - r = feval(dmodel.corr, dmodel.theta, dx); - end - % Scaled predictor - sy = f * dmodel.beta + (dmodel.gamma*r).'; - % Predictor - y = (dmodel.Ysc(1,:) + dmodel.Ysc(2,:) .* sy)'; - else % several trial sites - % Get distances to design sites - dx = zeros(mx*m,n); - kk = 1 : m; - for k = 1 : mx - dx(kk,:) = repmat(x(k,:),m,1) - dmodel.S; - kk = kk + m; - end - % Get regression function and correlation - f = feval(dmodel.regr, x); - r = feval(dmodel.corr, dmodel.theta, dx); - r = reshape(r, m, mx); - % Scaled predictor - sy = f * dmodel.beta + (dmodel.gamma * r).'; - % Predictor - y = repmat(dmodel.Ysc(1,:),mx,1) + repmat(dmodel.Ysc(2,:),mx,1) .* sy; - if nargout > 1 % MSE wanted - rt = dmodel.C \ r; - u = dmodel.G \ (dmodel.Ft.' * rt - f.'); - or1 = repmat(dmodel.sigma2,mx,1) .* repmat((1 + sum(u.^2,1) - sum(rt.^2,1))',1,q); - if nargout > 2 - disp('WARNING from PREDICTOR. Only y and or1=mse are computed') - end - end - end -end - -function [r,dr] = corrgauss(theta,d) -%CORRGAUSS Gaussian correlation function, - - [m,n] = size(d); % number of differences and dimension of data - if length(theta) == 1 - theta = repmat(theta,1,n); - elseif length(theta) ~= n - error('Length of theta must be 1 or %d',n) - end - td = d.^2 .* repmat(-theta(:).',m,1); - r = exp(sum(td, 2)); - dr = repmat(-2*theta(:).',m,1) .* d .* repmat(r,1,n); -end - -function [f,df] = regpoly0(S) -%REGPOLY0 Zero order polynomial regression function - - f = ones(size(S,1),1); - df = zeros(size(S,2),1); -end - -function [f,df] = regpoly1(S) -%REGPOLY1 First order polynomial regression function - - f = [ones(size(S,1),1),S]; - df = [zeros(size(S,2),1),eye(size(S,2))]; -end \ No newline at end of file diff --git a/PlatEMO/Algorithms/Multi-objective optimization/MOEA-D-EGO/stepfcm.m b/PlatEMO/Algorithms/Multi-objective optimization/MOEA-D-EGO/stepfcm.m deleted file mode 100644 index 07c0a003b..000000000 --- a/PlatEMO/Algorithms/Multi-objective optimization/MOEA-D-EGO/stepfcm.m +++ /dev/null @@ -1,27 +0,0 @@ -function [U_new, center, obj_fcn] = stepfcm(data, U, cluster_n, expo) -%STEPFCM One step in fuzzy c-mean clustering. -% [U_NEW, CENTER, ERR] = STEPFCM(DATA, U, CLUSTER_N, EXPO) -% performs one iteration of fuzzy c-mean clustering, where -% -% DATA: matrix of data to be clustered. (Each row is a data point.) -% U: partition matrix. (U(i,j) is the MF value of data j in cluster j.) -% CLUSTER_N: number of clusters. -% EXPO: exponent (> 1) for the partition matrix. -% U_NEW: new partition matrix. -% CENTER: center of clusters. (Each row is a center.) -% ERR: objective function for partition U. -% -% Note that the situation of "singularity" (one of the data points is -% exactly the same as one of the cluster centers) is not checked. -% However, it hardly occurs in practice. -% -% See also DISTFCM, INITFCM, IRISFCM, FCMDEMO, FCM. - -% Copyright 1994-2014 The MathWorks, Inc. 
- -mf = U.^expo; % MF matrix after exponential modification -center = mf*data./(sum(mf,2)*ones(1,size(data,2))); %new center -dist = distfcm(center, data); % fill the distance matrix -obj_fcn = sum(sum((dist.^2).*mf)); % objective function -tmp = dist.^(-2/(expo-1)); % calculate new U, suppose expo != 1 -U_new = tmp./(ones(cluster_n, 1)*sum(tmp)); diff --git a/PlatEMO/Algorithms/surrogate_models/DACE/Dacefit.m b/PlatEMO/Algorithms/surrogate_models/DACE/Dacefit.m new file mode 100644 index 000000000..735d0daf4 --- /dev/null +++ b/PlatEMO/Algorithms/surrogate_models/DACE/Dacefit.m @@ -0,0 +1,298 @@ +function [dmodel, perf] = Dacefit(S, Y, regr, corr, theta0, lob, upb) +%DACEFIT Constrained non-linear least-squares fit of a given correlation +% model to the provided data set and regression model +% +% Call +% [dmodel, perf] = dacefit(S, Y, regr, corr, theta0) +% [dmodel, perf] = dacefit(S, Y, regr, corr, theta0, lob, upb) +% +% Input +% S, Y : Data points (S(i,:), Y(i,:)), i = 1,...,m +% regr : Function handle to a regression model +% corr : Function handle to a correlation function +% theta0 : Initial guess on theta, the correlation function parameters +% lob,upb : If present, then lower and upper bounds on theta +% Otherwise, theta0 is used for theta +% +% Output +% dmodel : DACE model: a struct with the elements +% regr : function handle to the regression model +% corr : function handle to the correlation function +% theta : correlation function parameters +% beta : generalized least squares estimate +% gamma : correlation factors +% sigma2 : maximum likelihood estimate of the process variance +% S : scaled design sites +% Ssc : scaling factors for design arguments +% Ysc : scaling factors for design ordinates +% C : Cholesky factor of correlation matrix +% Ft : Decorrelated regression matrix +% G : From QR factorization: Ft = Q*G' . +% perf : struct with performance information. 
Elements +% nv : Number of evaluations of objective function +% perf : (q+2)*nv array, where q is the number of elements +% in theta, and the columns hold current values of +% [theta; psi(theta); type] +% |type| = 1, 2 or 3, indicate 'start', 'explore' or 'move' +% A negative value for type indicates an uphill step + +% hbn@imm.dtu.dk +% Last update September 3, 2002 + +% Check design points +[m n] = size(S); % number of design sites and their dimension +sY = size(Y); +if min(sY) == 1, Y = Y(:); lY = max(sY); sY = size(Y); +else, lY = sY(1); end +if m ~= lY + error('S and Y must have the same number of rows'), end + +% Check correlation parameters +lth = length(theta0); +if nargin > 5 % optimization case + if length(lob) ~= lth | length(upb) ~= lth + error('theta0, lob and upb must have the same length'), end + if any(lob <= 0) | any(upb < lob) + error('The bounds must satisfy 0 < lob <= upb'), end +else % given theta + if any(theta0 <= 0) + error('theta0 must be strictly positive'), end +end + +% Normalize data +mS = mean(S); sS = std(S); +mY = mean(Y); sY = std(Y); +% 02.08.27: Check for 'missing dimension' +j = find(sS == 0); +if ~isempty(j), sS(j) = 1; end +j = find(sY == 0); +if ~isempty(j), sY(j) = 1; end +S = (S - repmat(mS,m,1)) ./ repmat(sS,m,1); +Y = (Y - repmat(mY,m,1)) ./ repmat(sY,m,1); + +% Calculate distances D between points +mzmax = m*(m-1) / 2; % number of non-zero distances +ij = zeros(mzmax, 2); % initialize matrix with indices +D = zeros(mzmax, n); % initialize matrix with distances +ll = 0; +for k = 1 : m-1 + ll = ll(end) + (1 : m-k); + ij(ll,:) = [repmat(k, m-k, 1) (k+1 : m)']; % indices for sparse matrix + D(ll,:) = repmat(S(k,:), m-k, 1) - S(k+1:m,:); % differences between points +end +% if min(sum(abs(D),2) ) == 0 +% error('Multiple design sites are not allowed'), end + +% Regression matrix +F = feval(regr, S); [mF p] = size(F); +if mF ~= m, error('number of rows in F and S do not match'), end +if p > mF, error('least squares problem is underdetermined'), end + +% parameters for objective function +par = struct('corr',corr, 'regr',regr, 'y',Y, 'F',F, ... + 'D', D, 'ij',ij, 'scS',sS); + +% Determine theta +if nargin > 5 + % Bound constrained non-linear optimization + [theta f fit perf] = boxmin(theta0, lob, upb, par); + if isinf(f) + error('Bad parameter region. Try increasing upb'), end +else + % Given theta + theta = theta0(:); + [f fit] = objfunc(theta, par); + perf = struct('perf',[theta; f; 1], 'nv',1); + if isinf(f) + error('Bad point. Try increasing theta0'), end +end + +% Return values +dmodel = struct('regr',regr, 'corr',corr, 'theta',theta.', ... + 'beta',fit.beta, 'gamma',fit.gamma, 'sigma2',sY.^2.*fit.sigma2, ... + 'S',S, 'Ssc',[mS; sS], 'Ysc',[mY; sY], ... + 'C',fit.C, 'Ft',fit.Ft, 'G',fit.G); + +% >>>>>>>>>>>>>>>> Auxiliary functions ==================== + +function [obj, fit] = objfunc(theta, par) +% Initialize +obj = inf; +fit = struct('sigma2',NaN, 'beta',NaN, 'gamma',NaN, ... + 'C',NaN, 'Ft',NaN, 'G',NaN); +m = size(par.F,1); +% Set up R +r = feval(par.corr, theta, par.D); +idx = find(r > 0); o = (1 : m)'; +mu = (10+m)*eps; +R = sparse([par.ij(idx,1); o], [par.ij(idx,2); o], ... + [r(idx); ones(m,1)+mu]); +% Cholesky factorization with check for pos. def. 
+[C rd] = chol(R); +if rd, return, end % not positive definite + +% Get least squares solution +C = C'; Ft = C \ par.F; +[Q G] = qr(Ft,0); +if rcond(G) < 1e-10 + % Check F + if cond(par.F) > 1e15 + T = sprintf('F is too ill conditioned\nPoor combination of regression model and design sites'); + error(T) + else % Matrix Ft is too ill conditioned + return + end +end +Yt = C \ par.y; beta = G \ (Q'*Yt); +rho = Yt - Ft*beta; sigma2 = sum(rho.^2)/m; +detR = prod( full(diag(C)) .^ (2/m) ); +obj = sum(sigma2) * detR; +if nargout > 1 + fit = struct('sigma2',sigma2, 'beta',beta, 'gamma',rho' / C, ... + 'C',C, 'Ft',Ft, 'G',G'); +end + +% -------------------------------------------------------- + +function [t, f, fit, perf] = boxmin(t0, lo, up, par) +%BOXMIN Minimize with positive box constraints + +% Initialize +[t, f, fit, itpar] = start(t0, lo, up, par); +if ~isinf(f) + % Iterate + p = length(t); + if p <= 2, kmax = 2; else, kmax = min(p,4); end + for k = 1 : kmax + th = t; + [t, f, fit, itpar] = explore(t, f, fit, itpar, par); + [t, f, fit, itpar] = move(th, t, f, fit, itpar, par); + end +end +perf = struct('nv',itpar.nv, 'perf',itpar.perf(:,1:itpar.nv)); + +% -------------------------------------------------------- + +function [t, f, fit, itpar] = start(t0, lo, up, par) +% Get starting point and iteration parameters + +% Initialize +t = t0(:); lo = lo(:); up = up(:); p = length(t); +D = 2 .^ ([1:p]'/(p+2)); +ee = find(up == lo); % Equality constraints +if ~isempty(ee) + D(ee) = ones(length(ee),1); t(ee) = up(ee); +end +ng = find(t < lo | up < t); % Free starting values +if ~isempty(ng) + t(ng) = (lo(ng) .* up(ng).^7).^(1/8); % Starting point +end +ne = find(D ~= 1); + +% Check starting point and initialize performance info +[f fit] = objfunc(t,par); nv = 1; +itpar = struct('D',D, 'ne',ne, 'lo',lo, 'up',up, ... 
+ 'perf',zeros(p+2,200*p), 'nv',1); +itpar.perf(:,1) = [t; f; 1]; +if isinf(f) % Bad parameter region + return +end + +if length(ng) > 1 % Try to improve starting guess + d0 = 16; d1 = 2; q = length(ng); + th = t; fh = f; jdom = ng(1); + for k = 1 : q + j = ng(k); fk = fh; tk = th; + DD = ones(p,1); DD(ng) = repmat(1/d1,q,1); DD(j) = 1/d0; + alpha = min(log(lo(ng) ./ th(ng)) ./ log(DD(ng))) / 5; + v = DD .^ alpha; tk = th; + for rept = 1 : 4 + tt = tk .* v; + [ff fitt] = objfunc(tt,par); nv = nv+1; + itpar.perf(:,nv) = [tt; ff; 1]; + if ff <= fk + tk = tt; fk = ff; + if ff <= f + t = tt; f = ff; fit = fitt; jdom = j; + end + else + itpar.perf(end,nv) = -1; break + end + end + end % improve + + % Update Delta + if jdom > 1 + D([1 jdom]) = D([jdom 1]); + itpar.D = D; + end +end % free variables + +itpar.nv = nv; + +% -------------------------------------------------------- + +function [t, f, fit, itpar] = explore(t, f, fit, itpar, par) +% Explore step + +nv = itpar.nv; ne = itpar.ne; +for k = 1 : length(ne) + j = ne(k); tt = t; DD = itpar.D(j); + if t(j) == itpar.up(j) + atbd = 1; tt(j) = t(j) / sqrt(DD); + elseif t(j) == itpar.lo(j) + atbd = 1; tt(j) = t(j) * sqrt(DD); + else + atbd = 0; tt(j) = min(itpar.up(j), t(j)*DD); + end + [ff fitt] = objfunc(tt,par); nv = nv+1; + itpar.perf(:,nv) = [tt; ff; 2]; + if ff < f + t = tt; f = ff; fit = fitt; + else + itpar.perf(end,nv) = -2; + if ~atbd % try decrease + tt(j) = max(itpar.lo(j), t(j)/DD); + [ff fitt] = objfunc(tt,par); nv = nv+1; + itpar.perf(:,nv) = [tt; ff; 2]; + if ff < f + t = tt; f = ff; fit = fitt; + else + itpar.perf(end,nv) = -2; + end + end + end +end % k + +itpar.nv = nv; + +% -------------------------------------------------------- + +function [t, f, fit, itpar] = move(th, t, f, fit, itpar, par) +% Pattern move + +nv = itpar.nv; ne = itpar.ne; p = length(t); +v = t ./ th; +if all(v == 1) + itpar.D = itpar.D([2:p 1]).^.2; + return +end + +% Proper move +rept = 1; +while rept + tt = min(itpar.up, max(itpar.lo, t .* v)); + [ff fitt] = objfunc(tt,par); nv = nv+1; + itpar.perf(:,nv) = [tt; ff; 3]; + if ff < f + t = tt; f = ff; fit = fitt; + v = v .^ 2; + else + itpar.perf(end,nv) = -3; + rept = 0; + end + if any(tt == itpar.lo | tt == itpar.up), rept = 0; end +end + +itpar.nv = nv; +itpar.D = itpar.D([2:p 1]).^.25; diff --git a/PlatEMO/Algorithms/surrogate_models/DACE/Predictor.m b/PlatEMO/Algorithms/surrogate_models/DACE/Predictor.m new file mode 100644 index 000000000..107c251b5 --- /dev/null +++ b/PlatEMO/Algorithms/surrogate_models/DACE/Predictor.m @@ -0,0 +1,132 @@ +function [y, or1, or2, dmse] = Predictor(x, dmodel) +%PREDICTOR Predictor for y(x) using the given DACE model. +% +% Call: y = predictor(x, dmodel) +% [y, or] = predictor(x, dmodel) +% [y, dy, mse] = predictor(x, dmodel) +% [y, dy, mse, dmse] = predictor(x, dmodel) +% +% Input +% x : trial design sites with n dimensions. +% For mx trial sites x: +% If mx = 1, then both a row and a column vector is accepted, +% otherwise, x must be an mx*n matrix with the sites stored +% rowwise. +% dmodel : Struct with DACE model; see DACEFIT +% +% Output +% y : predicted response at x. 
+% or : If mx = 1, then or = gradient vector/Jacobian matrix of predictor +% otherwise, or is an vector with mx rows containing the estimated +% mean squared error of the predictor +% Three or four results are allowed only when mx = 1, +% dy : Gradient of predictor; column vector with n elements +% mse : Estimated mean squared error of the predictor; +% dmse : Gradient vector/Jacobian matrix of mse + +% hbn@imm.dtu.dk +% Last update August 26, 2002 + + or1 = NaN; or2 = NaN; dmse = NaN; % Default return values + if isnan(dmodel.beta) + y = NaN; + error('DMODEL has not been found') + end + + [m n] = size(dmodel.S); % number of design sites and number of dimensions + sx = size(x); % number of trial sites and their dimension + if min(sx) == 1 & n > 1 % Single trial point + nx = max(sx); + if nx == n + mx = 1; x = x(:).'; + end + else + mx = sx(1); nx = sx(2); + end + if nx ~= n + error(sprintf('Dimension of trial sites should be %d',n)) + end + + % Normalize trial sites + x = (x - repmat(dmodel.Ssc(1,:),mx,1)) ./ repmat(dmodel.Ssc(2,:),mx,1); + q = size(dmodel.Ysc,2); % number of response functions + y = zeros(mx,q); % initialize result + + if mx == 1 % one site only + dx = repmat(x,m,1) - dmodel.S; % distances to design sites + if nargout > 1 % gradient/Jacobian wanted + [f df] = feval(dmodel.regr, x); + [r dr] = feval(dmodel.corr, dmodel.theta, dx); + % Scaled Jacobian + dy = (df * dmodel.beta).' + dmodel.gamma * dr; + % Unscaled Jacobian + or1 = dy .* repmat(dmodel.Ysc(2, :)', 1, nx) ./ repmat(dmodel.Ssc(2,:), q, 1); + if q == 1 + % Gradient as a column vector + or1 = or1'; + end + if nargout > 2 % MSE wanted + + rt = dmodel.C \ r; + u = dmodel.Ft.' * rt - f.'; + v = dmodel.G \ u; + or2 = repmat(dmodel.sigma2,mx,1) .* repmat((1 + sum(v.^2) - sum(rt.^2))',1,q); + + if nargout > 3 % gradient/Jacobian of MSE wanted + % Scaled gradient as a row vector + Gv = dmodel.G' \ v; + g = (dmodel.Ft * Gv - rt)' * (dmodel.C \ dr) - (df * Gv)'; + % Unscaled Jacobian + dmse = repmat(2 * dmodel.sigma2',1,nx) .* repmat(g ./ dmodel.Ssc(2,:),q,1); + if q == 1 + % Gradient as a column vector + dmse = dmse'; + end + end + + end + + else % predictor only + f = feval(dmodel.regr, x); + r = feval(dmodel.corr, dmodel.theta, dx); + end + + % Scaled predictor + sy = f * dmodel.beta + (dmodel.gamma*r).'; + % Predictor + y = (dmodel.Ysc(1,:) + dmodel.Ysc(2,:) .* sy)'; + + else % several trial sites + % Get distances to design sites + dx = zeros(mx*m,n); kk = 1:m; + for k = 1 : mx + dx(kk,:) = repmat(x(k,:),m,1) - dmodel.S; + kk = kk + m; + end + % Get regression function and correlation + f = feval(dmodel.regr, x); + r = feval(dmodel.corr, dmodel.theta, dx); + r = reshape(r, m, mx); + + % Scaled predictor + sy = f * dmodel.beta + (dmodel.gamma * r).'; + % Predictor + y = repmat(dmodel.Ysc(1,:),mx,1) + repmat(dmodel.Ysc(2,:),mx,1) .* sy; + + if nargout > 1 % MSE wanted + rt = dmodel.C \ r; + u = dmodel.G \ (dmodel.Ft.' * rt - f.'); + or1 = repmat(dmodel.sigma2,mx,1) .* repmat((1 + colsum(u.^2) - colsum(rt.^2))',1,q); + if nargout > 2 + disp('WARNING from PREDICTOR. 
Only y and or1=mse are computed') + end + end + + end % of several sites + +% >>>>>>>>>>>>>>>> Auxiliary function ==================== + +function s = colsum(x) +% Columnwise sum of elements in x +if size(x,1) == 1, s = x; +else, s = sum(x); end \ No newline at end of file diff --git a/PlatEMO/Algorithms/surrogate_models/DACE/corr/corrcubic.m b/PlatEMO/Algorithms/surrogate_models/DACE/corr/corrcubic.m new file mode 100644 index 000000000..aa8740134 --- /dev/null +++ b/PlatEMO/Algorithms/surrogate_models/DACE/corr/corrcubic.m @@ -0,0 +1,42 @@ +function [r, dr] = corrcubic(theta, d) +%CORRCUBIC Cubic correlation function, +% +% n +% r_i = prod max(0, 1 - 3(theta_j*d_ij)^2 + 2(theta_j*d_ij)^3) , i = 1,...,m +% j=1 +% +% If length(theta) = 1, then the model is isotropic: +% all theta_j = theta. +% +% Call: r = corrcubic(theta, d) +% [r, dr] = corrcubic(theta, d) +% +% theta : parameters in the correlation function +% d : m*n matrix with differences between given data points +% r : correlation +% dr : m*n matrix with the Jacobian of r at x. It is +% assumed that x is given implicitly by d(i,:) = x - S(i,:), +% where S(i,:) is the i'th design site. + +% hbn@imm.dtu.dk +% Last update June 25, 2002 + +[m n] = size(d); % number of differences and dimension of data +if length(theta) == 1 + theta = repmat(theta,1,n); +elseif length(theta) ~= n + error(sprintf('Length of theta must be 1 or %d',n)) +else + theta = theta(:).'; +end +td = min(abs(d) .* repmat(theta,m,1), 1); +ss = 1 - td.^2 .* (3 - 2*td); +r = prod(ss, 2); + +if nargout > 1 + dr = zeros(m,n); + for j = 1 : n + dd = 6*theta(j) * sign(d(:,j)) .* td(:,j) .* (td(:,j) - 1); + dr(:,j) = prod(ss(:,[1:j-1 j+1:n]),2) .* dd; + end +end \ No newline at end of file diff --git a/PlatEMO/Algorithms/surrogate_models/DACE/corr/correxp.m b/PlatEMO/Algorithms/surrogate_models/DACE/corr/correxp.m new file mode 100644 index 000000000..d5465c140 --- /dev/null +++ b/PlatEMO/Algorithms/surrogate_models/DACE/corr/correxp.m @@ -0,0 +1,38 @@ +function [r, dr] = correxp(theta, d) +%CORREXP Exponential correlation function +% +% n +% r_i = prod exp(-theta_j * |d_ij|) +% j=1 +% +% If length(theta) = 1, then the model is isotropic: +% theta_j = theta(1), j=1,...,n +% +% Call: r = correxp(theta, d) +% [r, dr] = correxp(theta, d) +% +% theta : parameters in the correlation function +% d : m*n matrix with differences between given data points +% r : correlation +% dr : m*n matrix with the Jacobian of r at x. It is +% assumed that x is given implicitly by d(i,:) = x - S(i,:), +% where S(i,:) is the i'th design site. 
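+%
+%  Example (added for illustration; not part of the original DACE help):
+%  with theta = [1 2] and a single difference d = [0.5 -0.25],
+%      r  = exp(-1*0.5 - 2*0.25) = exp(-1)  ~ 0.3679
+%      dr = [-1*sign(0.5)*r, -2*sign(-0.25)*r]  ~ [-0.3679  0.7358]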
+ +% hbn@imm.dtu.dk +% Last update April 12, 2002 + +[m n] = size(d); % number of differences and dimension of data +lt = length(theta); +if lt == 1, theta = repmat(theta,1,n); +elseif lt ~= n + error(sprintf('Length of theta must be 1 or %d',n)) +else + theta = theta(:).'; +end + +td = abs(d) .* repmat(-theta, m, 1); +r = exp(sum(td,2)); + +if nargout > 1 + dr = repmat(-theta,m,1) .* sign(d) .* repmat(r,1,n); +end \ No newline at end of file diff --git a/PlatEMO/Algorithms/surrogate_models/DACE/corr/correxpg.m b/PlatEMO/Algorithms/surrogate_models/DACE/corr/correxpg.m new file mode 100644 index 000000000..cb11d38aa --- /dev/null +++ b/PlatEMO/Algorithms/surrogate_models/DACE/corr/correxpg.m @@ -0,0 +1,40 @@ +function [r, dr] = correxpg(theta, d) +%CORREXPG General exponential correlation function +% +% n +% r_i = prod exp(-theta_j * d_ij^theta_n+1) +% j=1 +% +% If n > 1 and length(theta) = 2, then the model is isotropic: +% theta_j = theta(1), j=1,...,n; theta_(n+1) = theta(2) +% +% Call: r = correxpg(theta, d) +% [r, dr] = correxpg(theta, d) +% +% theta : parameters in the correlation function +% d : m*n matrix with differences between given data points +% r : correlation +% dr : m*n matrix with the Jacobian of r at x. It is +% assumed that x is given implicitly by d(i,:) = x - S(i,:), +% where S(i,:) is the i'th design site. + +% hbn@imm.dtu.dk +% Last update April 12, 2002 + +[m n] = size(d); % number of differences and dimension of data +lt = length(theta); +if n > 1 & lt == 2 + theta = [repmat(theta(1),1,n) theta(2)]; +elseif lt ~= n+1 + error(sprintf('Length of theta must be 2 or %d',n+1)) +else + theta = theta(:).'; +end + +pow = theta(end); tt = repmat(-theta(1:n), m, 1); +td = abs(d).^pow .* tt; +r = exp(sum(td,2)); + +if nargout > 1 + dr = pow * tt .* sign(d) .* (abs(d) .^ (pow-1)) .* repmat(r,1,n); +end diff --git a/PlatEMO/Algorithms/surrogate_models/DACE/corr/corrgauss.m b/PlatEMO/Algorithms/surrogate_models/DACE/corr/corrgauss.m new file mode 100644 index 000000000..2829779ea --- /dev/null +++ b/PlatEMO/Algorithms/surrogate_models/DACE/corr/corrgauss.m @@ -0,0 +1,36 @@ +function [r, dr] = corrgauss(theta, d) +%CORRGAUSS Gaussian correlation function, +% +% n +% r_i = prod exp(-theta_j * d_ij^2) , i = 1,...,m +% j=1 +% +% If length(theta) = 1, then the model is isotropic: +% all theta_j = theta . +% +% Call: r = corrgauss(theta, d) +% [r, dr] = corrgauss(theta, d) +% +% theta : parameters in the correlation function +% d : m*n matrix with differences between given data points +% r : correlation +% dr : m*n matrix with the Jacobian of r at x. It is +% assumed that x is given implicitly by d(i,:) = x - S(i,:), +% where S(i,:) is the i'th design site. 
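+%
+%  Example (added for illustration; not part of the original DACE help):
+%  with the isotropic setting theta = 2 and a single difference d = [0.1 0.2],
+%      r  = exp(-2*(0.1^2 + 0.2^2)) = exp(-0.1)  ~ 0.9048
+%      dr = -2*2*[0.1 0.2]*r  ~ [-0.3619 -0.7239]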
+ +% hbn@imm.dtu.dk +% Last update June 2, 2002 + +[m n] = size(d); % number of differences and dimension of data +if length(theta) == 1 + theta = repmat(theta,1,n); +elseif length(theta) ~= n + error(sprintf('Length of theta must be 1 or %d',n)) +end + +td = d.^2 .* repmat(-theta(:).',m,1); +r = exp(sum(td, 2)); + +if nargout > 1 + dr = repmat(-2*theta(:).',m,1) .* d .* repmat(r,1,n); +end \ No newline at end of file diff --git a/PlatEMO/Algorithms/surrogate_models/DACE/corr/corrlin.m b/PlatEMO/Algorithms/surrogate_models/DACE/corr/corrlin.m new file mode 100644 index 000000000..ba1ad6bb5 --- /dev/null +++ b/PlatEMO/Algorithms/surrogate_models/DACE/corr/corrlin.m @@ -0,0 +1,39 @@ +function [r, dr] = corrlin(theta, d) +%CORRLIN Linear correlation function, +% +% n +% r_i = prod max(0, 1 - theta_j * d_ij) , i = 1,...,m +% j=1 +% +% If length(theta) = 1, then the model is isotropic: +% all theta_j = theta . +% +% Call: r = corrlin(theta, d) +% [r, dr] = corrlin(theta, d) +% +% theta : parameters in the correlation function +% d : m*n matrix with differences between given data points +% r : correlation +% dr : m*n matrix with the Jacobian of r at x. It is +% assumed that x is given implicitly by d(i,:) = x - S(i,:), +% where S(i,:) is the i'th design site. + +% hbn@imm.dtu.dk +% Last update April 12, 2002 + +[m n] = size(d); % number of differences and dimension of data +if length(theta) == 1 + theta = repmat(theta,1,n); +elseif length(theta) ~= n + error(sprintf('Length of theta must be 1 or %d',n)) +end + +td = max(1 - abs(d) .* repmat(theta(:).',m,1), 0); +r = prod(td, 2); + +if nargout > 1 + dr = zeros(m,n); + for j = 1 : n + dr(:,j) = prod(td(:,[1:j-1 j+1:n]),2) .* (-theta(j) * sign(d(:,j))); + end +end \ No newline at end of file diff --git a/PlatEMO/Algorithms/surrogate_models/DACE/corr/corrspherical.m b/PlatEMO/Algorithms/surrogate_models/DACE/corr/corrspherical.m new file mode 100644 index 000000000..d825ea737 --- /dev/null +++ b/PlatEMO/Algorithms/surrogate_models/DACE/corr/corrspherical.m @@ -0,0 +1,42 @@ +function [r, dr] = corrspherical(theta, d) +%CORRSPHERICAL Spherical correlation function, +% +% n +% r_i = prod max(0, 1 - 1.5(theta_j*d_ij) + .5(theta_j*d_ij)^3) , i = 1,...,m +% j=1 +% +% If length(theta) = 1, then the model is isotropic: +% all theta_j = theta . +% +% Call: r = corrspherical(theta, d) +% [r, dr] = corrspherical(theta, d) +% +% theta : parameters in the correlation function +% d : m*n matrix with differences between given data points +% r : correlation +% dr : m*n matrix with the Jacobian of r at x. It is +% assumed that x is given implicitly by d(i,:) = x - S(i,:), +% where S(i,:) is the i'th design site. 
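+%
+%  Example (added for illustration; not part of the original DACE help):
+%  with theta = 1 and a single 1-D difference d = 0.5 (so theta*|d| = 0.5),
+%      r  = 1 - 1.5*0.5 + 0.5*0.5^3 = 0.3125
+%      dr = 1.5*1*sign(0.5)*(0.5^2 - 1) = -1.125
+%  Once theta*|d| >= 1 the correlation is exactly zero.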
+
+% hbn@imm.dtu.dk
+% Last update April 12, 2002
+
+[m n] = size(d); % number of differences and dimension of data
+if length(theta) == 1
+ theta = repmat(theta,1,n);
+elseif length(theta) ~= n
+ error(sprintf('Length of theta must be 1 or %d',n))
+else
+ theta = theta(:).';
+end
+td = min(abs(d) .* repmat(theta,m,1), 1);
+ss = 1 - td .* (1.5 - .5*td.^2);
+r = prod(ss, 2);
+
+if nargout > 1
+ dr = zeros(m,n);
+ for j = 1 : n
+ dd = 1.5*theta(j) * sign(d(:,j)).*(td(:,j).^2 - 1);
+ dr(:,j) = prod(ss(:,[1:j-1 j+1:n]),2) .* dd;
+ end
+end
\ No newline at end of file
diff --git a/PlatEMO/Algorithms/surrogate_models/DACE/corr/corrspline.m b/PlatEMO/Algorithms/surrogate_models/DACE/corr/corrspline.m
new file mode 100644
index 000000000..1bca68850
--- /dev/null
+++ b/PlatEMO/Algorithms/surrogate_models/DACE/corr/corrspline.m
@@ -0,0 +1,67 @@
+function [r, dr] = corrspline(theta, d)
+%CORRSPLINE Cubic spline correlation function,
+%
+% n
+% r_i = prod S(theta_j*d_ij) , i = 1,...,m
+% j=1
+%
+% with
+% 1 - 15x^2 + 30x^3 for 0 <= x <= 0.5
+% S(x) = 1.25(1 - x)^3 for 0.5 < x < 1
+% 0 for x >= 1
+% If length(theta) = 1, then the model is isotropic:
+% all theta_j = theta.
+%
+% Call: r = corrspline(theta, d)
+% [r, dr] = corrspline(theta, d)
+%
+% theta : parameters in the correlation function
+% d : m*n matrix with differences between given data points
+% r : correlation
+% dr : m*n matrix with the Jacobian of r at x. It is
+% assumed that x is given implicitly by d(i,:) = x - S(i,:),
+% where S(i,:) is the i'th design site.
+
+% hbn@imm.dtu.dk
+% Last update May 30, 2002
+
+[m n] = size(d); % number of differences and dimension of data
+if length(theta) == 1
+ theta = repmat(theta,1,n);
+elseif length(theta) ~= n
+ error(sprintf('Length of theta must be 1 or %d',n))
+else
+ theta = theta(:).';
+end
+mn = m*n; ss = zeros(mn,1);
+xi = reshape(abs(d) .* repmat(theta,m,1), mn,1);
+% Contributions to first and second part of spline
+i1 = find(xi <= 0.2);
+i2 = find(0.2 < xi & xi < 1);
+if ~isempty(i1)
+ ss(i1) = 1 - xi(i1).^2 .* (15 - 30*xi(i1));
+end
+if ~isempty(i2)
+ ss(i2) = 1.25 * (1 - xi(i2)).^3;
+end
+% Values of correlation
+ss = reshape(ss,m,n);
+r = prod(ss, 2);
+
+if nargout > 1 % get Jacobian
+ u = reshape(sign(d) .* repmat(theta,m,1), mn,1);
+ dr = zeros(mn,1);
+ if ~isempty(i1)
+ dr(i1) = u(i1) .* ( (90*xi(i1) - 30) .* xi(i1) );
+ end
+ if ~isempty(i2)
+ dr(i2) = -3.75 * u(i2) .* (1 - xi(i2)).^2;
+ end
+ ii = 1 : m;
+ for j = 1 : n
+ sj = ss(:,j); ss(:,j) = dr(ii);
+ dr(ii) = prod(ss,2);
+ ss(:,j) = sj; ii = ii + m;
+ end
+ dr = reshape(dr,m,n);
+end % Jacobian
\ No newline at end of file
diff --git a/PlatEMO/Algorithms/surrogate_models/DACE/readme.txt b/PlatEMO/Algorithms/surrogate_models/DACE/readme.txt
new file mode 100644
index 000000000..15dde8d5a
--- /dev/null
+++ b/PlatEMO/Algorithms/surrogate_models/DACE/readme.txt
@@ -0,0 +1 @@
+We note that the Dacefit.m and Predictor.m files have slight modifications compared to the original code in the DACE toolbox. The revised versions allow for repeated observation points.
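
For readers unfamiliar with the toolbox, the sketch below shows how the two modified files are typically used together. The argument order of Dacefit is assumed here to follow the original DACE convention (regression handle, correlation handle, initial theta and box bounds); the data set, theta0, lob, and upb values are purely illustrative.

% Minimal usage sketch (illustrative values; Dacefit's call signature is
% assumed to match the original DACE toolbox convention).
S      = [linspace(0,1,11)'; 0.5];            % design sites, 0.5 observed twice
Y      = sin(2*pi*S) + 0.05*randn(size(S));   % noisy observations
theta0 = 1;  lob = 1e-3;  upb = 20;           % initial theta and box bounds
dmodel = Dacefit(S, Y, @regpoly0, @corrgauss, theta0, lob, upb);

X           = linspace(0, 1, 101)';           % several trial sites
[Yhat, MSE] = Predictor(X, dmodel);           % 2nd output is the estimated MSE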
diff --git a/PlatEMO/Algorithms/surrogate_models/DACE/reg/regpoly0.m b/PlatEMO/Algorithms/surrogate_models/DACE/reg/regpoly0.m new file mode 100644 index 000000000..a02dfe213 --- /dev/null +++ b/PlatEMO/Algorithms/surrogate_models/DACE/reg/regpoly0.m @@ -0,0 +1,18 @@ +function [f, df] = regpoly0(S) +%REGPOLY0 Zero order polynomial regression function +% +% Call: f = regpoly0(S) +% [f, df] = regpoly0(S) +% +% S : m*n matrix with design sites +% f : ones(m,1) +% df : Jacobian at the first point (first row in S) + +% hbn@imm.dtu.dk +% Last update April 12, 2002 + +[m n] = size(S); +f = ones(m,1); +if nargout > 1 + df = zeros(n,1); +end diff --git a/PlatEMO/Algorithms/surrogate_models/DACE/reg/regpoly1.m b/PlatEMO/Algorithms/surrogate_models/DACE/reg/regpoly1.m new file mode 100644 index 000000000..aff99e62e --- /dev/null +++ b/PlatEMO/Algorithms/surrogate_models/DACE/reg/regpoly1.m @@ -0,0 +1,18 @@ +function [f, df] = regpoly1(S) +%REGPOLY1 First order polynomial regression function +% +% Call: f = regpoly1(S) +% [f, df] = regpoly1(S) +% +% S : m*n matrix with design sites +% f = [1 s] +% df : Jacobian at the first point (first row in S) + +% hbn@imm.dtu.dk +% Last update April 12, 2002 + +[m n] = size(S); +f = [ones(m,1) S]; +if nargout > 1 + df = [zeros(n,1) eye(n)]; +end \ No newline at end of file diff --git a/PlatEMO/Algorithms/surrogate_models/DACE/reg/regpoly2.m b/PlatEMO/Algorithms/surrogate_models/DACE/reg/regpoly2.m new file mode 100644 index 000000000..92f4566fa --- /dev/null +++ b/PlatEMO/Algorithms/surrogate_models/DACE/reg/regpoly2.m @@ -0,0 +1,31 @@ +function [f, df] = regpoly2(S) +%REGPOLY2 Second order polynomial regression function +% Call: f = regpoly2(S) +% [f, df] = regpoly2(S) +% +% S : m*n matrix with design sites +% f = [1 S S(:,1)*S S(:,2)S(:,2:n) ... S(:,n)^2] +% df : Jacobian at the first point (first row in S) + +% hbn@imm.dtu.dk +% Last update September 4, 2002 + +[m n] = size(S); +nn = (n+1)*(n+2)/2; % Number of columns in f +% Compute f +f = [ones(m,1) S zeros(m,nn-n-1)]; +j = n+1; q = n; +for k = 1 : n + f(:,j+(1:q)) = repmat(S(:,k),1,q) .* S(:,k:n); + j = j+q; q = q-1; +end + +if nargout > 1 + df = [zeros(n,1) eye(n) zeros(n,nn-n-1)]; + j = n+1; q = n; + for k = 1 : n + df(k,j+(1:q)) = [2*S(1,k) S(1,k+1:n)]; + for i = 1 : n-k, df(k+i,j+1+i) = S(1,k); end + j = j+q; q = q-1; + end +end \ No newline at end of file
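
As a quick sanity check on the three regression bases above, the snippet below evaluates them at a single two-dimensional design site; the concrete site S = [2 3] is chosen here only for illustration.

% Column layout of the regression bases for one 2-D design site.
S  = [2 3];
f0 = regpoly0(S);   % -> 1              (constant term only)
f1 = regpoly1(S);   % -> [1 2 3]        (1, s1, s2)
f2 = regpoly2(S);   % -> [1 2 3 4 6 9]  (1, s1, s2, s1^2, s1*s2, s2^2)
% regpoly2 therefore returns (n+1)*(n+2)/2 = 6 columns when n = 2.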