GA-SVM Algorithm: A Python Implementation


Optimizing SVM hyperparameters with a genetic algorithm
  • Genetic algorithm program
  • Computing with the Geatpy toolbox
  • Complete code

    I recently studied how to optimize the hyperparameters of an SVM with a genetic algorithm and, drawing on articles by other bloggers, implemented the method as described below.
    Genetic algorithm program
    First, define the SVR model and the MSE evaluation function. The snippets in this section rely on the following imports, which are repeated in the complete listing at the end:
    from sklearn import svm
    import numpy as np
    import random
    import copy
    import matplotlib.pyplot as plt

    def msefunc(predictval, realval):
        squaredError = []
        # absError = []
        for i in range(len(predictval)):
            val = predictval[i] - realval[i]
            squaredError.append(val * val)  # squared error of each prediction
    
        print("Square Error: ", squaredError)
        print("MSE = ", sum(squaredError) / len(squaredError))  #     MSE
        return sum(squaredError) / len(squaredError)
    
    
    def SVMResult(vardim, x, bound):
        X_train = [[0, 0], [2, 2], [1, 4], [3, 7], [3, 6]]
        y_train = [0.5, 2.5, 3.0, 4.0, 5]
        X_valid = [[1, 1], [3, 5]]
        y_valid = [3, 4]
        c = x[0]
        e = x[1]
        g = x[2]
        clf = svm.SVR(C=c, epsilon=e, gamma=g, kernel='rbf')
        clf.fit(X_train, y_train)
        y_pred = clf.predict(X_valid)
        print("y_pred is", y_pred , "y_true is" , y_valid)
        #   svm mse      
        return msefunc(y_pred, y_valid)
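
    As a quick sanity check, SVMResult can be called directly with a candidate hyperparameter vector [C, epsilon, gamma]. The values below are arbitrary, purely illustrative choices (the vardim and bound arguments are accepted but not used inside the function):
    mse = SVMResult(3, [1.0, 0.1, 0.5], None)   # C=1.0, epsilon=0.1, gamma=0.5
    print("validation MSE:", mse)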
    

    遺伝アルゴリズム種群個体計算:
    class GAIndividual:
        '''
        individual of genetic algorithm
        each instance serves as one individual of the population (pop), holding a candidate [C, epsilon, gamma] vector
        '''
        def __init__(self, vardim, bound):
            '''
            vardim: dimension of variables
            bound: boundaries of variables
            '''
            self.vardim = vardim
            self.bound = bound
            self.fitness = 0.
    
        def generate(self):
            '''
            generate a random chromosome for the genetic algorithm
            '''
            dim = self.vardim
            rnd = np.random.random(size=dim)
            self.chrom = np.zeros(dim)
            for i in range(0, dim):
                self.chrom[i] = self.bound[0, i] + \
                    (self.bound[1, i] - self.bound[0, i]) * rnd[i]
    
        def calculateFitness(self):
            '''
            calculate the fitness of the chromosome
            '''
            # SVMResult returns the validation MSE (smaller is better); turn it into a
            # fitness to maximize, so that the trace formula (1 - f) / f recovers the MSE
            self.fitness = 1.0 / (1.0 + SVMResult(self.vardim, self.chrom, self.bound))
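
    A minimal usage sketch of GAIndividual on its own (the 2x3 bound array mirrors the one built in the main program below: row 0 holds the lower bounds, row 1 the upper bounds of C, epsilon and gamma):
    bound = np.tile([[0.0000001], [1]], 3)
    ind = GAIndividual(3, bound)
    ind.generate()            # random chromosome [C, epsilon, gamma] within the bounds
    ind.calculateFitness()    # trains the SVR and stores the resulting fitness
    print(ind.chrom, ind.fitness)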
    

    Next, define the genetic-algorithm class itself; its selection, crossover, and mutation operators can be redefined as needed:
    class GeneticAlgorithm:
        '''
        The class for genetic algorithm
        '''
        def __init__(self, sizepop, vardim, bound, MAXGEN, params):
            '''
            sizepop: population size
            vardim: dimension of variables
            bound: boundaries of variables
            MAXGEN: termination condition
            params: algorithm parameters, a list consisting of
                    the crossover rate, mutation rate, and alpha
            '''
            self.sizepop = sizepop
            self.MAXGEN = MAXGEN
            self.vardim = vardim
            self.bound = bound
            self.population = []
            self.fitness = np.zeros((self.sizepop, 1))
            self.trace = np.zeros((self.MAXGEN, 2))
            self.params = params
    
        def initialize(self):
            '''
            initialize the population
            '''
            for i in range(0, self.sizepop):
                ind = GAIndividual(self.vardim, self.bound)
                ind.generate()
                self.population.append(ind)
    
        def evaluate(self):
            '''
            evaluation of the population fitnesses
            '''
            for i in range(0, self.sizepop):
                self.population[i].calculateFitness()
                self.fitness[i] = self.population[i].fitness
    
        def solve(self):
            '''
            evolution process of genetic algorithm
            '''
            self.t = 0                            # generation counter
            self.initialize()                     # create the initial population
            self.evaluate()                       # evaluate the fitness of every individual
            best = np.max(self.fitness)           # best fitness in the current generation
            bestIndex = np.argmax(self.fitness)   # index of the best individual
            self.best = copy.deepcopy(self.population[bestIndex])
            self.avefitness = np.mean(self.fitness)  # average fitness
            self.trace[self.t, 0] = (1 - self.best.fitness) / self.best.fitness  # best objective value so far
            self.trace[self.t, 1] = (1 - self.avefitness) / self.avefitness      # average objective value
            print(
                "Generation %d: optimal function value is: %f; average function value is %f"
                % (self.t, self.trace[self.t, 0], self.trace[self.t, 1]))
            while (self.t < self.MAXGEN - 1):
                self.t += 1
                self.selectionOperation()  # selection
                self.crossoverOperation()  # crossover
                self.mutationOperation()   # mutation
                self.evaluate()            # re-evaluate the offspring population
                best = np.max(self.fitness)
                bestIndex = np.argmax(self.fitness)
                if best > self.best.fitness:
                    self.best = copy.deepcopy(self.population[bestIndex])
                self.avefitness = np.mean(self.fitness)
                # objective value of the best individual found so far
                self.trace[self.t, 0] = (1 - self.best.fitness) / self.best.fitness
                # average objective value of the current generation
                self.trace[self.t, 1] = (1 - self.avefitness) / self.avefitness
                print(
                    "Generation %d: optimal function value is: %f; average function value is %f"
                    % (self.t, self.trace[self.t, 0], self.trace[self.t, 1]))
            print("Optimal function value is: %f; " % self.trace[self.t, 0])
            print("Optimal solution is:",self.best.chrom)
            self.printResult()
    
        def selectionOperation(self):
            '''
            selection operation for Genetic Algorithm
            '''
            newpop = []
            totalFitness = np.sum(self.fitness)
            accuFitness = np.zeros((self.sizepop, 1))
    
            # cumulative normalized fitness, used for roulette-wheel selection
            sum1 = 0.
            for i in range(0, self.sizepop):
                accuFitness[i] = sum1 + self.fitness[i] / totalFitness
                sum1 = accuFitness[i]
    
            # spin the roulette wheel sizepop times to select the next generation
            for i in range(0, self.sizepop):
                r = random.random()
                idx = 0
                for j in range(0, self.sizepop - 1):
                    if j == 0 and r < accuFitness[j]:
                        idx = 0
                        break
                    elif r >= accuFitness[j] and r < accuFitness[j + 1]:
                        idx = j + 1
                        break
                newpop.append(self.population[idx])
            self.population = newpop
    
        def crossoverOperation(self):
            '''
            crossover operation for genetic algorithm
            '''
            newpop = []
            # pick two distinct parents at random; cross them with probability params[0]
            for i in range(0, self.sizepop, 2):
                idx1 = random.randint(0, self.sizepop - 1)
                idx2 = random.randint(0, self.sizepop - 1)
                while idx2 == idx1:
                    idx2 = random.randint(0, self.sizepop - 1)
                newpop.append(copy.deepcopy(self.population[idx1]))
                newpop.append(copy.deepcopy(self.population[idx2]))
                r = random.random()
                # params[0]=>crossover_rate; 
                # params[1]=>mutation_rate; 
                # params[2]=>alpha
                if r < self.params[0]:
                    crossPos = random.randint(1, self.vardim - 1)
                    for j in range(crossPos, self.vardim):
                        # chrom[j1]=chrom[j1]*α+(1-α)*chrom[j2]
                        newpop[i].chrom[j] = newpop[i].chrom[j] * self.params[2] +\
                            (1 - self.params[2]) * newpop[i + 1].chrom[j]
                        # chrom[j2]=chrom[j2]*α+(1-α)*chrom[j1]
                        newpop[i + 1].chrom[j] = newpop[i + 1].chrom[j] * self.params[2] + \
                            (1 - self.params[2]) * newpop[i].chrom[j]
            self.population = newpop
    
        def mutationOperation(self):
            '''
            mutation operation for genetic algorithm
            '''
            newpop = []
            for i in range(0, self.sizepop):
                newpop.append(copy.deepcopy(self.population[i]))
                r = random.random()
                if r < self.params[1]:
                    mutatePos = random.randint(0, self.vardim - 1)
                    theta = random.random()
                    if theta > 0.5:
                        # chrom=chrom-(chrom-lowerlimit)*(1-rand^(1-t/N))
                        newpop[i].chrom[mutatePos] = newpop[i].chrom[mutatePos] - \
                            (newpop[i].chrom[mutatePos] - self.bound[0, mutatePos]) * \
                                (1 - random.random()**(1 - self.t / self.MAXGEN))
                    else:
                        # chrom=chrom+(upperlimit-chrom)*(1-rand^(1-t/N))
                        newpop[i].chrom[mutatePos] = newpop[i].chrom[mutatePos] + \
                            (self.bound[1, mutatePos] - newpop[i].chrom[mutatePos]) * \
                                (1 - random.random()**(1 - self.t / self.MAXGEN))
            self.population = newpop
    
        def printResult(self):
            '''
            plot the result of the genetic algorithm
            '''
            x = np.arange(0, self.MAXGEN)
            y1 = self.trace[:, 0]
            y2 = self.trace[:, 1]
            plt.plot(x, y1, 'r', label='optimal value')
            plt.plot(x, y2, 'g', label='average value')
            plt.xlabel("Iteration")
            plt.ylabel("function value")
            plt.title("Genetic algorithm for function optimization")
            plt.legend()
            plt.show()
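
    One detail of crossoverOperation worth noting: the second child's gene is computed from the already-updated gene of the first child, so the arithmetic crossover is not symmetric. A small numerical illustration, with alpha = params[2] = 0.5 and two parent genes 0.2 and 0.8:
    alpha = 0.5
    a, b = 0.2, 0.8
    a_new = a * alpha + (1 - alpha) * b        # 0.5
    b_new = b * alpha + (1 - alpha) * a_new    # 0.65, because a_new (not a) is used
    print(a_new, b_new)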
    

    Finally, build the main program and run the optimization:
    if __name__ == "__main__":
        bound = np.tile([[0.0000001], [1]], 3)  # 2x3 bounds: row 0 = lower, row 1 = upper limits of [C, epsilon, gamma]
        # def __init__(self, sizepop, vardim, bound, MAXGEN, params):
        ga = GeneticAlgorithm(60, 3, bound, 100, [0.9, 0.1, 0.5])
        ga.solve()
    

    Computing with the Geatpy toolbox
    Geatpy provides library functions for many ready-made genetic and evolutionary-algorithm operators, such as population initialization, selection, crossover, mutation, reinsertion, and non-dominated sorting for multi-objective optimization, as well as a collection of evolutionary-algorithm templates from which diverse evolutionary algorithms can be assembled. Compared with well-known toolboxes, platforms, and frameworks written in Matlab, Java, and Python, it offers higher execution efficiency, a lower learning cost, highly decoupled modules, and strong extensibility.
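    The code below uses the Geatpy 2.x interface (Problem.aimFunc, ea.crtfld, ea.soea_DE_rand_1_bin_templet). The toolbox is installable from PyPI with pip; a small, version-agnostic sketch for checking which release is installed (assumes Python 3.8+, nothing Geatpy-specific beyond the package name):
    import importlib.metadata
    print(importlib.metadata.version("geatpy"))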
    First, import the required libraries:
    # -*- coding: utf-8 -*-
    import numpy as np
    import geatpy as ea
    from sklearn import svm
    from sklearn import preprocessing
    from sklearn.model_selection import cross_val_score
    import multiprocessing as mp
    from multiprocessing import Pool as ProcessPool
    from multiprocessing.dummy import Pool as ThreadPool
    from sklearn.datasets import load_iris
    from sklearn.metrics import r2_score
    

    Define the optimization problem:
    class MyProblem(ea.Problem):  # subclass geatpy's Problem class
        def __init__(self, PoolType):  # PoolType is either 'Process' or 'Thread'
            name = 'MyProblem'  # problem name (arbitrary)
            M = 1  # number of objectives
            maxormins = [-1]  # per-objective flag: 1 minimizes the objective, -1 maximizes it
            Dim = 2  # number of decision variables (C and gamma)
            varTypes = [0, 0]  # variable types: 0 means continuous, 1 means discrete
            lb = [2 ** (-8)] * Dim  # lower bounds of the decision variables
            ub = [2 ** 8] * Dim  # upper bounds of the decision variables
            lbin = [1] * Dim  # 1: the lower bound is included; 0: excluded
            ubin = [1] * Dim  # 1: the upper bound is included; 0: excluded
            # call the parent constructor to complete the problem setup
            ea.Problem.__init__(self, name, M, maxormins, Dim,
                                varTypes, lb, ub, lbin, ubin)
            # training data used when evaluating the objective function
            X_train = [[0, 0.1], [1.2, 1], [2.1, 2], [3, 3.1], [4, 4.1], [5, 5.1], [6, 6.2], [7, 7.1]]
            y_train = [0, 0.9, 2.1, 3, 4.1, 5.2, 5.9, 7]
            self.data = X_train  # feature data
            self.dataTarget = np.array(y_train)
            # choose between a thread pool and a process pool for parallel evaluation
            self.PoolType = PoolType
            if self.PoolType == 'Thread':
                self.pool = ThreadPool(2)  # thread pool of size 2
            elif self.PoolType == 'Process':
                num_cores = int(mp.cpu_count())  # number of CPU cores
                self.pool = ProcessPool(num_cores)  # process pool with one worker per core
    
        def aimFunc(self, pop):  # objective function, evaluated on the whole population at once
            Vars = pop.Phen  # decision-variable matrix of shape NIND x Dim
            args = list(
                zip(list(range(pop.sizes)), [Vars] * pop.sizes,\
                     [self.data] * pop.sizes, [self.dataTarget] * pop.sizes))
            if self.PoolType == 'Thread':
                pop.ObjV = np.array(list(self.pool.map(subAimFunc, args)))
            elif self.PoolType == 'Process':
                result = self.pool.map_async(subAimFunc, args)
                result.wait()
                pop.ObjV = np.array(result.get())
    
        def test(self, C, G):  # retrain with the optimized C and gamma and evaluate on held-out data
            # held-out validation data
            X_valid = [[1.2, 1.3], [3.5, 3.55], [6.5, 6.4]]
            y_valid = [1.3, 3.5, 6.4]
            data_test = np.array(X_valid)  # validation features
            dataTarget_test = np.array(y_valid)  # validation targets
            svr = svm.SVR(C=C, kernel='rbf', gamma=G).fit(
                self.data, self.dataTarget)  # train an SVR with the optimized hyperparameters
            y_predict = svr.predict(data_test)  # predict on the validation data
            print("validation R2 = %s" % (r2_score(dataTarget_test, y_predict)))
    
    
    def subAimFunc(args):
        i = args[0]
        Vars = args[1]
        data = args[2]
        dataTarget = args[3]
        C = Vars[i, 0]
        G = Vars[i, 1]
        svr = svm.SVR(C=C, kernel='rbf', gamma=G).fit(
            data, dataTarget)  # train an SVR with this individual's C and gamma
        scores = cross_val_score(svr, data, dataTarget, cv=4)  # 4-fold cross-validation on the training data
        ObjV_i = [scores.mean()]  # mean CV score is this individual's objective value
        return ObjV_i
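
    Since cross_val_score uses the regressor's default scorer (R2 for SVR) and maxormins = [-1], the evolutionary algorithm maximizes the mean 4-fold cross-validation R2. The same evaluation can be reproduced by hand for a single (C, gamma) pair; the values below are arbitrary, illustrative choices:
    X_train = [[0, 0.1], [1.2, 1], [2.1, 2], [3, 3.1], [4, 4.1], [5, 5.1], [6, 6.2], [7, 7.1]]
    y_train = np.array([0, 0.9, 2.1, 3, 4.1, 5.2, 5.9, 7])
    score = cross_val_score(svm.SVR(C=1.0, kernel='rbf', gamma=0.5), X_train, y_train, cv=4).mean()
    print("mean 4-fold CV R2:", score)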
    

    Main program:
    if __name__ == '__main__':
        """============================== Instantiate the problem object =============================="""
        PoolType = 'Thread'  # use threads; set PoolType = 'Process' to use multiprocessing instead
        problem = MyProblem(PoolType)  # instantiate the problem object
        """==================================== Population setup ===================================="""
        Encoding = 'RI'  # real-integer encoding
        NIND = 50  # population size
        Field = ea.crtfld(Encoding, problem.varTypes, problem.ranges, problem.borders)  # create the field descriptor
        population = ea.Population(Encoding, Field, NIND)  # instantiate the population object (not yet initialized; the algorithm template does that)
        """================================== Algorithm parameters =================================="""
        myAlgorithm = ea.soea_DE_rand_1_bin_templet(problem, population)  # instantiate the DE/rand/1/bin algorithm template
        myAlgorithm.MAXGEN = 30  # maximum number of generations
        myAlgorithm.trappedValue = 1e-6  # threshold for detecting evolutionary stagnation
        myAlgorithm.maxTrappedCount = 10  # stop if the best objective improves by less than trappedValue for this many consecutive generations
        myAlgorithm.logTras = 1  # how often (in generations) to record the log; 0 disables logging
        myAlgorithm.verbose = True  # print the log while running
        myAlgorithm.drawing = 1  # plotting mode (0: none; 1: result plot; 2: objective-space animation; 3: decision-space animation)
        """============================= Run the algorithm template ============================="""
        [BestIndi, population] = myAlgorithm.run()  # run the algorithm; returns the best individual and the final population
        BestIndi.save()  # save the best individual's information to files
        """======================================= Results ======================================="""
        print('Number of evaluations: %s' % myAlgorithm.evalsNum)
        print('Elapsed time: %s seconds' % myAlgorithm.passTime)
        if BestIndi.sizes != 0:
            print('Best objective value: %s' % BestIndi.ObjV[0][0])
            print('Best decision variables (C, gamma):')
            for i in range(BestIndi.Phen.shape[1]):
                print(BestIndi.Phen[0, i])
            """===================================== Validation ====================================="""
            problem.test(C=BestIndi.Phen[0, 0], G=BestIndi.Phen[0, 1])
        else:
            print('No feasible solution was found.')
    
    

    Complete code
    from sklearn import svm
    import numpy as np
    import random
    import copy
    import matplotlib.pyplot as plt
    
    
    def msefunc(predictval, realval):
        squaredError = []
        # absError = []
        for i in range(len(predictval)):
            val = predictval[i] - realval[i]
            squaredError.append(val * val)  # squared error of each prediction
    
        print("Square Error: ", squaredError)
        print("MSE = ", sum(squaredError) / len(squaredError))  #     MSE
        return sum(squaredError) / len(squaredError)
    
    
    def SVMResult(vardim, x, bound):
        X_train = [[0, 0], [2, 2], [1, 4], [3, 7], [3, 6]]
        y_train = [0.5, 2.5, 3.0, 4.0, 5]
        X_valid = [[1, 1], [3, 5]]
        y_valid = [3, 4]
        c = x[0]
        e = x[1]
        g = x[2]
        clf = svm.SVR(C=c, epsilon=e, gamma=g, kernel='rbf')
        clf.fit(X_train, y_train)
        y_pred = clf.predict(X_valid)
        print("y_pred is", y_pred , "y_true is" , y_valid)
        #   svm mse      
        return msefunc(y_pred, y_valid)
    
    
    '''=========================================================================================================
    ============================================================================================================'''
    
    
    class GAIndividual:
        '''
        individual of genetic algorithm
        each instance serves as one individual of the population (pop), holding a candidate [C, epsilon, gamma] vector
        '''
        def __init__(self, vardim, bound):
            '''
            vardim: dimension of variables
            bound: boundaries of variables
            '''
            self.vardim = vardim
            self.bound = bound
            self.fitness = 0.
    
        def generate(self):
            '''
            generate a random chromosome for the genetic algorithm
            '''
            dim = self.vardim
            rnd = np.random.random(size=dim)
            self.chrom = np.zeros(dim)
            for i in range(0, dim):
                self.chrom[i] = self.bound[0, i] + \
                    (self.bound[1, i] - self.bound[0, i]) * rnd[i]
    
        def calculateFitness(self):
            '''
            calculate the fitness of the chromosome
            '''
            # SVMResult returns the validation MSE (smaller is better); turn it into a
            # fitness to maximize, so that the trace formula (1 - f) / f recovers the MSE
            self.fitness = 1.0 / (1.0 + SVMResult(self.vardim, self.chrom, self.bound))
    
    
    '''=========================================================================================================
    ============================================================================================================'''
    
    
    class GeneticAlgorithm:
        '''
        The class for genetic algorithm
        '''
        def __init__(self, sizepop, vardim, bound, MAXGEN, params):
            '''
            sizepop: population size
            vardim: dimension of variables
            bound: boundaries of variables
            MAXGEN: termination condition
            params: algorithm parameters, a list consisting of
                    the crossover rate, mutation rate, and alpha
            '''
            self.sizepop = sizepop
            self.MAXGEN = MAXGEN
            self.vardim = vardim
            self.bound = bound
            self.population = []
            self.fitness = np.zeros((self.sizepop, 1))
            self.trace = np.zeros((self.MAXGEN, 2))
            self.params = params
    
        def initialize(self):
            '''
            initialize the population
            '''
            for i in range(0, self.sizepop):
                ind = GAIndividual(self.vardim, self.bound)
                ind.generate()
                self.population.append(ind)
    
        def evaluate(self):
            '''
            evaluation of the population fitnesses
            '''
            for i in range(0, self.sizepop):
                self.population[i].calculateFitness()
                self.fitness[i] = self.population[i].fitness
    
        def solve(self):
            '''
            evolution process of genetic algorithm
            '''
            self.t = 0                            # generation counter
            self.initialize()                     # create the initial population
            self.evaluate()                       # evaluate the fitness of every individual
            best = np.max(self.fitness)           # best fitness in the current generation
            bestIndex = np.argmax(self.fitness)   # index of the best individual
            self.best = copy.deepcopy(self.population[bestIndex])
            self.avefitness = np.mean(self.fitness)  # average fitness
            self.trace[self.t, 0] = (1 - self.best.fitness) / self.best.fitness  # best objective value so far
            self.trace[self.t, 1] = (1 - self.avefitness) / self.avefitness      # average objective value
            print(
                "Generation %d: optimal function value is: %f; average function value is %f"
                % (self.t, self.trace[self.t, 0], self.trace[self.t, 1]))
            while (self.t < self.MAXGEN - 1):
                self.t += 1
                self.selectionOperation()  # selection
                self.crossoverOperation()  # crossover
                self.mutationOperation()   # mutation
                self.evaluate()            # re-evaluate the offspring population
                best = np.max(self.fitness)
                bestIndex = np.argmax(self.fitness)
                if best > self.best.fitness:
                    self.best = copy.deepcopy(self.population[bestIndex])
                self.avefitness = np.mean(self.fitness)
                # objective value of the best individual found so far
                self.trace[self.t, 0] = (1 - self.best.fitness) / self.best.fitness
                # average objective value of the current generation
                self.trace[self.t, 1] = (1 - self.avefitness) / self.avefitness
                print(
                    "Generation %d: optimal function value is: %f; average function value is %f"
                    % (self.t, self.trace[self.t, 0], self.trace[self.t, 1]))
            print("Optimal function value is: %f; " % self.trace[self.t, 0])
            print("Optimal solution is:",self.best.chrom)
            self.printResult()
    
        def selectionOperation(self):
            '''
            selection operation for Genetic Algorithm
            '''
            newpop = []
            totalFitness = np.sum(self.fitness)
            accuFitness = np.zeros((self.sizepop, 1))
    
            # cumulative normalized fitness, used for roulette-wheel selection
            sum1 = 0.
            for i in range(0, self.sizepop):
                accuFitness[i] = sum1 + self.fitness[i] / totalFitness
                sum1 = accuFitness[i]
    
            # spin the roulette wheel sizepop times to select the next generation
            for i in range(0, self.sizepop):
                r = random.random()
                idx = 0
                for j in range(0, self.sizepop - 1):
                    if j == 0 and r < accuFitness[j]:
                        idx = 0
                        break
                    elif r >= accuFitness[j] and r < accuFitness[j + 1]:
                        idx = j + 1
                        break
                newpop.append(self.population[idx])
            self.population = newpop
    
        def crossoverOperation(self):
            '''
            crossover operation for genetic algorithm
            '''
            newpop = []
            # pick two distinct parents at random; cross them with probability params[0]
            for i in range(0, self.sizepop, 2):
                idx1 = random.randint(0, self.sizepop - 1)
                idx2 = random.randint(0, self.sizepop - 1)
                while idx2 == idx1:
                    idx2 = random.randint(0, self.sizepop - 1)
                newpop.append(copy.deepcopy(self.population[idx1]))
                newpop.append(copy.deepcopy(self.population[idx2]))
                r = random.random()
                # params[0]=>crossover_rate; 
                # params[1]=>mutation_rate; 
                # params[2]=>alpha
                if r < self.params[0]:
                    crossPos = random.randint(1, self.vardim - 1)
                    for j in range(crossPos, self.vardim):
                        # chrom[j1]=chrom[j1]*α+(1-α)*chrom[j2]
                        newpop[i].chrom[j] = newpop[i].chrom[j] * self.params[2] +\
                            (1 - self.params[2]) * newpop[i + 1].chrom[j]
                        # chrom[j2]=chrom[j2]*α+(1-α)*chrom[j1]
                        newpop[i + 1].chrom[j] = newpop[i + 1].chrom[j] * self.params[2] + \
                            (1 - self.params[2]) * newpop[i].chrom[j]
            self.population = newpop
    
        def mutationOperation(self):
            '''
            mutation operation for genetic algorithm
            '''
            newpop = []
            for i in range(0, self.sizepop):
                newpop.append(copy.deepcopy(self.population[i]))
                r = random.random()
                if r < self.params[1]:
                    mutatePos = random.randint(0, self.vardim - 1)
                    theta = random.random()
                    if theta > 0.5:
                        # chrom=chrom-(chrom-lowerlimit)*(1-rand^(1-t/N))
                        newpop[i].chrom[mutatePos] = newpop[i].chrom[mutatePos] - \
                            (newpop[i].chrom[mutatePos] - self.bound[0, mutatePos]) * \
                                (1 - random.random()**(1 - self.t / self.MAXGEN))
                    else:
                        # chrom=chrom+(upperlimit-chrom)*(1-rand^(1-t/N))
                        newpop[i].chrom[mutatePos] = newpop[i].chrom[mutatePos] + \
                            (self.bound[1, mutatePos] - newpop[i].chrom[mutatePos]) * \
                                (1 - random.random()**(1 - self.t / self.MAXGEN))
            self.population = newpop
    
        def printResult(self):
            '''
            plot the result of the genetic algorithm
            '''
            x = np.arange(0, self.MAXGEN)
            y1 = self.trace[:, 0]
            y2 = self.trace[:, 1]
            plt.plot(x, y1, 'r', label='optimal value')
            plt.plot(x, y2, 'g', label='average value')
            plt.xlabel("Iteration")
            plt.ylabel("function value")
            plt.title("Genetic algorithm for function optimization")
            plt.legend()
            plt.show()
    
    '''=========================================================================================================
    ============================================================================================================'''
    
    if __name__ == "__main__":
        bound = np.tile([[0.0000001], [1]], 3)  # 2x3 bounds: row 0 = lower, row 1 = upper limits of [C, epsilon, gamma]
        # def __init__(self, sizepop, vardim, bound, MAXGEN, params):
        ga = GeneticAlgorithm(60, 3, bound, 100, [0.9, 0.1, 0.5])
        ga.solve()
    

    Complete Geatpy code
    # -*- coding: utf-8 -*-
    import numpy as np
    import geatpy as ea
    from sklearn import svm
    from sklearn import preprocessing
    from sklearn.model_selection import cross_val_score
    import multiprocessing as mp
    from multiprocessing import Pool as ProcessPool
    from multiprocessing.dummy import Pool as ThreadPool
    from sklearn.datasets import load_iris
    from sklearn.metrics import r2_score
    
    """
         main.py   PoolType                  。
      :      ,     “if __name__ == '__main__':”    ,
             multiprocessing           。
    """
    
    class MyProblem(ea.Problem):  # subclass geatpy's Problem class
        def __init__(self, PoolType):  # PoolType is either 'Process' or 'Thread'
            name = 'MyProblem'  # problem name (arbitrary)
            M = 1  # number of objectives
            maxormins = [-1]  # per-objective flag: 1 minimizes the objective, -1 maximizes it
            Dim = 2  # number of decision variables (C and gamma)
            varTypes = [0, 0]  # variable types: 0 means continuous, 1 means discrete
            lb = [2 ** (-8)] * Dim  # lower bounds of the decision variables
            ub = [2 ** 8] * Dim  # upper bounds of the decision variables
            lbin = [1] * Dim  # 1: the lower bound is included; 0: excluded
            ubin = [1] * Dim  # 1: the upper bound is included; 0: excluded
            # call the parent constructor to complete the problem setup
            ea.Problem.__init__(self, name, M, maxormins, Dim,
                                varTypes, lb, ub, lbin, ubin)
            # training data used when evaluating the objective function
            X_train = [[0, 0.1], [1.2, 1], [2.1, 2], [3, 3.1], [4, 4.1], [5, 5.1], [6, 6.2], [7, 7.1]]
            y_train = [0, 0.9, 2.1, 3, 4.1, 5.2, 5.9, 7]
            self.data = X_train  # feature data
            self.dataTarget = np.array(y_train)
            # choose between a thread pool and a process pool for parallel evaluation
            self.PoolType = PoolType
            if self.PoolType == 'Thread':
                self.pool = ThreadPool(2)  # thread pool of size 2
            elif self.PoolType == 'Process':
                num_cores = int(mp.cpu_count())  # number of CPU cores
                self.pool = ProcessPool(num_cores)  # process pool with one worker per core
    
        def aimFunc(self, pop):  # objective function, evaluated on the whole population at once
            Vars = pop.Phen  # decision-variable matrix of shape NIND x Dim
            args = list(
                zip(list(range(pop.sizes)), [Vars] * pop.sizes,\
                     [self.data] * pop.sizes, [self.dataTarget] * pop.sizes))
            if self.PoolType == 'Thread':
                pop.ObjV = np.array(list(self.pool.map(subAimFunc, args)))
            elif self.PoolType == 'Process':
                result = self.pool.map_async(subAimFunc, args)
                result.wait()
                pop.ObjV = np.array(result.get())
    
        def test(self, C, G):  # retrain with the optimized C and gamma and evaluate on held-out data
            # held-out validation data
            X_valid = [[1.2, 1.3], [3.5, 3.55], [6.5, 6.4]]
            y_valid = [1.3, 3.5, 6.4]
            data_test = np.array(X_valid)  # validation features
            dataTarget_test = np.array(y_valid)  # validation targets
            svr = svm.SVR(C=C, kernel='rbf', gamma=G).fit(
                self.data, self.dataTarget)  # train an SVR with the optimized hyperparameters
            y_predict = svr.predict(data_test)  # predict on the validation data
            print("validation R2 = %s" % (r2_score(dataTarget_test, y_predict)))
    
    
    def subAimFunc(args):
        i = args[0]
        Vars = args[1]
        data = args[2]
        dataTarget = args[3]
        C = Vars[i, 0]
        G = Vars[i, 1]
        svr = svm.SVR(C=C, kernel='rbf', gamma=G).fit(
            data, dataTarget)  # train an SVR with this individual's C and gamma
        scores = cross_val_score(svr, data, dataTarget, cv=4)  # 4-fold cross-validation on the training data
        ObjV_i = [scores.mean()]  # mean CV score is this individual's objective value
        return ObjV_i
    
    
    if __name__ == '__main__':
        """============================== Instantiate the problem object =============================="""
        PoolType = 'Thread'  # use threads; set PoolType = 'Process' to use multiprocessing instead
        problem = MyProblem(PoolType)  # instantiate the problem object
        """==================================== Population setup ===================================="""
        Encoding = 'RI'  # real-integer encoding
        NIND = 50  # population size
        Field = ea.crtfld(Encoding, problem.varTypes, problem.ranges, problem.borders)  # create the field descriptor
        population = ea.Population(Encoding, Field, NIND)  # instantiate the population object (not yet initialized; the algorithm template does that)
        """================================== Algorithm parameters =================================="""
        myAlgorithm = ea.soea_DE_rand_1_bin_templet(problem, population)  # instantiate the DE/rand/1/bin algorithm template
        myAlgorithm.MAXGEN = 30  # maximum number of generations
        myAlgorithm.trappedValue = 1e-6  # threshold for detecting evolutionary stagnation
        myAlgorithm.maxTrappedCount = 10  # stop if the best objective improves by less than trappedValue for this many consecutive generations
        myAlgorithm.logTras = 1  # how often (in generations) to record the log; 0 disables logging
        myAlgorithm.verbose = True  # print the log while running
        myAlgorithm.drawing = 1  # plotting mode (0: none; 1: result plot; 2: objective-space animation; 3: decision-space animation)
        """============================= Run the algorithm template ============================="""
        [BestIndi, population] = myAlgorithm.run()  # run the algorithm; returns the best individual and the final population
        BestIndi.save()  # save the best individual's information to files
        """======================================= Results ======================================="""
        print('Number of evaluations: %s' % myAlgorithm.evalsNum)
        print('Elapsed time: %s seconds' % myAlgorithm.passTime)
        if BestIndi.sizes != 0:
            print('Best objective value: %s' % BestIndi.ObjV[0][0])
            print('Best decision variables (C, gamma):')
            for i in range(BestIndi.Phen.shape[1]):
                print(BestIndi.Phen[0, i])
            """===================================== Validation ====================================="""
            problem.test(C=BestIndi.Phen[0, 0], G=BestIndi.Phen[0, 1])
        else:
            print('No feasible solution was found.')
    
    

    References:
    [1] https://blog.csdn.net/fsfsfsdfsdfdr/article/details/85013977
    [2] http://www.cnblogs.com/biaoyu/p/4857881.html
    [3] https://scikit-learn.org/stable/modules/svm.html#regression
    [4] https://github.com/geatpydev/geatpy/blob/master/geatpy/demo/soea_demo/soea_demo6/MyProblem.py