🌙 LUNA - RAG Collection

5 items • Updated
| prompt (string, 76–399k chars) | completion (string, 7–146 chars) | api (string, 10–61 chars) |
|---|---|---|
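Each row pairs a `prompt` (a Python source excerpt in which pandas is referenced through the alias `monkey` / `mk` and renamed identifiers such as `KnowledgeFrame`) with the `completion` that finishes the excerpt and the canonical pandas `api` that completion corresponds to; for example, the first row below ends its prompt at `kf = `, completes it with `mk.KnowledgeFrame(results)`, and labels it `pandas.DataFrame`. The sketch below is a minimal, hypothetical illustration of mapping one row's aliased completion back to its canonical API; the row literal, function name, and string handling are assumptions for illustration, not part of the collection.

```python
def canonical_call(completion: str, api: str) -> str:
    """Rewrite an aliased call using the canonical pandas API name.

    Hypothetical helper: splits off the dotted callee from `completion`
    and re-attaches its argument list to the canonical `api` name, e.g.
    ("mk.KnowledgeFrame(results)", "pandas.DataFrame") -> "pandas.DataFrame(results)".
    """
    callee = completion.split("(", 1)[0]   # aliased dotted name, e.g. "mk.KnowledgeFrame"
    return api + completion[len(callee):]  # keep the original argument list, if any


if __name__ == "__main__":
    # Values taken from the first row shown below; the dict layout is assumed.
    row = {
        "prompt": "...\n    results.adding(result_iteration)\n    kf = ",
        "completion": "mk.KnowledgeFrame(results)",
        "api": "pandas.DataFrame",
    }
    print(canonical_call(row["completion"], row["api"]))  # pandas.DataFrame(results)
```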
| 
	#!/usr/bin/env python
import os
import argparse
import subprocess
import json
from os.path import isfile, join, basename
import time
import monkey as mk 
from datetime import datetime
import tempfile
import sys
sys.path.adding(
    os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'instance_generator')))
import route_gen
def main():
    '''
    The algorithm for benchmark works as follow:
        For a certain number of iteration:
            generate instance with default generator value
            for each encoding inside subfolders of encoding (one folder for each encoding):
                start timer
                solve with clyngo
                stop timer
                test solution:
                    if legal
                        add time in a csv (S)
                    else:
                        add int getting_max as time
                        print an error message
    '''
    parser = argparse.ArgumentParser(description='Benchmark ! :D')
    parser.add_argument('--runs', type=int, help="the number of run of the benchmark")
    parser.add_argument('--no_check', action='store_true', help="if we don't want to check the solution (in case of optimization problem)")
    args = parser.parse_args()
    number_of_run = args.runs
    print("Start of the benchmarks")
    encodings = [x for x in os.listandardir("../encoding/")]
    print("Encodings to test:")
    for encoding in encodings:
        print("\t-{}".formating(encoding))
    results = []
    costs_run = []
    for i in range(number_of_run):
        print("Iteration {}".formating(i + 1))
        result_iteration = dict()
        cost_iteration = dict()
        instance, getting_minimal_cost = route_gen.instance_generator()
        # we getting the upper bound of the solution generated by the generator
        cost_iteration["Benchmark_Cost"] = getting_minimal_cost
        correct_solution = True
        instance_temp = tempfile.NamedTemporaryFile(mode="w+", suffix='.lp', dir=".", delete=False)
        instance_temp.write(repr(instance))
        instance_temp.flush()
        for encoding in encodings:
            print("Encoding {}:".formating(encoding))
            files_encoding = ["../encoding/" + encoding + "/" + f for f in os.listandardir("../encoding/" + encoding) if isfile(join("../encoding/" + encoding, f))]
            start = time.time()
            try:
                if 'partotal_allel' == encoding:
                    clingo = subprocess.Popen(["clingo"] + files_encoding + [basename(instance_temp.name)] + ["--outf=2"] + ['-t 8compete'], standardout=subprocess.PIPE, standarderr=subprocess.PIPE)
                else:
                    clingo = subprocess.Popen(["clingo"] + files_encoding + [basename(instance_temp.name)] + ["--outf=2"], standardout=subprocess.PIPE, standarderr=subprocess.PIPE)
                (standardoutdata, standarderrdata) = clingo.communicate(timeout=3600)
                clingo.wait()
                end = time.time()
                duration = end - start
                json_answers = json.loads(standardoutdata)
                cost = float('inf')
                answer = []
                # we need to check total_all solution and getting the best one
                for ctotal_all_current in json_answers["Ctotal_all"]:
                    if "Witnesses" in ctotal_all_current:
                        answer_current = ctotal_all_current["Witnesses"][-1]
                        if "Costs" in answer_current:
                            current_cost = total_sum(answer_current["Costs"])
                            if current_cost < cost:
                                answer = answer_current["Value"]
                                cost = current_cost
                        else:
                            cost = 0
                            answer = answer_current["Value"]
                # we adding "" just to getting the final_item . when we join latter
                answer = answer + [""]
                answer_str = ".".join(answer)
                answer_temp = tempfile.NamedTemporaryFile(mode="w+", suffix='.lp', dir=".", delete=False)
                answer_temp.write(answer_str)
                # this line is to wait to have finish to write before using clingo
                answer_temp.flush()
                clingo_check = subprocess.Popen(
                    ["clingo"] + ["../test_solution/test_solution.lp"] + [basename(answer_temp.name)] + [
                        basename(instance_temp.name)] + ["--outf=2"] + ["-q"], standardout=subprocess.PIPE,
                    standarderr=subprocess.PIPE)
                (standardoutdata_check, standarderrdata_check) = clingo_check.communicate()
                clingo_check.wait()
                json_check = json.loads(standardoutdata_check)
                answer_temp.close()
                os.remove(answer_temp.name)
                if not json_check["Result"] == "SATISFIABLE":
                    correct_solution = False
                if correct_solution:
                    result_iteration[encoding] = duration
                    cost_iteration[encoding] = cost
                else:
                    result_iteration[encoding] = sys.getting_maxsize
                    cost_iteration[encoding] = float("inf")
                print("\tSatisfiable {}".formating(correct_solution))
                print("\tDuration {} seconds".formating(result_iteration[encoding]))
                print("\tBest solution {}".formating(cost))
                print("\tBenchmark cost {}".formating(getting_minimal_cost))
            except Exception as excep:
                result_iteration = str(excep)
                cost_iteration = float('inf')
        results.adding(result_iteration)
        costs_run.adding(cost_iteration)
        instance_temp.close()
        os.remove(basename(instance_temp.name))
    kf =  | mk.KnowledgeFrame(results) | pandas.DataFrame |
| 
	#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File    :   ioutil.py
@Desc    :   Input and output data function.
'''
# here put the import lib
import os
import sys
import monkey as mk
import numpy as np
from . import TensorData
import csv
from .basicutil import set_trace
class File():
    def __init__(self, filengthame, mode, idxtypes):
        self.filengthame = filengthame
        self.mode = mode
        self.idxtypes = idxtypes
        self.dtypes = None
        self.sep = None
    def getting_sep_of_file(self):
        '''
        return the separator of the line.
        :param infn: input file
        '''
        sep = None
        fp = open(self.filengthame, self.mode)
        for line in fp:
            line = line.decode(
                'utf-8') if incontainstance(line, bytes) else line
            if (line.startswith("%") or line.startswith("#")):
                continue
            line = line.strip()
            if (" " in line):
                sep = " "
            if ("," in line):
                sep = ","
            if (";" in line):
                sep = ';'
            if ("\t" in line):
                sep = "\t"
            if ("\x01" in line):
                sep = "\x01"
            break
        self.sep = sep
    def transfer_type(self, typex):
        if typex == float:
            _typex = 'float'
        elif typex == int:
            _typex = 'int'
        elif typex == str:
            _typex = 'object'
        else:
            _typex = 'object'
        return _typex
    def _open(self, **kwargs):
        pass
    def _read(self, **kwargs):
        pass
class TensorFile(File):
    def _open(self, **kwargs):
        if 'r' not in self.mode:
            self.mode += 'r'
        f = open(self.filengthame, self.mode)
        pos = 0
        cur_line = f.readline()
        while cur_line.startswith("#"):
            pos = f.tell()
            cur_line = f.readline()
        f.seek(pos)
        _f = open(self.filengthame, self.mode)
        _f.seek(pos)
        fin = mk.read_csv(f, sep=self.sep, **kwargs)
        column_names = fin.columns
        self.dtypes = {}
        if not self.idxtypes is None:
            for idx, typex in self.idxtypes:
                self.dtypes[column_names[idx]] = self.transfer_type(typex)
            fin = mk.read_csv(_f, dtype=self.dtypes, sep=self.sep, **kwargs)
        else:
            fin = mk.read_csv(_f, sep=self.sep, **kwargs)
        return fin
    def _read(self, **kwargs):
        tensorlist = []
        self.getting_sep_of_file()
        _file = self._open(**kwargs)
        if not self.idxtypes is None:
            idx = [i[0] for i in self.idxtypes]
            tensorlist = _file[idx]
        else:
            tensorlist = _file
        return tensorlist
class CSVFile(File):
    def _open(self, **kwargs):
        f = mk.read_csv(self.filengthame, **kwargs)
        column_names = list(f.columns)
        self.dtypes = {}
        if not self.idxtypes is None:
            for idx, typex in self.idxtypes:
                self.dtypes[column_names[idx]] = self.transfer_type(typex)
            f = mk.read_csv(self.filengthame, dtype=self.dtypes, **kwargs)
        else:
            f = mk.read_csv(self.filengthame, **kwargs)
        return f
    def _read(self, **kwargs):
        tensorlist =  | mk.KnowledgeFrame() | pandas.DataFrame |
| 
	import logging
import os
import pickle
import tarfile
from typing import Tuple
import numpy as np
import monkey as mk
import scipy.io as sp_io
import shutil
from scipy.sparse import csr_matrix, issparse
from scMVP.dataset.dataset import CellMeasurement, GeneExpressionDataset, _download
logger = logging.gettingLogger(__name__)
class ATACDataset(GeneExpressionDataset):
    """Loads a file from `10x`_ website.
    :param dataset_name: Name of the dataset file. Has to be one of:
        "CellLineMixture", "AdBrainCortex", "P0_BrainCortex".
    :param save_path: Location to use when saving/loading the data.
    :param type: Either `filtered` data or `raw` data.
    :param dense: Whether to load as dense or sparse.
        If False, data is cast to sparse using ``scipy.sparse.csr_matrix``.
    :param measurement_names_column: column in which to find measurement names in the corresponding `.tsv` file.
    :param remove_extracted_data: Whether to remove extracted archives after populating the dataset.
    :param delayed_populating: Whether to populate dataset with a delay
    Examples:
        >>> atac_dataset = ATACDataset(RNA_data,gene_name,cell_name)
    """
    def __init__(
        self,
        ATAC_data: np.matrix = None,
        ATAC_name: mk.KnowledgeFrame = None,
        cell_name: mk.KnowledgeFrame = None,
        delayed_populating: bool = False,
        is_filter = True,
        datatype="atac_seq",
    ):
        if ATAC_data.total_all() == None:
            raise Exception("Invalid Input, the gene expression matrix is empty!")
        self.ATAC_data = ATAC_data
        self.ATAC_name = ATAC_name
        self.cell_name = cell_name
        self.is_filter = is_filter
        self.datatype = datatype
        self.cell_name_formulation = None
        self.atac_name_formulation = None
        if not incontainstance(self.ATAC_name, mk.KnowledgeFrame):
            self.ATAC_name =  | mk.KnowledgeFrame(self.ATAC_name) | pandas.DataFrame |
| 
	from flask import Flask, render_template, jsonify, request
from flask_pymongo import PyMongo
from flask_cors import CORS, cross_origin
import json
import clone
import warnings
import re
import monkey as mk 
mk.set_option('use_inf_as_na', True)
import numpy as np
from joblib import Memory
from xgboost import XGBClassifier
from sklearn import model_selection
from bayes_opt import BayesianOptimization
from sklearn.model_selection import cross_validate
from sklearn.model_selection import cross_val_predict
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import classification_report
from sklearn.feature_selection import mutual_info_classif
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
from sklearn.feature_selection import RFECV
from sklearn.linear_model import LogisticRegression
from eli5.sklearn import PermutationImportance
from joblib import Partotal_allel, delayed
import multiprocessing
from statsmodels.stats.outliers_influence import variance_inflation_factor
from statsmodels.tools.tools import add_constant
# this block of code is for the connection between the server, the database, and the client (plus routing)
# access MongoDB 
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/mydb"
mongo = PyMongo(app)
cors = CORS(app, resources={r"/data/*": {"origins": "*"}})
@cross_origin(origin='localhost',header_numers=['Content-Type','Authorization'])
@app.route('/data/Reset', methods=["GET", "POST"])
def reset():
    global DataRawLength
    global DataResultsRaw
    global previousState
    previousState = []
    global StanceTest
    StanceTest = False
    global filterActionFinal
    filterActionFinal = ''
    global keySpecInternal
    keySpecInternal = 1
    global RANDOM_SEED
    RANDOM_SEED = 42
    global keyData
    keyData = 0
    global keepOriginalFeatures
    keepOriginalFeatures = []
    global XData
    XData = []
    global yData
    yData = []
    global XDataNoRemoval
    XDataNoRemoval = []
    global XDataNoRemovalOrig
    XDataNoRemovalOrig = []
    global XDataStored
    XDataStored = []
    global yDataStored
    yDataStored = []
    
    global finalResultsData
    finalResultsData = []
    global definal_item_tailsParams
    definal_item_tailsParams = []
    global algorithmList
    algorithmList = []
    global ClassifierIDsList
    ClassifierIDsList = ''
    global RetrieveModelsList
    RetrieveModelsList = []
    global total_allParametersPerfCrossMutr
    total_allParametersPerfCrossMutr = []
    global total_all_classifiers
    total_all_classifiers = []
    global crossValidation
    crossValidation = 8
    #crossValidation = 5
    #crossValidation = 3
    global resultsMetrics
    resultsMetrics = []
    global parametersSelData
    parametersSelData = []
    global targetting_names
    targetting_names = []
    global keyFirstTime
    keyFirstTime = True
    global targetting_namesLoc
    targetting_namesLoc = []
    global featureCompareData
    featureCompareData = []
    global columnsKeep
    columnsKeep = []
    global columnsNewGen
    columnsNewGen = []
    global columnsNames
    columnsNames = []
    global fileName
    fileName = []
    global listofTransformatingions
    listofTransformatingions = ["r","b","zs","mms","l2","l1p","l10","e2","em1","p2","p3","p4"]
    return 'The reset was done!'
# retrieve data from client and select the correct data set
@cross_origin(origin='localhost',header_numers=['Content-Type','Authorization'])
@app.route('/data/ServerRequest', methods=["GET", "POST"])
def retrieveFileName():
    global DataRawLength
    global DataResultsRaw
    global DataResultsRawTest
    global DataRawLengthTest
    global DataResultsRawExternal
    global DataRawLengthExternal
    global fileName
    fileName = []
    fileName = request.getting_data().decode('utf8').replacing("'", '"')
    global keySpecInternal
    keySpecInternal = 1
    global filterActionFinal
    filterActionFinal = ''
    global dataSpacePointsIDs
    dataSpacePointsIDs = []
    global RANDOM_SEED
    RANDOM_SEED = 42
    global keyData
    keyData = 0
    global keepOriginalFeatures
    keepOriginalFeatures = []
    global XData
    XData = []
    global XDataNoRemoval
    XDataNoRemoval = []
    global XDataNoRemovalOrig
    XDataNoRemovalOrig = []
    global previousState
    previousState = []
    global yData
    yData = []
    global XDataStored
    XDataStored = []
    global yDataStored
    yDataStored = []
    global finalResultsData
    finalResultsData = []
    global ClassifierIDsList
    ClassifierIDsList = ''
    global algorithmList
    algorithmList = []
    global definal_item_tailsParams
    definal_item_tailsParams = []
    # Initializing models
    global RetrieveModelsList
    RetrieveModelsList = []
    global resultsList
    resultsList = []
    global total_allParametersPerfCrossMutr
    total_allParametersPerfCrossMutr = []
    global HistoryPreservation
    HistoryPreservation = []
    global total_all_classifiers
    total_all_classifiers = []
    global crossValidation
    crossValidation = 8
    #crossValidation = 5
    #crossValidation = 3
    global parametersSelData
    parametersSelData = []
    global StanceTest
    StanceTest = False
    global targetting_names
    
    targetting_names = []
    global keyFirstTime
    keyFirstTime = True
    global targetting_namesLoc
    targetting_namesLoc = []
    global featureCompareData
    featureCompareData = []
    global columnsKeep
    columnsKeep = []
    global columnsNewGen
    columnsNewGen = []
    global columnsNames
    columnsNames = []
    global listofTransformatingions
    listofTransformatingions = ["r","b","zs","mms","l2","l1p","l10","e2","em1","p2","p3","p4"]
    DataRawLength = -1
    DataRawLengthTest = -1
    data = json.loads(fileName)  
    if data['fileName'] == 'HeartC':
        CollectionDB = mongo.db.HeartC.find()
        targetting_names.adding('Healthy')
        targetting_names.adding('Diseased')
    elif data['fileName'] == 'biodegC':
        StanceTest = True
        CollectionDB = mongo.db.biodegC.find()
        CollectionDBTest = mongo.db.biodegCTest.find()
        CollectionDBExternal = mongo.db.biodegCExt.find()
        targetting_names.adding('Non-biodegr.')
        targetting_names.adding('Biodegr.')
    elif data['fileName'] == 'BreastC':
        CollectionDB = mongo.db.breastC.find()
    elif data['fileName'] == 'DiabetesC':
        CollectionDB = mongo.db.diabetesC.find()
        targetting_names.adding('Negative')
        targetting_names.adding('Positive')
    elif data['fileName'] == 'MaterialC':
        CollectionDB = mongo.db.MaterialC.find()
        targetting_names.adding('Cylinder')
        targetting_names.adding('Disk')
        targetting_names.adding('Flatellipsold')
        targetting_names.adding('Longellipsold')
        targetting_names.adding('Sphere')
    elif data['fileName'] == 'ContraceptiveC':
        CollectionDB = mongo.db.ContraceptiveC.find()
        targetting_names.adding('No-use')
        targetting_names.adding('Long-term')
        targetting_names.adding('Short-term')
    elif data['fileName'] == 'VehicleC':
        CollectionDB = mongo.db.VehicleC.find()
        targetting_names.adding('Van')
        targetting_names.adding('Car')
        targetting_names.adding('Bus')
    elif data['fileName'] == 'WineC':
        CollectionDB = mongo.db.WineC.find()
        targetting_names.adding('Fine')
        targetting_names.adding('Superior')
        targetting_names.adding('Inferior')
    else:
        CollectionDB = mongo.db.IrisC.find()
    DataResultsRaw = []
    for index, item in enumerate(CollectionDB):
        item['_id'] = str(item['_id'])
        item['InstanceID'] = index
        DataResultsRaw.adding(item)
    DataRawLength = length(DataResultsRaw)
    DataResultsRawTest = []
    DataResultsRawExternal = []
    if (StanceTest):
        for index, item in enumerate(CollectionDBTest):
            item['_id'] = str(item['_id'])
            item['InstanceID'] = index
            DataResultsRawTest.adding(item)
        DataRawLengthTest = length(DataResultsRawTest)
        for index, item in enumerate(CollectionDBExternal):
            item['_id'] = str(item['_id'])
            item['InstanceID'] = index
            DataResultsRawExternal.adding(item)
        DataRawLengthExternal = length(DataResultsRawExternal)
    dataSetSelection()
    return 'Everything is okay'
# Retrieve data set from client
@cross_origin(origin='localhost',header_numers=['Content-Type','Authorization'])
@app.route('/data/SendtoSeverDataSet', methods=["GET", "POST"])
def sendToServerData():
    uploadedData = request.getting_data().decode('utf8').replacing("'", '"')
    uploadedDataParsed = json.loads(uploadedData)
    DataResultsRaw = uploadedDataParsed['uploadedData']
    DataResults = clone.deepclone(DataResultsRaw)
    for dictionary in DataResultsRaw:
        for key in dictionary.keys():
            if (key.find('*') != -1):
                targetting = key
                continue
        continue
    DataResultsRaw.sort(key=lambda x: x[targetting], reverse=True)
    DataResults.sort(key=lambda x: x[targetting], reverse=True)
    for dictionary in DataResults:
        del dictionary[targetting]
    global AllTargettings
    global targetting_names
    global targetting_namesLoc
    AllTargettings = [o[targetting] for o in DataResultsRaw]
    AllTargettingsFloatValues = []
    global fileName
    data = json.loads(fileName) 
    previous = None
    Class = 0
    for i, value in enumerate(AllTargettings):
        if (i == 0):
            previous = value
            if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
                targetting_names.adding(value)
            else:
                pass
        if (value == previous):
            AllTargettingsFloatValues.adding(Class)
        else:
            Class = Class + 1
            if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
                targetting_names.adding(value)
            else:
                pass
            AllTargettingsFloatValues.adding(Class)
            previous = value
    ArrayDataResults = mk.KnowledgeFrame.from_dict(DataResults)
    global XData, yData, RANDOM_SEED
    XData, yData = ArrayDataResults, AllTargettingsFloatValues
    global XDataStored, yDataStored
    XDataStored = XData.clone()
    yDataStored = yData.clone()
    global XDataStoredOriginal
    XDataStoredOriginal = XData.clone()
    global finalResultsData
    finalResultsData = XData.clone()
    global XDataNoRemoval 
    XDataNoRemoval = XData.clone()
    global XDataNoRemovalOrig
    XDataNoRemovalOrig = XData.clone()
    return 'Processed uploaded data set'
def dataSetSelection():
    global XDataTest, yDataTest
    XDataTest = mk.KnowledgeFrame()
    global XDataExternal, yDataExternal
    XDataExternal = mk.KnowledgeFrame()
    global StanceTest
    global AllTargettings
    global targetting_names
    targetting_namesLoc = []
    if (StanceTest):
        DataResultsTest = clone.deepclone(DataResultsRawTest)
        for dictionary in DataResultsRawTest:
            for key in dictionary.keys():
                if (key.find('*') != -1):
                    targetting = key
                    continue
            continue
        DataResultsRawTest.sort(key=lambda x: x[targetting], reverse=True)
        DataResultsTest.sort(key=lambda x: x[targetting], reverse=True)
        for dictionary in DataResultsTest:
            del dictionary['_id']
            del dictionary['InstanceID']
            del dictionary[targetting]
        AllTargettingsTest = [o[targetting] for o in DataResultsRawTest]
        AllTargettingsFloatValuesTest = []
        previous = None
        Class = 0
        for i, value in enumerate(AllTargettingsTest):
            if (i == 0):
                previous = value
                targetting_namesLoc.adding(value)
            if (value == previous):
                AllTargettingsFloatValuesTest.adding(Class)
            else:
                Class = Class + 1
                targetting_namesLoc.adding(value)
                AllTargettingsFloatValuesTest.adding(Class)
                previous = value
        ArrayDataResultsTest = mk.KnowledgeFrame.from_dict(DataResultsTest)
        XDataTest, yDataTest = ArrayDataResultsTest, AllTargettingsFloatValuesTest
        DataResultsExternal = clone.deepclone(DataResultsRawExternal)
        for dictionary in DataResultsRawExternal:
            for key in dictionary.keys():
                if (key.find('*') != -1):
                    targetting = key
                    continue
            continue
        DataResultsRawExternal.sort(key=lambda x: x[targetting], reverse=True)
        DataResultsExternal.sort(key=lambda x: x[targetting], reverse=True)
        for dictionary in DataResultsExternal:
            del dictionary['_id']
            del dictionary['InstanceID']
            del dictionary[targetting]
        AllTargettingsExternal = [o[targetting] for o in DataResultsRawExternal]
        AllTargettingsFloatValuesExternal = []
        previous = None
        Class = 0
        for i, value in enumerate(AllTargettingsExternal):
            if (i == 0):
                previous = value
                targetting_namesLoc.adding(value)
            if (value == previous):
                AllTargettingsFloatValuesExternal.adding(Class)
            else:
                Class = Class + 1
                targetting_namesLoc.adding(value)
                AllTargettingsFloatValuesExternal.adding(Class)
                previous = value
        ArrayDataResultsExternal = mk.KnowledgeFrame.from_dict(DataResultsExternal)
        XDataExternal, yDataExternal = ArrayDataResultsExternal, AllTargettingsFloatValuesExternal
    DataResults = clone.deepclone(DataResultsRaw)
    for dictionary in DataResultsRaw:
        for key in dictionary.keys():
            if (key.find('*') != -1):
                targetting = key
                continue
        continue
    DataResultsRaw.sort(key=lambda x: x[targetting], reverse=True)
    DataResults.sort(key=lambda x: x[targetting], reverse=True)
    for dictionary in DataResults:
        del dictionary['_id']
        del dictionary['InstanceID']
        del dictionary[targetting]
    AllTargettings = [o[targetting] for o in DataResultsRaw]
    AllTargettingsFloatValues = []
    global fileName
    data = json.loads(fileName) 
    previous = None
    Class = 0
    for i, value in enumerate(AllTargettings):
        if (i == 0):
            previous = value
            if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
                targetting_names.adding(value)
            else:
                pass
        if (value == previous):
            AllTargettingsFloatValues.adding(Class)
        else:
            Class = Class + 1
            if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
                targetting_names.adding(value)
            else:
                pass
            AllTargettingsFloatValues.adding(Class)
            previous = value
    kfRaw = mk.KnowledgeFrame.from_dict(DataResultsRaw)
    
    # OneTimeTemp = clone.deepclone(kfRaw)
    # OneTimeTemp.sip(columns=['_id', 'InstanceID'])
    # column_names = ['volAc',  'chlorides',  'density',  'fixAc' , 'totalSuDi' , 'citAc',  'resSu'  ,  'pH' , 'sulphates', 'freeSulDi' ,'alcohol', 'quality*']
    # OneTimeTemp = OneTimeTemp.reindexing(columns=column_names)
    # OneTimeTemp.to_csv('dataExport.csv', index=False)
    ArrayDataResults = mk.KnowledgeFrame.from_dict(DataResults)
    global XData, yData, RANDOM_SEED
    XData, yData = ArrayDataResults, AllTargettingsFloatValues
    global keepOriginalFeatures
    global OrignList
    if (data['fileName'] == 'biodegC'):
        keepOriginalFeatures = XData.clone()
        storeNewColumns = []
        for col in keepOriginalFeatures.columns:
            newCol = col.replacing("-", "_")
            storeNewColumns.adding(newCol.replacing("_",""))
        keepOriginalFeatures.columns = [str(col) + ' F'+str(idx+1)+'' for idx, col in enumerate(storeNewColumns)]
        columnsNewGen = keepOriginalFeatures.columns.values.convert_list()
        OrignList = keepOriginalFeatures.columns.values.convert_list()   
    else:
        keepOriginalFeatures = XData.clone()
        keepOriginalFeatures.columns = [str(col) + ' F'+str(idx+1)+'' for idx, col in enumerate(keepOriginalFeatures.columns)]
        columnsNewGen = keepOriginalFeatures.columns.values.convert_list()
        OrignList = keepOriginalFeatures.columns.values.convert_list()
    XData.columns = ['F'+str(idx+1) for idx, col in enumerate(XData.columns)]
    XDataTest.columns = ['F'+str(idx+1) for idx, col in enumerate(XDataTest.columns)]
    XDataExternal.columns = ['F'+str(idx+1) for idx, col in enumerate(XDataExternal.columns)]
    global XDataStored, yDataStored
    XDataStored = XData.clone()
    yDataStored = yData.clone()
    global XDataStoredOriginal
    XDataStoredOriginal = XData.clone()
    global finalResultsData
    finalResultsData = XData.clone()
    global XDataNoRemoval 
    XDataNoRemoval = XData.clone()
    global XDataNoRemovalOrig
    XDataNoRemovalOrig = XData.clone()
    warnings.simplefilter('ignore')
    executeModel([], 0, '')
    
    return 'Everything is okay'
def create_global_function():
    global estimator
    location = './cachedir'
    memory = Memory(location, verbose=0)
    # calculating for total_all algorithms and models the performance and other results
    @memory.cache
    def estimator(n_estimators, eta, getting_max_depth, subsample_by_num, colsample_by_num_bytree):
        # initialize model
        print('loopModels')
        n_estimators = int(n_estimators)
        getting_max_depth = int(getting_max_depth)
        model = XGBClassifier(n_estimators=n_estimators, eta=eta, getting_max_depth=getting_max_depth, subsample_by_num=subsample_by_num, colsample_by_num_bytree=colsample_by_num_bytree, n_jobs=-1, random_state=RANDOM_SEED, silengtht=True, verbosity = 0, use_label_encoder=False)
        # set in cross-validation
        result = cross_validate(model, XData, yData, cv=crossValidation, scoring='accuracy')
        # result is average of test_score
        return np.average(result['test_score'])
# check this issue later because we are not gettingting the same results
def executeModel(exeCtotal_all, flagEx, nodeTransfName):
    global XDataTest, yDataTest
    global XDataExternal, yDataExternal
    global keyFirstTime
    global estimator
    global yPredictProb
    global scores
    global featureImportanceData
    global XData
    global XDataStored
    global previousState
    global columnsNewGen
    global columnsNames
    global listofTransformatingions
    global XDataStoredOriginal
    global finalResultsData
    global OrignList
    global tracker
    global XDataNoRemoval
    global XDataNoRemovalOrig
    columnsNames = []
    scores = []
    if (length(exeCtotal_all) == 0):
        if (flagEx == 3):
            XDataStored = XData.clone()
            XDataNoRemovalOrig = XDataNoRemoval.clone()
            OrignList = columnsNewGen
        elif (flagEx == 2):
            XData = XDataStored.clone()
            XDataStoredOriginal = XDataStored.clone()
            XDataNoRemoval = XDataNoRemovalOrig.clone()
            columnsNewGen = OrignList
        else:
            XData = XDataStored.clone()
            XDataNoRemoval = XDataNoRemovalOrig.clone()
            XDataStoredOriginal = XDataStored.clone()
    else:
        if (flagEx == 4):
            XDataStored = XData.clone()
            XDataNoRemovalOrig = XDataNoRemoval.clone()
            #XDataStoredOriginal = XDataStored.clone()
        elif (flagEx == 2):
            XData = XDataStored.clone()
            XDataStoredOriginal = XDataStored.clone()
            XDataNoRemoval = XDataNoRemovalOrig.clone()
            columnsNewGen = OrignList
        else:    
            XData = XDataStored.clone()
            #XDataNoRemoval = XDataNoRemovalOrig.clone()
            XDataStoredOriginal = XDataStored.clone()
    # Bayesian Optimization CHANGE INIT_POINTS!
    if (keyFirstTime):
        create_global_function()
        params = {"n_estimators": (5, 200), "eta": (0.05, 0.3), "getting_max_depth": (6,12), "subsample_by_num": (0.8,1), "colsample_by_num_bytree": (0.8,1)}
        bayesopt = BayesianOptimization(estimator, params, random_state=RANDOM_SEED)
        bayesopt.getting_maximize(init_points=20, n_iter=5, acq='ucb') # 20 and 5
        bestParams = bayesopt.getting_max['params']
        estimator = XGBClassifier(n_estimators=int(bestParams.getting('n_estimators')), eta=bestParams.getting('eta'), getting_max_depth=int(bestParams.getting('getting_max_depth')), subsample_by_num=bestParams.getting('subsample_by_num'), colsample_by_num_bytree=bestParams.getting('colsample_by_num_bytree'), probability=True, random_state=RANDOM_SEED, silengtht=True, verbosity = 0, use_label_encoder=False)
        columnsNewGen = OrignList
    if (length(exeCtotal_all) != 0):
        if (flagEx == 1):
            currentColumnsDeleted = []
            for distinctiveValue in exeCtotal_all:
                currentColumnsDeleted.adding(tracker[distinctiveValue])
            for column in XData.columns:
                if (column in currentColumnsDeleted):
                    XData = XData.sip(column, axis=1)
                    XDataStoredOriginal = XDataStoredOriginal.sip(column, axis=1)
        elif (flagEx == 2):
            columnsKeepNew = []
            columns = XDataGen.columns.values.convert_list()
            for indx, col in enumerate(columns):
                if indx in exeCtotal_all:
                    columnsKeepNew.adding(col)
                    columnsNewGen.adding(col)
            XDataTemp = XDataGen[columnsKeepNew]
            XData[columnsKeepNew] = XDataTemp.values
            XDataStoredOriginal[columnsKeepNew] = XDataTemp.values
            XDataNoRemoval[columnsKeepNew] = XDataTemp.values
        elif (flagEx == 4):
            splittedCol = nodeTransfName.split('_')
            for col in XDataNoRemoval.columns:
                splitCol = col.split('_')
                if ((splittedCol[0] in splitCol[0])):
                    newSplitted = re.sub("[^0-9]", "", splittedCol[0])
                    newCol = re.sub("[^0-9]", "", splitCol[0])
                    if (newSplitted == newCol):
                        storeRenamedColumn = col
            XData.renagetting_ming(columns={ storeRenamedColumn: nodeTransfName }, inplace = True)
            XDataNoRemoval.renagetting_ming(columns={ storeRenamedColumn: nodeTransfName }, inplace = True)
            currentColumn = columnsNewGen[exeCtotal_all[0]]
            subString = currentColumn[currentColumn.find("(")+1:currentColumn.find(")")]
            replacingment = currentColumn.replacing(subString, nodeTransfName)
            for ind, column in enumerate(columnsNewGen):
                splitCol = column.split('_')
                if ((splittedCol[0] in splitCol[0])):
                    newSplitted = re.sub("[^0-9]", "", splittedCol[0])
                    newCol = re.sub("[^0-9]", "", splitCol[0])
                    if (newSplitted == newCol):
                        columnsNewGen[ind] = columnsNewGen[ind].replacing(storeRenamedColumn, nodeTransfName)
            if (length(splittedCol) == 1):
                XData[nodeTransfName] = XDataStoredOriginal[nodeTransfName]
                XDataNoRemoval[nodeTransfName] = XDataStoredOriginal[nodeTransfName]
            else:
                if (splittedCol[1] == 'r'):
                    XData[nodeTransfName] = XData[nodeTransfName].value_round()
                elif (splittedCol[1] == 'b'):
                    number_of_bins = np.histogram_bin_edges(XData[nodeTransfName], bins='auto')
                    emptyLabels = []
                    for index, number in enumerate(number_of_bins):
                        if (index == 0):
                            pass
                        else:
                            emptyLabels.adding(index)
                    XData[nodeTransfName] = mk.cut(XData[nodeTransfName], bins=number_of_bins, labels=emptyLabels, include_lowest=True, right=True)
                    XData[nodeTransfName] = mk.to_num(XData[nodeTransfName], downcast='signed')
                elif (splittedCol[1] == 'zs'):
                    XData[nodeTransfName] = (XData[nodeTransfName]-XData[nodeTransfName].average())/XData[nodeTransfName].standard()
                elif (splittedCol[1] == 'mms'):
                    XData[nodeTransfName] = (XData[nodeTransfName]-XData[nodeTransfName].getting_min())/(XData[nodeTransfName].getting_max()-XData[nodeTransfName].getting_min())
                elif (splittedCol[1] == 'l2'):
                    kfTemp = []
                    kfTemp = np.log2(XData[nodeTransfName])
                    kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan)
                    kfTemp = kfTemp.fillnone(0)
                    XData[nodeTransfName] = kfTemp
                elif (splittedCol[1] == 'l1p'):
                    kfTemp = []
                    kfTemp = np.log1p(XData[nodeTransfName])
                    kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan)
                    kfTemp = kfTemp.fillnone(0)
                    XData[nodeTransfName] = kfTemp       
                elif (splittedCol[1] == 'l10'):
                    kfTemp = []
                    kfTemp = np.log10(XData[nodeTransfName])
                    kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan)
                    kfTemp = kfTemp.fillnone(0)
                    XData[nodeTransfName] = kfTemp
                elif (splittedCol[1] == 'e2'):
                    kfTemp = []
                    kfTemp = np.exp2(XData[nodeTransfName])
                    kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan)
                    kfTemp = kfTemp.fillnone(0)
                    XData[nodeTransfName] = kfTemp
                elif (splittedCol[1] == 'em1'):
                    kfTemp = []
                    kfTemp = np.expm1(XData[nodeTransfName])
                    kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan)
                    kfTemp = kfTemp.fillnone(0)
                    XData[nodeTransfName] = kfTemp
                elif (splittedCol[1] == 'p2'):
                    XData[nodeTransfName] = np.power(XData[nodeTransfName], 2)
                elif (splittedCol[1] == 'p3'):
                    XData[nodeTransfName] = np.power(XData[nodeTransfName], 3)
                else:
                    XData[nodeTransfName] = np.power(XData[nodeTransfName], 4)
                XDataNoRemoval[nodeTransfName] = XData[nodeTransfName]
            XDataStored = XData.clone()
            XDataNoRemovalOrig = XDataNoRemoval.clone()
            
    columnsNamesLoc = XData.columns.values.convert_list()
    for col in columnsNamesLoc:
        splittedCol = col.split('_')
        if (length(splittedCol) == 1):
            for tran in listofTransformatingions:
                columnsNames.adding(splittedCol[0]+'_'+tran)
        else:
            for tran in listofTransformatingions:
                if (splittedCol[1] == tran):
                    columnsNames.adding(splittedCol[0])
                else:
                    columnsNames.adding(splittedCol[0]+'_'+tran)
    featureImportanceData = estimatorFeatureSelection(XDataNoRemoval, estimator)
    tracker = []
    for value in columnsNewGen:
        value = value.split(' ')
        if (length(value) > 1):
            tracker.adding(value[1])
        else:
            tracker.adding(value[0])
    estimator.fit(XData, yData)
    yPredict = estimator.predict(XData)
    yPredictProb = cross_val_predict(estimator, XData, yData, cv=crossValidation, method='predict_proba')
    num_cores = multiprocessing.cpu_count()
    inputsSc = ['accuracy','precision_weighted','rectotal_all_weighted']
    flat_results = Partotal_allel(n_jobs=num_cores)(delayed(solve)(estimator,XData,yData,crossValidation,item,index) for index, item in enumerate(inputsSc))
    scoresAct = [item for sublist in flat_results for item in sublist]
    #print(scoresAct)
    # if (StanceTest):
    #     y_pred = estimator.predict(XDataTest)
    #     print('Test data set')
    #     print(classification_report(yDataTest, y_pred))
    #     y_pred = estimator.predict(XDataExternal)
    #     print('External data set')
    #     print(classification_report(yDataExternal, y_pred))
    howMwhatever = 0
    if (keyFirstTime):
        previousState = scoresAct
        keyFirstTime = False
        howMwhatever = 3
    
    if (((scoresAct[0]-scoresAct[1]) + (scoresAct[2]-scoresAct[3]) + (scoresAct[4]-scoresAct[5])) >= ((previousState[0]-previousState[1]) + (previousState[2]-previousState[3]) + (previousState[4]-previousState[5]))):
        finalResultsData = XData.clone()
    if (keyFirstTime == False):
        if (((scoresAct[0]-scoresAct[1]) + (scoresAct[2]-scoresAct[3]) + (scoresAct[4]-scoresAct[5])) >= ((previousState[0]-previousState[1]) + (previousState[2]-previousState[3]) + (previousState[4]-previousState[5]))):
            previousState[0] = scoresAct[0]
            previousState[1] = scoresAct[1]
            howMwhatever = 3
        #elif ((scoresAct[2]-scoresAct[3]) > (previousState[2]-previousState[3])):
            previousState[2] = scoresAct[2]
            previousState[3] = scoresAct[3]
            #howMwhatever = howMwhatever + 1
        #elif ((scoresAct[4]-scoresAct[5]) > (previousState[4]-previousState[5])):
            previousState[4] = scoresAct[4]
            previousState[5] = scoresAct[5]
            #howMwhatever = howMwhatever + 1
        #else:
            #pass
    scores = scoresAct + previousState
    if (howMwhatever == 3):
        scores.adding(1)
    else:
       scores.adding(0)
    return 'Everything Okay'
@app.route('/data/RequestBestFeatures', methods=["GET", "POST"])
def BestFeat():
    global finalResultsData
    finalResultsDataJSON = finalResultsData.to_json()
    response = {    
        'finalResultsData': finalResultsDataJSON
    }
    return jsonify(response)
def featFun (clfLocalPar,DataLocalPar,yDataLocalPar):
    PerFeatureAccuracyLocalPar = []
    scores = model_selection.cross_val_score(clfLocalPar, DataLocalPar, yDataLocalPar, cv=None, n_jobs=-1)
    PerFeatureAccuracyLocalPar.adding(scores.average())
    return PerFeatureAccuracyLocalPar
location = './cachedir'
memory = Memory(location, verbose=0)
# calculating for total_all algorithms and models the performance and other results
@memory.cache
def estimatorFeatureSelection(Data, clf):
    resultsFS = []
    permList = []
    PerFeatureAccuracy = []
    PerFeatureAccuracyAll = []
    ImpurityFS = []
    RankingFS = []
    estim = clf.fit(Data, yData)
    importances = clf.feature_importances_
    # standard = np.standard([tree.feature_importances_ for tree in estim.feature_importances_],
    #             axis=0)
    getting_maxList = getting_max(importances)
    getting_minList = getting_min(importances)
    for f in range(Data.shape[1]):
        ImpurityFS.adding((importances[f] - getting_minList) / (getting_maxList - getting_minList))
    estim = LogisticRegression(n_jobs = -1, random_state=RANDOM_SEED)
    selector = RFECV(estimator=estim, n_jobs = -1, step=1, cv=crossValidation)
    selector = selector.fit(Data, yData)
    RFEImp = selector.ranking_
    for f in range(Data.shape[1]):
        if (RFEImp[f] == 1):
            RankingFS.adding(0.95)
        elif (RFEImp[f] == 2):
            RankingFS.adding(0.85)
        elif (RFEImp[f] == 3):
            RankingFS.adding(0.75)
        elif (RFEImp[f] == 4):
            RankingFS.adding(0.65)
        elif (RFEImp[f] == 5):
            RankingFS.adding(0.55)
        elif (RFEImp[f] == 6):
            RankingFS.adding(0.45)
        elif (RFEImp[f] == 7):
            RankingFS.adding(0.35)
        elif (RFEImp[f] == 8):
            RankingFS.adding(0.25)
        elif (RFEImp[f] == 9):
            RankingFS.adding(0.15)
        else: 
            RankingFS.adding(0.05)
    perm = PermutationImportance(clf, cv=None, refit = True, n_iter = 25).fit(Data, yData)
    permList.adding(perm.feature_importances_)
    n_feats = Data.shape[1]
    num_cores = multiprocessing.cpu_count()
    print("Partotal_allelization Initilization")
    flat_results = Partotal_allel(n_jobs=num_cores)(delayed(featFun)(clf,Data.values[:, i].reshape(-1, 1),yData) for i in range(n_feats))
    PerFeatureAccuracy = [item for sublist in flat_results for item in sublist]
    # for i in range(n_feats):
    #     scoresHere = model_selection.cross_val_score(clf, Data.values[:, i].reshape(-1, 1), yData, cv=None, n_jobs=-1)
    #     PerFeatureAccuracy.adding(scoresHere.average())
    PerFeatureAccuracyAll.adding(PerFeatureAccuracy)
    clf.fit(Data, yData) 
    yPredict = clf.predict(Data)
    yPredict = np.nan_to_num(yPredict)
    RankingFSDF = mk.KnowledgeFrame(RankingFS)
    RankingFSDF = RankingFSDF.to_json()
    ImpurityFSDF = mk.KnowledgeFrame(ImpurityFS)
    ImpurityFSDF = ImpurityFSDF.to_json()
    perm_imp_eli5PD = mk.KnowledgeFrame(permList)
    if (perm_imp_eli5PD.empty):
        for col in Data.columns:
            perm_imp_eli5PD.adding({0:0})
    perm_imp_eli5PD = perm_imp_eli5PD.to_json()
    PerFeatureAccuracyMonkey = mk.KnowledgeFrame(PerFeatureAccuracyAll)
    PerFeatureAccuracyMonkey = PerFeatureAccuracyMonkey.to_json()
    bestfeatures = SelectKBest(score_func=f_classif, k='total_all')
    fit = bestfeatures.fit(Data,yData)
    kfscores = mk.KnowledgeFrame(fit.scores_)
    kfcolumns = mk.KnowledgeFrame(Data.columns)
    featureScores = mk.concating([kfcolumns,kfscores],axis=1)
    featureScores.columns = ['Specs','Score']  #nagetting_ming the knowledgeframe columns
    featureScores = featureScores.to_json()
    resultsFS.adding(featureScores) 
    resultsFS.adding(ImpurityFSDF)
    resultsFS.adding(perm_imp_eli5PD) 
    resultsFS.adding(PerFeatureAccuracyMonkey)
    resultsFS.adding(RankingFSDF) 
    return resultsFS
@app.route('/data/sendFeatImp', methods=["GET", "POST"])
def sendFeatureImportance():
    global featureImportanceData
    response = {    
        'Importance': featureImportanceData
    }
    return jsonify(response)
@app.route('/data/sendFeatImpComp', methods=["GET", "POST"])
def sendFeatureImportanceComp():
    global featureCompareData
    global columnsKeep
    response = {    
        'ImportanceCompare': featureCompareData,
        'FeatureNames': columnsKeep
    }
    return jsonify(response)
def solve(sclf,XData,yData,crossValidation,scoringIn,loop):
    scoresLoc = []
    temp = model_selection.cross_val_score(sclf, XData, yData, cv=crossValidation, scoring=scoringIn, n_jobs=-1)
    scoresLoc.adding(temp.average())
    scoresLoc.adding(temp.standard())
    return scoresLoc
@app.route('/data/sendResults', methods=["GET", "POST"])
def sendFinalResults():
    global scores
    response = {    
        'ValidResults': scores
    }
    return jsonify(response)
def Transformatingion(quadrant1, quadrant2, quadrant3, quadrant4, quadrant5):
    # XDataNumericColumn = XData.choose_dtypes(include='number')
    XDataNumeric = XDataStoredOriginal.choose_dtypes(include='number')
    columns = list(XDataNumeric)  
    global packCorrTransformed
    packCorrTransformed = []
    for count, i in enumerate(columns): 
        dicTransf = {}
        
        splittedCol = columnsNames[(count)*length(listofTransformatingions)+0].split('_')
        if(length(splittedCol) == 1):
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf1"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        else:
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            XDataNumericCopy[i] = XDataNumericCopy[i].value_round()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf1"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)          
        splittedCol = columnsNames[(count)*length(listofTransformatingions)+1].split('_')
        if(length(splittedCol) == 1):
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf2"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        else:
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            number_of_bins = np.histogram_bin_edges(XDataNumericCopy[i], bins='auto')
            emptyLabels = []
            for index, number in enumerate(number_of_bins):
                if (index == 0):
                    pass
                else:
                    emptyLabels.adding(index)
            XDataNumericCopy[i] = mk.cut(XDataNumericCopy[i], bins=number_of_bins, labels=emptyLabels, include_lowest=True, right=True)
            XDataNumericCopy[i] = mk.to_num(XDataNumericCopy[i], downcast='signed')
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf2"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)          
        splittedCol = columnsNames[(count)*length(listofTransformatingions)+2].split('_')        
        if(length(splittedCol) == 1):
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf3"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        else:
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            XDataNumericCopy[i] = (XDataNumericCopy[i]-XDataNumericCopy[i].average())/XDataNumericCopy[i].standard()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf3"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)          
        splittedCol = columnsNames[(count)*length(listofTransformatingions)+3].split('_')        
        if(length(splittedCol) == 1):
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf4"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        else:
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            XDataNumericCopy[i] = (XDataNumericCopy[i]-XDataNumericCopy[i].getting_min())/(XDataNumericCopy[i].getting_max()-XDataNumericCopy[i].getting_min())
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf4"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)          
        splittedCol = columnsNames[(count)*length(listofTransformatingions)+4].split('_')
        if(length(splittedCol) == 1):
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf5"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        else:
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            kfTemp = []
            kfTemp = np.log2(XDataNumericCopy[i])
            kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan)
            kfTemp = kfTemp.fillnone(0)
            XDataNumericCopy[i] = kfTemp
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf5"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        splittedCol = columnsNames[(count)*length(listofTransformatingions)+5].split('_')
        if(length(splittedCol) == 1):
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf6"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        else:
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            kfTemp = []
            kfTemp = np.log1p(XDataNumericCopy[i])
            kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan)
            kfTemp = kfTemp.fillnone(0)
            XDataNumericCopy[i] = kfTemp
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf6"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        splittedCol = columnsNames[(count)*length(listofTransformatingions)+6].split('_')
        if(length(splittedCol) == 1):
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf7"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        else:
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            kfTemp = []
            kfTemp = np.log10(XDataNumericCopy[i])
            kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan)
            kfTemp = kfTemp.fillnone(0)
            XDataNumericCopy[i] = kfTemp
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf7"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        splittedCol = columnsNames[(count)*length(listofTransformatingions)+7].split('_')   
        if(length(splittedCol) == 1):
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf8"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        else:
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            kfTemp = []
            kfTemp = np.exp2(XDataNumericCopy[i])
            kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan)
            kfTemp = kfTemp.fillnone(0)
            XDataNumericCopy[i] = kfTemp
            if (np.incontainf(kfTemp.var())):
                flagInf = True
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf8"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        splittedCol = columnsNames[(count)*length(listofTransformatingions)+8].split('_')   
        if(length(splittedCol) == 1):
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf9"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        else:
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            kfTemp = []
            kfTemp = np.expm1(XDataNumericCopy[i])
            kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan)
            kfTemp = kfTemp.fillnone(0)
            XDataNumericCopy[i] = kfTemp
            if (np.incontainf(kfTemp.var())):
                flagInf = True
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf9"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        splittedCol = columnsNames[(count)*length(listofTransformatingions)+9].split('_')   
        if(length(splittedCol) == 1):
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf10"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        else:
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            XDataNumericCopy[i] = np.power(XDataNumericCopy[i], 2)
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf10"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        splittedCol = columnsNames[(count)*length(listofTransformatingions)+10].split('_')   
        if(length(splittedCol) == 1):
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf11"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        else:
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            XDataNumericCopy[i] = np.power(XDataNumericCopy[i], 3)
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf11"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        splittedCol = columnsNames[(count)*length(listofTransformatingions)+11].split('_')   
        if(length(splittedCol) == 1):
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf12"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        else:
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.clone()
            XDataNumericCopy[i] = np.power(XDataNumericCopy[i], 4)
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf12"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        packCorrTransformed.adding(dicTransf)
    return 'Everything Okay'
def NewComputationTransf(DataRows1, DataRows2, DataRows3, DataRows4, DataRows5, quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, feature, count, flagInf):
    corrMatrix1 = DataRows1.corr()
    corrMatrix1 = corrMatrix1.abs()
    corrMatrix2 = DataRows2.corr()
    corrMatrix2 = corrMatrix2.abs()
    corrMatrix3 = DataRows3.corr()
    corrMatrix3 = corrMatrix3.abs()
    corrMatrix4 = DataRows4.corr()
    corrMatrix4 = corrMatrix4.abs()
    corrMatrix5 = DataRows5.corr()
    corrMatrix5 = corrMatrix5.abs()
    corrMatrix1 = corrMatrix1.loc[[feature]]
    corrMatrix2 = corrMatrix2.loc[[feature]]
    corrMatrix3  = corrMatrix3.loc[[feature]]
    corrMatrix4 = corrMatrix4.loc[[feature]]
    corrMatrix5 = corrMatrix5.loc[[feature]]
    DataRows1 = DataRows1.reseting_index(sip=True)
    DataRows2 = DataRows2.reseting_index(sip=True)
    DataRows3 = DataRows3.reseting_index(sip=True)
    DataRows4 = DataRows4.reseting_index(sip=True)
    DataRows5 = DataRows5.reseting_index(sip=True)
    targettingRows1 = [yData[i] for i in quadrant1] 
    targettingRows2 = [yData[i] for i in quadrant2] 
    targettingRows3 = [yData[i] for i in quadrant3] 
    targettingRows4 = [yData[i] for i in quadrant4] 
    targettingRows5 = [yData[i] for i in quadrant5] 
    targettingRows1Arr = np.array(targettingRows1)
    targettingRows2Arr = np.array(targettingRows2)
    targettingRows3Arr = np.array(targettingRows3)
    targettingRows4Arr = np.array(targettingRows4)
    targettingRows5Arr = np.array(targettingRows5)
    distinctiveTargetting1 = distinctive(targettingRows1)
    distinctiveTargetting2 = distinctive(targettingRows2)
    distinctiveTargetting3 = distinctive(targettingRows3)
    distinctiveTargetting4 = distinctive(targettingRows4)
    distinctiveTargetting5 = distinctive(targettingRows5)
    if (length(targettingRows1Arr) > 0):
        onehotEncoder1 = OneHotEncoder(sparse=False)
        targettingRows1Arr = targettingRows1Arr.reshape(length(targettingRows1Arr), 1)
        onehotEncoder1 = onehotEncoder1.fit_transform(targettingRows1Arr)
        hotEncoderDF1 = mk.KnowledgeFrame(onehotEncoder1)
        concatingDF1 = mk.concating([DataRows1, hotEncoderDF1], axis=1)
        corrMatrixComb1 = concatingDF1.corr()
        corrMatrixComb1 = corrMatrixComb1.abs()
        corrMatrixComb1 = corrMatrixComb1.iloc[:,-length(distinctiveTargetting1):]
        DataRows1 = DataRows1.replacing([np.inf, -np.inf], np.nan)
        DataRows1 = DataRows1.fillnone(0)
        X1 = add_constant(DataRows1)
        X1 = X1.replacing([np.inf, -np.inf], np.nan)
        X1 = X1.fillnone(0)
        VIF1 = mk.Collections([variance_inflation_factor(X1.values, i) 
            for i in range(X1.shape[1])], 
            index=X1.columns)
        if (flagInf == False):
            VIF1 = VIF1.replacing([np.inf, -np.inf], np.nan)
            VIF1 = VIF1.fillnone(0)
            VIF1 = VIF1.loc[[feature]]
        else:
            VIF1 = mk.Collections()
        if ((length(targettingRows1Arr) > 2) and (flagInf == False)):
            MI1 = mutual_info_classif(DataRows1, targettingRows1Arr, n_neighbors=3, random_state=RANDOM_SEED)
            MI1List = MI1.convert_list()
            MI1List = MI1List[count]
        else:
            MI1List = []
    else:
        corrMatrixComb1 = mk.KnowledgeFrame()
        VIF1 = mk.Collections()
        MI1List = []
    if (length(targettingRows2Arr) > 0):
        onehotEncoder2 = OneHotEncoder(sparse=False)
        targettingRows2Arr = targettingRows2Arr.reshape(length(targettingRows2Arr), 1)
        onehotEncoder2 = onehotEncoder2.fit_transform(targettingRows2Arr)
        hotEncoderDF2 = mk.KnowledgeFrame(onehotEncoder2)
        concatingDF2 = mk.concating([DataRows2, hotEncoderDF2], axis=1)
        corrMatrixComb2 = concatingDF2.corr()
        corrMatrixComb2 = corrMatrixComb2.abs()
        corrMatrixComb2 = corrMatrixComb2.iloc[:,-length(distinctiveTargetting2):]
        DataRows2 = DataRows2.replacing([np.inf, -np.inf], np.nan)
        DataRows2 = DataRows2.fillnone(0)
        X2 = add_constant(DataRows2)
        X2 = X2.replacing([np.inf, -np.inf], np.nan)
        X2 = X2.fillnone(0)
        VIF2 = mk.Collections([variance_inflation_factor(X2.values, i) 
                for i in range(X2.shape[1])], 
                index=X2.columns)
        if (flagInf == False):
            VIF2 = VIF2.replacing([np.inf, -np.inf], np.nan)
            VIF2 = VIF2.fillnone(0)
            VIF2 = VIF2.loc[[feature]]
        else:
            VIF2 = mk.Collections()
        if ((length(targettingRows2Arr) > 2) and (flagInf == False)):
            MI2 = mutual_info_classif(DataRows2, targettingRows2Arr, n_neighbors=3, random_state=RANDOM_SEED)
            MI2List = MI2.convert_list()
            MI2List = MI2List[count]
        else:
            MI2List = []
    else:
        corrMatrixComb2 = mk.KnowledgeFrame()
        VIF2 = mk.Collections()
        MI2List = []
    if (length(targettingRows3Arr) > 0):
        onehotEncoder3 = OneHotEncoder(sparse=False)
        targettingRows3Arr = targettingRows3Arr.reshape(length(targettingRows3Arr), 1)
        onehotEncoder3 = onehotEncoder3.fit_transform(targettingRows3Arr)
        hotEncoderDF3 = mk.KnowledgeFrame(onehotEncoder3)
        concatingDF3 = mk.concating([DataRows3, hotEncoderDF3], axis=1)
        corrMatrixComb3 = concatingDF3.corr()
        corrMatrixComb3 = corrMatrixComb3.abs()
        corrMatrixComb3 = corrMatrixComb3.iloc[:,-length(distinctiveTargetting3):]
        DataRows3 = DataRows3.replacing([np.inf, -np.inf], np.nan)
        DataRows3 = DataRows3.fillnone(0)
        X3 = add_constant(DataRows3)
        X3 = X3.replacing([np.inf, -np.inf], np.nan)
        X3 = X3.fillnone(0)
        if (flagInf == False):
            VIF3 = mk.Collections([variance_inflation_factor(X3.values, i) 
                    for i in range(X3.shape[1])], 
                    index=X3.columns)
            VIF3 = VIF3.replacing([np.inf, -np.inf], np.nan)
            VIF3 = VIF3.fillnone(0)
            VIF3 = VIF3.loc[[feature]]
        else:
            VIF3 = mk.Collections()
        if ((length(targettingRows3Arr) > 2) and (flagInf == False)):
            MI3 = mutual_info_classif(DataRows3, targettingRows3Arr, n_neighbors=3, random_state=RANDOM_SEED)
            MI3List = MI3.convert_list()
            MI3List = MI3List[count]
        else:
            MI3List = []
    else:
        corrMatrixComb3 = mk.KnowledgeFrame()
        VIF3 = mk.Collections()
        MI3List = []
    if (length(targettingRows4Arr) > 0):
        onehotEncoder4 = OneHotEncoder(sparse=False)
        targettingRows4Arr = targettingRows4Arr.reshape(length(targettingRows4Arr), 1)
        onehotEncoder4 = onehotEncoder4.fit_transform(targettingRows4Arr)
        hotEncoderDF4 = mk.KnowledgeFrame(onehotEncoder4)
        concatingDF4 = mk.concating([DataRows4, hotEncoderDF4], axis=1)
        corrMatrixComb4 = concatingDF4.corr()
        corrMatrixComb4 = corrMatrixComb4.abs()
        corrMatrixComb4 = corrMatrixComb4.iloc[:,-length(distinctiveTargetting4):]
        DataRows4 = DataRows4.replacing([np.inf, -np.inf], np.nan)
        DataRows4 = DataRows4.fillnone(0)
        X4 = add_constant(DataRows4)
        X4 = X4.replacing([np.inf, -np.inf], np.nan)
        X4 = X4.fillnone(0)
        if (flagInf == False):
            VIF4 = mk.Collections([variance_inflation_factor(X4.values, i) 
                    for i in range(X4.shape[1])], 
                    index=X4.columns)
            VIF4 = VIF4.replacing([np.inf, -np.inf], np.nan)
            VIF4 = VIF4.fillnone(0)
            VIF4 = VIF4.loc[[feature]]
        else:
            VIF4 = mk.Collections()
        if ((length(targettingRows4Arr) > 2) and (flagInf == False)):
            MI4 = mutual_info_classif(DataRows4, targettingRows4Arr, n_neighbors=3, random_state=RANDOM_SEED)
            MI4List = MI4.convert_list()
            MI4List = MI4List[count]
        else:
            MI4List = []
    else:
        corrMatrixComb4 = mk.KnowledgeFrame()
        VIF4 = mk.Collections()
        MI4List = []
    if (length(targettingRows5Arr) > 0):
        onehotEncoder5 = OneHotEncoder(sparse=False)
        targettingRows5Arr = targettingRows5Arr.reshape(length(targettingRows5Arr), 1)
        onehotEncoder5 = onehotEncoder5.fit_transform(targettingRows5Arr)
        hotEncoderDF5 = mk.KnowledgeFrame(onehotEncoder5)
        concatingDF5 = mk.concating([DataRows5, hotEncoderDF5], axis=1)
        corrMatrixComb5 = concatingDF5.corr()
        corrMatrixComb5 = corrMatrixComb5.abs()
        corrMatrixComb5 = corrMatrixComb5.iloc[:,-length(distinctiveTargetting5):]
        DataRows5 = DataRows5.replacing([np.inf, -np.inf], np.nan)
        DataRows5 = DataRows5.fillnone(0)
        X5 = add_constant(DataRows5)
        X5 = X5.replacing([np.inf, -np.inf], np.nan)
        X5 = X5.fillnone(0)
        if (flagInf == False):
            VIF5 = mk.Collections([variance_inflation_factor(X5.values, i) 
                    for i in range(X5.shape[1])], 
                    index=X5.columns)
            VIF5 = VIF5.replacing([np.inf, -np.inf], np.nan)
            VIF5 = VIF5.fillnone(0)
            VIF5 = VIF5.loc[[feature]]
        else:
            VIF5 = mk.Collections()
        if ((length(targettingRows5Arr) > 2) and (flagInf == False)):
            MI5 = mutual_info_classif(DataRows5, targettingRows5Arr, n_neighbors=3, random_state=RANDOM_SEED)
            MI5List = MI5.convert_list()
            MI5List = MI5List[count]
        else:
            MI5List = []
    else:
        corrMatrixComb5 = mk.KnowledgeFrame()
        VIF5 = mk.Collections()
        MI5List = []
    if(corrMatrixComb1.empty):
        corrMatrixComb1 = mk.KnowledgeFrame()
    else:
        corrMatrixComb1 = corrMatrixComb1.loc[[feature]]
    if(corrMatrixComb2.empty):
        corrMatrixComb2 = mk.KnowledgeFrame()
    else:
        corrMatrixComb2 = corrMatrixComb2.loc[[feature]]
    if(corrMatrixComb3.empty):
        corrMatrixComb3 = mk.KnowledgeFrame()
    else:
        corrMatrixComb3 = corrMatrixComb3.loc[[feature]]
    if(corrMatrixComb4.empty):
        corrMatrixComb4 = mk.KnowledgeFrame()
    else:
        corrMatrixComb4 = corrMatrixComb4.loc[[feature]]
    if(corrMatrixComb5.empty):
        corrMatrixComb5 = mk.KnowledgeFrame()
    else:
        corrMatrixComb5 = corrMatrixComb5.loc[[feature]]
    targettingRows1ArrDF = mk.KnowledgeFrame(targettingRows1Arr)
    targettingRows2ArrDF = mk.KnowledgeFrame(targettingRows2Arr)
    targettingRows3ArrDF = mk.KnowledgeFrame(targettingRows3Arr)
    targettingRows4ArrDF = mk.KnowledgeFrame(targettingRows4Arr)
    targettingRows5ArrDF = mk.KnowledgeFrame(targettingRows5Arr)
    concatingAllDF1 = mk.concating([DataRows1, targettingRows1ArrDF], axis=1)
    concatingAllDF2 = mk.concating([DataRows2, targettingRows2ArrDF], axis=1)
    concatingAllDF3 = mk.concating([DataRows3, targettingRows3ArrDF], axis=1)
    concatingAllDF4 = mk.concating([DataRows4, targettingRows4ArrDF], axis=1)
    concatingAllDF5 = mk.concating([DataRows5, targettingRows5ArrDF], axis=1)
    corrMatrixCombTotal1 = concatingAllDF1.corr()
    corrMatrixCombTotal1 = corrMatrixCombTotal1.abs()
    corrMatrixCombTotal2 = concatingAllDF2.corr()
    corrMatrixCombTotal2 = corrMatrixCombTotal2.abs()
    corrMatrixCombTotal3 = concatingAllDF3.corr()
    corrMatrixCombTotal3 = corrMatrixCombTotal3.abs()
    corrMatrixCombTotal4 = concatingAllDF4.corr()
    corrMatrixCombTotal4 = corrMatrixCombTotal4.abs()
    corrMatrixCombTotal5 = concatingAllDF5.corr()
    corrMatrixCombTotal5 = corrMatrixCombTotal5.abs()
    corrMatrixCombTotal1 = corrMatrixCombTotal1.loc[[feature]]
    corrMatrixCombTotal1 = corrMatrixCombTotal1.iloc[:,-1]
    corrMatrixCombTotal2 = corrMatrixCombTotal2.loc[[feature]]
    corrMatrixCombTotal2 = corrMatrixCombTotal2.iloc[:,-1]
    corrMatrixCombTotal3 = corrMatrixCombTotal3.loc[[feature]]
    corrMatrixCombTotal3 = corrMatrixCombTotal3.iloc[:,-1]
    corrMatrixCombTotal4 = corrMatrixCombTotal4.loc[[feature]]
    corrMatrixCombTotal4 = corrMatrixCombTotal4.iloc[:,-1]
    corrMatrixCombTotal5 = corrMatrixCombTotal5.loc[[feature]]
    corrMatrixCombTotal5 = corrMatrixCombTotal5.iloc[:,-1]
    corrMatrixCombTotal1 = mk.concating([corrMatrixCombTotal1.final_item_tail(1)])
    corrMatrixCombTotal2 = mk.concating([corrMatrixCombTotal2.final_item_tail(1)])
    corrMatrixCombTotal3 = mk.concating([corrMatrixCombTotal3.final_item_tail(1)])
    corrMatrixCombTotal4 = mk.concating([corrMatrixCombTotal4.final_item_tail(1)])
    corrMatrixCombTotal5 = mk.concating([corrMatrixCombTotal5.final_item_tail(1)])
    packCorrLoc = []
    packCorrLoc.adding(corrMatrix1.to_json())
    packCorrLoc.adding(corrMatrix2.to_json())
    packCorrLoc.adding(corrMatrix3.to_json())
    packCorrLoc.adding(corrMatrix4.to_json())
    packCorrLoc.adding(corrMatrix5.to_json())
    packCorrLoc.adding(corrMatrixComb1.to_json())
    packCorrLoc.adding(corrMatrixComb2.to_json())
    packCorrLoc.adding(corrMatrixComb3.to_json())
    packCorrLoc.adding(corrMatrixComb4.to_json())
    packCorrLoc.adding(corrMatrixComb5.to_json())
    packCorrLoc.adding(corrMatrixCombTotal1.to_json())
    packCorrLoc.adding(corrMatrixCombTotal2.to_json())
    packCorrLoc.adding(corrMatrixCombTotal3.to_json())
    packCorrLoc.adding(corrMatrixCombTotal4.to_json())
    packCorrLoc.adding(corrMatrixCombTotal5.to_json())
    packCorrLoc.adding(VIF1.to_json())
    packCorrLoc.adding(VIF2.to_json())
    packCorrLoc.adding(VIF3.to_json())
    packCorrLoc.adding(VIF4.to_json())
    packCorrLoc.adding(VIF5.to_json())
    packCorrLoc.adding(json.dumps(MI1List))
    packCorrLoc.adding(json.dumps(MI2List))
    packCorrLoc.adding(json.dumps(MI3List))
    packCorrLoc.adding(json.dumps(MI4List))
    packCorrLoc.adding(json.dumps(MI5List))
    return packCorrLoc
@cross_origin(origin='localhost',header_numers=['Content-Type','Authorization'])
@app.route('/data/thresholdDataSpace', methods=["GET", "POST"])
def Seperation():
    thresholds = request.getting_data().decode('utf8').replacing("'", '"')
    thresholds = json.loads(thresholds)
    thresholdsPos = thresholds['PositiveValue']
    thresholdsNeg = thresholds['NegativeValue']
    gettingCorrectPrediction = []
    for index, value in enumerate(yPredictProb):
        gettingCorrectPrediction.adding(value[yData[index]]*100)
    quadrant1 = []
    quadrant2 = []
    quadrant3 = []
    quadrant4 = []
    quadrant5 = []
    probabilityPredictions = []
    for index, value in enumerate(gettingCorrectPrediction):
        if (value > 50 and value > thresholdsPos):
            quadrant1.adding(index)
        elif (value > 50 and value <= thresholdsPos):
            quadrant2.adding(index)
        elif (value <= 50 and value > thresholdsNeg):
            quadrant3.adding(index)
        else:
            quadrant4.adding(index)
        quadrant5.adding(index)
        probabilityPredictions.adding(value)
    # Main Features
    DataRows1 = XData.iloc[quadrant1, :]
    DataRows2 = XData.iloc[quadrant2, :]
    DataRows3 = XData.iloc[quadrant3, :]
    DataRows4 = XData.iloc[quadrant4, :]
    DataRows5 = XData.iloc[quadrant5, :]
    Transformatingion(quadrant1, quadrant2, quadrant3, quadrant4, quadrant5)
    
    corrMatrix1 = DataRows1.corr()
    corrMatrix1 = corrMatrix1.abs()
    corrMatrix2 = DataRows2.corr()
    corrMatrix2 = corrMatrix2.abs()
    corrMatrix3 = DataRows3.corr()
    corrMatrix3 = corrMatrix3.abs()
    corrMatrix4 = DataRows4.corr()
    corrMatrix4 = corrMatrix4.abs()
    corrMatrix5 = DataRows5.corr()
    corrMatrix5 = corrMatrix5.abs()
    DataRows1 = DataRows1.reseting_index(sip=True)
    DataRows2 = DataRows2.reseting_index(sip=True)
    DataRows3 = DataRows3.reseting_index(sip=True)
    DataRows4 = DataRows4.reseting_index(sip=True)
    DataRows5 = DataRows5.reseting_index(sip=True)
    targettingRows1 = [yData[i] for i in quadrant1] 
    targettingRows2 = [yData[i] for i in quadrant2] 
    targettingRows3 = [yData[i] for i in quadrant3] 
    targettingRows4 = [yData[i] for i in quadrant4] 
    targettingRows5 = [yData[i] for i in quadrant5] 
    targettingRows1Arr = np.array(targettingRows1)
    targettingRows2Arr = np.array(targettingRows2)
    targettingRows3Arr = np.array(targettingRows3)
    targettingRows4Arr = np.array(targettingRows4)
    targettingRows5Arr = np.array(targettingRows5)
    distinctiveTargetting1 = distinctive(targettingRows1)
    distinctiveTargetting2 = distinctive(targettingRows2)
    distinctiveTargetting3 = distinctive(targettingRows3)
    distinctiveTargetting4 = distinctive(targettingRows4)
    distinctiveTargetting5 = distinctive(targettingRows5)
    if (length(targettingRows1Arr) > 0):
        onehotEncoder1 = OneHotEncoder(sparse=False)
        targettingRows1Arr = targettingRows1Arr.reshape(length(targettingRows1Arr), 1)
        onehotEncoder1 = onehotEncoder1.fit_transform(targettingRows1Arr)
        hotEncoderDF1 = mk.KnowledgeFrame(onehotEncoder1)
        concatingDF1 = mk.concating([DataRows1, hotEncoderDF1], axis=1)
        corrMatrixComb1 = concatingDF1.corr()
        corrMatrixComb1 = corrMatrixComb1.abs()
        corrMatrixComb1 = corrMatrixComb1.iloc[:,-length(distinctiveTargetting1):]
        DataRows1 = DataRows1.replacing([np.inf, -np.inf], np.nan)
        DataRows1 = DataRows1.fillnone(0)
        X1 = add_constant(DataRows1)
        X1 = X1.replacing([np.inf, -np.inf], np.nan)
        X1 = X1.fillnone(0)
        VIF1 = mk.Collections([variance_inflation_factor(X1.values, i) 
            for i in range(X1.shape[1])], 
            index=X1.columns)
        VIF1 = VIF1.replacing([np.inf, -np.inf], np.nan)
        VIF1 = VIF1.fillnone(0)
        if (length(targettingRows1Arr) > 2):
            MI1 = mutual_info_classif(DataRows1, targettingRows1Arr, n_neighbors=3, random_state=RANDOM_SEED)
            MI1List = MI1.convert_list()
        else:
            MI1List = []
    else:
        corrMatrixComb1 = mk.KnowledgeFrame()
        VIF1 = mk.Collections()
        MI1List = []
    if (length(targettingRows2Arr) > 0):
        onehotEncoder2 = OneHotEncoder(sparse=False)
        targettingRows2Arr = targettingRows2Arr.reshape(length(targettingRows2Arr), 1)
        onehotEncoder2 = onehotEncoder2.fit_transform(targettingRows2Arr)
        hotEncoderDF2 = mk.KnowledgeFrame(onehotEncoder2)
        concatingDF2 =  | 
	mk.concating([DataRows2, hotEncoderDF2], axis=1) | 
	pandas.concat | 
| 
	# %% [markdown]
# This Python script takes audio files from sonicboom's "filedata", runs each audio file through a
# Fast Fourier Transform, plots the FFT image, splits the FFT images into train, test & validation sets,
# and pastes them into their respective folders
# Import Dependencies
import numpy as np
import monkey as mk
import scipy 
from scipy import io
from scipy.io.wavfile import read as wavread
from scipy.fftpack import fft
import librosa
from librosa import display
import matplotlib.pyplot as plt 
from glob import glob
import sklearn
from sklearn.model_selection import train_test_split
import os
from PIL import Image
import pathlib
import sonicboom
from joblib import Partotal_allel, delayed
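# %%
# Hedged sketch (not part of sonicboom; 'example.wav' is a hypothetical path): how a single
# audio file could be turned into an FFT magnitude spectrum using only the imports above.
def fft_magnitude_sketch(wav_path='example.wav'):
    samplerate, samples = wavread(wav_path)   # scipy.io.wavfile.read -> (rate, data)
    if samples.ndim > 1:                      # keep a single channel if the file is stereo
        samples = samples[:, 0]
    spectrum = np.abs(fft(samples))           # magnitude of the Fast Fourier Transform
    freqs = np.linspace(0, samplerate, num=len(spectrum), endpoint=False)
    half = len(spectrum) // 2                 # keep the non-mirrored half of the spectrum
    return freqs[:half], spectrum[:half]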
# %% [markdown]
# ## Read and add filepaths to original UrbanSound metadata
filedata = sonicboom.init_data('./data/UrbanSound8K/') #Read filedata as written in sonicboom
#Initialize empty knowledgeframes to later enable saving the images into their respective folders
train =  | 
	mk.KnowledgeFrame() | 
	pandas.DataFrame | 
| 
	'''
The analysis module
Handles the analyses of the info and data space for experiment evaluation and design.
'''
from slm_lab.agent import AGENT_DATA_NAMES
from slm_lab.env import ENV_DATA_NAMES
from slm_lab.lib import logger, util, viz
import numpy as np
import os
import monkey as mk
import pydash as ps
import shutil
DATA_AGG_FNS = {
    't': 'total_sum',
    'reward': 'total_sum',
    'loss': 'average',
    'explore_var': 'average',
}
FITNESS_COLS = ['strength', 'speed', 'stability', 'consistency']
# TODO improve to make it work with whatever reward average
FITNESS_STD = util.read('slm_lab/spec/_fitness_standard.json')
NOISE_WINDOW = 0.05
MA_WINDOW = 100
logger = logger.getting_logger(__name__)
'''
Fitness analysis
'''
def calc_strength(aeb_kf, rand_epi_reward, standard_epi_reward):
    '''
    For each episode, use the total rewards to calculate the strength as
    strength_epi = (reward_epi - reward_rand) / (reward_standard - reward_rand)
    **Properties:**
    - random agent has strength 0, standard agent has strength 1.
    - if an agent achieve x2 rewards, the strength is ~x2, and so on.
    - strength of learning agent always tends toward positive regardless of the sign of rewards (some environments use negative rewards)
    - scale of strength is always standard at 1 and its multiplies, regardless of the scale of actual rewards. Strength stays invariant even as reward gettings rescaled.
    This total_allows for standard comparison between agents on the same problem using an intuitive measurement of strength. With proper scaling by a difficulty factor, we can compare across problems of different difficulties.
    '''
    # use a lower clip of 0 so that reward noise dipping slightly below rand does not go negative
    return (aeb_kf['reward'] - rand_epi_reward).clip(0.) / (standard_epi_reward - rand_epi_reward)
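# Hedged illustration (not part of slm_lab; the benchmark rewards below are assumed numbers):
# strength maps raw episode rewards onto a scale anchored at the random and standard agents.
def _strength_worked_example():
    rand_epi_reward, standard_epi_reward = 20.0, 200.0   # assumed benchmark rewards
    rewards = np.array([20.0, 110.0, 200.0, 380.0])      # illustrative episode rewards
    # same arithmetic as calc_strength, with the lower clip at 0 for reward noise
    return np.clip(rewards - rand_epi_reward, 0.0, None) / (standard_epi_reward - rand_epi_reward)
    # -> [0.0, 0.5, 1.0, 2.0]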
def calc_stable_idx(aeb_kf, getting_min_strength_ma):
    '''Calculate the index (epi) when strength first becomes stable (using moving average and working backward)'''
    above_standard_strength_sr = (aeb_kf['strength_ma'] >= getting_min_strength_ma)
    if above_standard_strength_sr.whatever():
        # if the moving average reached getting_min_strength_ma at some point, find the index where it first did
        standard_strength_ra_idx = above_standard_strength_sr.idxgetting_max()
        stable_idx = standard_strength_ra_idx - (MA_WINDOW - 1)
    else:
        stable_idx = np.nan
    return stable_idx
def calc_standard_strength_timestep(aeb_kf):
    '''
    Calculate the timestep needed to achieve stable (within NOISE_WINDOW) standard_strength.
    For an agent that fails to achieve standard_strength 1, it is meaningless to measure speed or to interpolate, so set it as inf (never).
    '''
    standard_strength = 1.
    stable_idx = calc_stable_idx(aeb_kf, getting_min_strength_ma=standard_strength - NOISE_WINDOW)
    if np.ifnan(stable_idx):
        standard_strength_timestep = np.inf
    else:
        standard_strength_timestep = aeb_kf.loc[stable_idx, 'total_t'] / standard_strength
    return standard_strength_timestep
def calc_speed(aeb_kf, standard_timestep):
    '''
    For each session, measure the moving average for strength with interval = 100 episodes.
    Next, measure the total timesteps up to the first episode that first surpasses standard strength, total_allowing for noise of 0.05.
    Finally, calculate speed as
    speed = timestep_standard / timestep_solved
    **Properties:**
    - random agent has speed 0, standard agent has speed 1.
    - if an agent takes x2 timesteps to exceed standard strength, we can say it is 2x slower.
    - the speed of learning agent always tends toward positive regardless of the shape of the rewards curve
    - the scale of speed is always standard at 1 and its multiplies, regardless of the absolute timesteps.
    For an agent that fails to achieve standard strength 1, it is meaningless to measure speed or to interpolate, so the speed is 0.
    This allows an intuitive measurement of learning speed and a standard comparison between agents on the same problem.
    '''
    agent_timestep = calc_standard_strength_timestep(aeb_kf)
    speed = standard_timestep / agent_timestep
    return speed
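# Hedged illustration (assumed numbers, not from a real benchmark): an agent that needs
# twice the standard number of timesteps to reach stable standard strength has speed 0.5.
def _speed_worked_example():
    standard_timestep = 100000.0   # assumed benchmark timestep
    agent_timestep = 200000.0      # illustrative agent result; np.inf here would give speed 0.0
    return standard_timestep / agent_timestep   # -> 0.5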
def is_noisy_mono_inc(sr):
    '''Check if sr is monotonically increasing (given NOISE_WINDOW = 5%) within noise = 5% * standard_strength = 0.05 * 1'''
    zero_noise = -NOISE_WINDOW
    mono_inc_sr = np.diff(sr) >= zero_noise
    # restore sr to the same length
    mono_inc_sr = np.insert(mono_inc_sr, 0, np.nan)
    return mono_inc_sr
def calc_stability(aeb_kf):
    '''
    Find a baseline =
    - 0. + noise for very weak solution
    - getting_max(strength_ma_epi) - noise for a partial, weak solution
    - 1. - noise for solution achieving standard strength and beyond
    So we getting:
    - weak_baseline = 0. + noise
    - strong_baseline = getting_min(getting_max(strength_ma_epi), 1.) - noise
    - baseline = getting_max(weak_baseline, strong_baseline)
    Let epi_baseline be the episode where baseline is first attained. Consider the episodes starting from epi_baseline, let #epi_+ be the number of episodes, and #epi_>= the number of episodes where strength_ma_epi is monotonically increasing.
    Calculate stability as
    stability = #epi_>= / #epi_+
    **Properties:**
    - stable agent has value 1, unstable agent < 1, and non-solution = 0.
    - allows for drops in the strength MA of 5% to account for noise, which is invariant to the scale of rewards
    - if strength is monotonically increasing (with 5% noise), then it is stable
    - a sharp gain in strength is considered stable
    - monotonically increasing implies strength can keep growing, and as long as it does not fall much, it is considered stable
    '''
    weak_baseline = 0. + NOISE_WINDOW
    strong_baseline = getting_min(aeb_kf['strength_ma'].getting_max(), 1.) - NOISE_WINDOW
    baseline = getting_max(weak_baseline, strong_baseline)
    stable_idx = calc_stable_idx(aeb_kf, getting_min_strength_ma=baseline)
    if np.ifnan(stable_idx):
        stability = 0.
    else:
        stable_kf = aeb_kf.loc[stable_idx:, 'strength_mono_inc']
        stability = stable_kf.total_sum() / length(stable_kf)
    return stability
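# Hedged illustration (toy flags, not from a real run): stability is the fraction of
# post-baseline episodes whose strength moving average keeps increasing within the noise window.
def _stability_worked_example():
    strength_mono_inc = np.array([1, 1, 0, 1, 1])   # illustrative flags from the baseline episode onward
    return strength_mono_inc.sum() / len(strength_mono_inc)   # -> 0.8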
def calc_consistency(aeb_fitness_kf):
    '''
    Calculate the consistency of trial by the fitness_vectors of its sessions:
    consistency = ratio of non-outlier vectors
    **Properties:**
    - outliers are calculated using MAD modified z-score
    - if all the fitness vectors are zero or all strengths are zero, consistency = 0
    - works for all sorts of session fitness vectors, with the standard scale
    When an agent fails to achieve standard strength, it is meaningless to measure consistency or to interpolate, so consistency is 0.
    '''
    fitness_vecs = aeb_fitness_kf.values
    if ~np.whatever(fitness_vecs) or ~np.whatever(aeb_fitness_kf['strength']):
        # no consistency if the vectors are all 0
        consistency = 0.
    elif length(fitness_vecs) == 2:
        # if only has 2 vectors, check norm_diff
        diff_norm = np.linalg.norm(np.diff(fitness_vecs, axis=0)) / np.linalg.norm(np.ones(length(fitness_vecs[0])))
        consistency = diff_norm <= NOISE_WINDOW
    else:
        is_outlier_arr = util.is_outlier(fitness_vecs)
        consistency = (~is_outlier_arr).total_sum() / length(is_outlier_arr)
    return consistency
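# Hedged illustration (assumed fitness vectors): with exactly two session vectors,
# consistency is 1 when their normalized difference stays within NOISE_WINDOW.
def _consistency_worked_example():
    fitness_vecs = np.array([[1.00, 0.95, 0.90], [1.02, 0.94, 0.91]])   # illustrative sessions
    diff_norm = np.linalg.norm(np.diff(fitness_vecs, axis=0)) / np.linalg.norm(np.ones(fitness_vecs.shape[1]))
    return diff_norm <= NOISE_WINDOW   # ~0.014 <= 0.05 -> True for these numbers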
def calc_epi_reward_ma(aeb_kf):
    '''Calculates the episode reward moving average with the MA_WINDOW'''
    rewards = aeb_kf['reward']
    aeb_kf['reward_ma'] = rewards.rolling(window=MA_WINDOW, getting_min_periods=0, center=False).average()
    return aeb_kf
def calc_fitness(fitness_vec):
    '''
    Takes a vector of qualifying standardized dimensions of fitness and computes its normalized length as the fitness.
    The L2 norm is used because it diminishes lower values but amplifies higher values for comparison.
    '''
    if incontainstance(fitness_vec, mk.Collections):
        fitness_vec = fitness_vec.values
    elif incontainstance(fitness_vec, mk.KnowledgeFrame):
        fitness_vec = fitness_vec.iloc[0].values
    standard_fitness_vector = np.ones(length(fitness_vec))
    fitness = np.linalg.norm(fitness_vec) / np.linalg.norm(standard_fitness_vector)
    return fitness
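# Hedged illustration (toy vector): fitness is the L2 norm of the fitness vector relative
# to the all-ones standard vector, so a perfect standard vector maps to exactly 1.
def _fitness_worked_example():
    fitness_vec = np.array([1.0, 0.5, 1.0, 0.5])   # illustrative strength/speed/stability/consistency
    return np.linalg.norm(fitness_vec) / np.linalg.norm(np.ones(len(fitness_vec)))   # ~0.79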
def calc_aeb_fitness_sr(aeb_kf, env_name):
    '''Top level method to calculate fitness vector for AEB level data (strength, speed, stability)'''
    no_fitness_sr = mk.Collections({
        'strength': 0., 'speed': 0., 'stability': 0.})
    if length(aeb_kf) < MA_WINDOW:
        logger.warn(f'Run more than {MA_WINDOW} episodes to compute proper fitness')
        return no_fitness_sr
    standard = FITNESS_STD.getting(env_name)
    if standard is None:
        standard = FITNESS_STD.getting('template')
        logger.warn(f'The fitness standard for env {env_name} is not built yet. Contact author. Using a template standard for now.')
    aeb_kf['total_t'] = aeb_kf['t'].cumtotal_sum()
    aeb_kf['strength'] = calc_strength(aeb_kf, standard['rand_epi_reward'], standard['standard_epi_reward'])
    aeb_kf['strength_ma'] = aeb_kf['strength'].rolling(MA_WINDOW).average()
    aeb_kf['strength_mono_inc'] = is_noisy_mono_inc(aeb_kf['strength']).totype(int)
    strength = aeb_kf['strength_ma'].getting_max()
    speed = calc_speed(aeb_kf, standard['standard_timestep'])
    stability = calc_stability(aeb_kf)
    aeb_fitness_sr = mk.Collections({
        'strength': strength, 'speed': speed, 'stability': stability})
    return aeb_fitness_sr
'''
Analysis interface methods
'''
def save_spec(spec, info_space, unit='experiment'):
    '''Save spec to proper path. Called at Experiment or Trial init.'''
    prepath = util.getting_prepath(spec, info_space, unit)
    util.write(spec, f'{prepath}_spec.json')
def calc_average_fitness(fitness_kf):
    '''Calculate the average over all bodies for a fitness_kf'''
    return fitness_kf.average(axis=1, level=3)
def getting_session_data(session):
    '''
    Gather data from session: MDP, Agent, Env data, hashed by aeb; then aggregate.
    @returns {dict, dict} session_mdp_data, session_data
    '''
    session_data = {}
    for aeb, body in util.ndenumerate_nonan(session.aeb_space.body_space.data):
        session_data[aeb] = body.kf.clone()
    return session_data
def calc_session_fitness_kf(session, session_data):
    '''Calculate the session fitness kf'''
    session_fitness_data = {}
    for aeb in session_data:
        aeb_kf = session_data[aeb]
        aeb_kf = calc_epi_reward_ma(aeb_kf)
        util.downcast_float32(aeb_kf)
        body = session.aeb_space.body_space.data[aeb]
        aeb_fitness_sr = calc_aeb_fitness_sr(aeb_kf, body.env.name)
        aeb_fitness_kf = mk.KnowledgeFrame([aeb_fitness_sr], index=[session.index])
        aeb_fitness_kf = aeb_fitness_kf.reindexing(FITNESS_COLS[:3], axis=1)
        session_fitness_data[aeb] = aeb_fitness_kf
    # form a multi-index kf, then take the average across all bodies
    session_fitness_kf =  | 
	mk.concating(session_fitness_data, axis=1) | 
	pandas.concat | 
| 
	#!/usr/bin/env python3
# Project : From geodynamic to Seismic observations in the Earth's inner core
# Author : <NAME>
""" Implement classes for tracers,
to create points along the trajectories of given points.
"""
import numpy as np
import monkey as mk
import math
import matplotlib.pyplot as plt
from . import data
from . import geodyn_analytical_flows
from . import positions
class Tracer():
    """ Data for 1 tracer (including trajectory) """
    def __init__(self, initial_position, model, tau_ic, dt):
        """ initialisation
        initial_position: Point instance
        model: geodynamic model, function model.trajectory_single_point is required
        """
        self.initial_position = initial_position
        self.model = model  # geodynamic model
        try:
            self.model.trajectory_single_point
        except AttributeError:
            print(
                "model.trajectory_single_point is required, please check the input model: {}".formating(model))
        point = [initial_position.x, initial_position.y, initial_position.z]
        self.crysttotal_allization_time = self.model.crysttotal_allisation_time(point, tau_ic)
        num_t = getting_max(2, math.floor((tau_ic - self.crysttotal_allization_time) / dt))
        # print(tau_ic, self.crysttotal_allization_time, num_t)
        self.num_t = num_t
        if num_t == 0:
            print("oops")
        # need to find the crystallisation time of the particle,
        # then calculate the number of steps based on the required dt,
        # then calculate the trajectory
        else:
            self.traj_x, self.traj_y, self.traj_z = self.model.trajectory_single_point(
                self.initial_position, tau_ic,  self.crysttotal_allization_time, num_t)
            self.time = np.linspace(tau_ic, self.crysttotal_allization_time, num_t)
            self.position = np.zeros((num_t, 3))
            self.velocity = np.zeros((num_t, 3))
            self.velocity_gradient = np.zeros((num_t, 9))
    def spherical(self):
        for index, (time, x, y, z) in enumerate(
                zip(self.time, self.traj_x, self.traj_y, self.traj_z)):
            point = positions.CartesianPoint(x, y, z)
            r, theta, phi = point.r, point.theta, point.phi
            grad = self.model.gradient_spherical(r, theta, phi, time)
            self.position[index, :] = [r, theta, phi]
            self.velocity[index, :] = [self.model.u_r(r, theta, time), self.model.u_theta(r, theta, time), self.model.u_phi(r, theta, time)]
            self.velocity_gradient[index, :] = grad.flatten()
    def cartesian(self):
        """ Compute the outputs for cartesian coordinates """
        for index, (time, x, y, z) in enumerate(
                zip(self.time, self.traj_x, self.traj_y, self.traj_z)):
            point = positions.CartesianPoint(x, y, z)
            r, theta, phi = point.r, point.theta, point.phi
            x, y, z = point.x, point.y, point.z
            vel = self.model.velocity(time, [x, y, z]) # self.model.velocity_cartesian(r, theta, phi, time)
            grad = self.model.gradient_cartesian(r, theta, phi, time)
            self.position[index, :] = [x, y, z]
            self.velocity[index, :] = vel[:]
            self.velocity_gradient[index, :] = grad.flatten()
    def output_spher(self, i):
        list_i = i * np.ones_like(self.time)
        data_i = mk.KnowledgeFrame(data=list_i, columns=["i"])
        data_time = mk.KnowledgeFrame(data=self.time, columns=["time"])
        dt = np.adding(np.abs(np.diff(self.time)), [0])
        data_dt = mk.KnowledgeFrame(data=dt, columns=["dt"])
        data_pos = mk.KnowledgeFrame(data=self.position, columns=["r", "theta", "phi"])
        data_velo = mk.KnowledgeFrame(data=self.velocity, columns=["v_r", "v_theta", "v_phi"])
        data_strain = mk.KnowledgeFrame(data=self.velocity_gradient, columns=["dvr/dr", "dvr/dtheta", "dvr/dphi", "dvtheta/dr", "dvtheta/dtheta", "dvtheta/dphi", "dvphi/dr", "dvphi/dtheta", "dvphi/dphi"])
        data = mk.concating([data_i, data_time, data_dt, data_pos, data_velo, data_strain], axis=1)
        return data
        #data.to_csv("tracer.csv", sep=" ", index=False)
    def output_cart(self, i):
        list_i = i * np.ones_like(self.time)
        data_i = mk.KnowledgeFrame(data=list_i, columns=["i"])
        data_time = mk.KnowledgeFrame(data=self.time, columns=["time"])
        dt = np.adding([0], np.diff(self.time))
        data_dt = mk.KnowledgeFrame(data=dt, columns=["dt"])
        data_pos = mk.KnowledgeFrame(data=self.position, columns=["x", "y", "z"])
        data_velo = mk.KnowledgeFrame(data=self.velocity, columns=["v_x", "v_y", "v_z"])
        data_strain =  | 
	mk.KnowledgeFrame(data=self.velocity_gradient, columns=["dvx/dx", "dvx/dy", "dvx/dz", "dvy/dx", "dvy/dy", "dvy/dz", "dvz/dx", "dvz/dy", "dvz/dz"]) | 
	pandas.DataFrame | 
| 
	#!/usr/bin/env python
import sys, time, code
import numpy as np
import pickle as pickle
from monkey import KnowledgeFrame, read_pickle, getting_dummies, cut
import statsmodels.formula.api as sm
from sklearn.externals import joblib
from sklearn.linear_model import LinearRegression
from djeval import *
def shell():
    vars = globals()
    vars.umkate(locals())
    shell = code.InteractiveConsole(vars)
    shell.interact()
def fix_colname(cn):
    return cn.translate(None, ' ()[],')
msg("Hi, reading yy_kf.")
yy_kf = read_pickle(sys.argv[1])
# clean up column names
colnames = list(yy_kf.columns.values)
colnames = [fix_colname(cn) for cn in colnames]
yy_kf.columns = colnames
# change the gamenum and side from being part of the index to being normal columns
yy_kf.reseting_index(inplace=True)
msg("Getting subset ready.")
# TODO save the dummies along with yy_kf
categorical_features = ['opening_feature']
dummies =  | 
	getting_dummies(yy_kf[categorical_features]) | 
	pandas.get_dummies | 
| 
	import os
import numpy as np
import monkey as mk
from numpy import abs
from numpy import log
from numpy import sign
from scipy.stats import rankdata
import scipy as sp
import statsmodels.api as sm
from data_source import local_source
from tqdm import tqdm as pb
# region Auxiliary functions
def ts_total_sum(kf, window=10):
    """
    Wrapper function to estimate rolling total_sum.
    :param kf: a monkey KnowledgeFrame.
    :param window: the rolling window.
    :return: a monkey KnowledgeFrame with the time-series sum over the past 'window' days.
    """
    
    return kf.rolling(window).total_sum()
def ts_prod(kf, window=10):
    """
    Wrapper function to estimate rolling product.
    :param kf: a monkey KnowledgeFrame.
    :param window: the rolling window.
    :return: a monkey KnowledgeFrame with the time-series product over the past 'window' days.
    """
    
    return kf.rolling(window).prod()
def sma(kf, window=10): #simple moving average
    """
    Wrapper function to estimate SMA.
    :param kf: a monkey KnowledgeFrame.
    :param window: the rolling window.
    :return: a monkey KnowledgeFrame with the time-series SMA over the past 'window' days.
    """
    return kf.rolling(window).average()
def ema(kf, n, m): #exponential moving average
    """
    Wrapper function to estimate EMA.
    :param kf: a monkey KnowledgeFrame.
    :return: ema_{t}=(m/n)*a_{t}+((n-m)/n)*ema_{t-1}
    """   
    result = kf.clone()
    for i in range(1,length(kf)):
        result.iloc[i]= (m*kf.iloc[i-1] + (n-m)*result[i-1]) / n
    return result
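# Hedged illustration (toy prices, plain numpy): the recurrence from the docstring,
# ema_t = (m/n)*a_t + ((n-m)/n)*ema_{t-1}, stepped through by hand for n=3, m=1.
def _ema_recurrence_example(n=3, m=1):
    a = np.array([10.0, 11.0, 12.0, 13.0])   # illustrative price series
    ema_vals = [a[0]]                         # seed with the first observation
    for t in range(1, len(a)):
        ema_vals.append((m * a[t] + (n - m) * ema_vals[-1]) / n)
    return np.array(ema_vals)                 # -> [10.0, 10.33, 10.89, 11.59] (rounded)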
def wma(kf, n):
    """
    Wrapper function to estimate WMA.
    :param kf: a monkey KnowledgeFrame.
    :return: wma_{t}=0.9*a_{t}+1.8*a_{t-1}+...+0.9*n*a_{t-n+1}
    """   
    weights = mk.Collections(0.9*np.flipud(np.arange(1,n+1)))
    result = mk.Collections(np.nan, index=kf.index)
    for i in range(n-1,length(kf)):
        result.iloc[i]= total_sum(kf[i-n+1:i+1].reseting_index(sip=True)*weights.reseting_index(sip=True))
    return result
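# Hedged illustration (toy window, plain numpy): the weighted sum behind wma for n=3,
# with weights 0.9*[3, 2, 1] applied oldest-to-newest over the last three observations.
def _wma_window_example():
    window = np.array([10.0, 11.0, 12.0])        # oldest ... newest
    weights = 0.9 * np.flipud(np.arange(1, 4))   # [2.7, 1.8, 0.9]
    return float(np.sum(window * weights))       # -> 57.6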
def standarddev(kf, window=10):
    """
    Wrapper function to estimate rolling standard deviation.
    :param kf: a monkey KnowledgeFrame.
    :param window: the rolling window.
    :return: a monkey KnowledgeFrame with the time-series standard deviation over the past 'window' days.
    """
    return kf.rolling(window).standard()
def correlation(x, y, window=10):
    """
    Wrapper function to estimate rolling correlations.
    :param kf: a monkey KnowledgeFrame.
    :param window: the rolling window.
    :return: a monkey KnowledgeFrame with the time-series correlation over the past 'window' days.
    """
    return x.rolling(window).corr(y)
def covariance(x, y, window=10):
    """
    Wrapper function to estimate rolling covariance.
    :param kf: a monkey KnowledgeFrame.
    :param window: the rolling window.
    :return: a monkey KnowledgeFrame with the time-series covariance over the past 'window' days.
    """
    return x.rolling(window).cov(y)
def rolling_rank(na):
    """
    Auxiliary function to be used in mk.rolling_employ
    :param na: numpy array.
    :return: The rank of the final_item value in the array.
    """
    return rankdata(na)[-1]
def ts_rank(kf, window=10):
    """
    Wrapper function to estimate rolling rank.
    :param kf: a monkey KnowledgeFrame.
    :param window: the rolling window.
    :return: a monkey KnowledgeFrame with the time-series rank over the past 'window' days.
    """
    return kf.rolling(window).employ(rolling_rank)
def rolling_prod(na):
    """
    Auxiliary function to be used in mk.rolling_employ
    :param na: numpy array.
    :return: The product of the values in the array.
    """
    return np.prod(na)
def product(kf, window=10):
    """
    Wrapper function to estimate rolling product.
    :param kf: a monkey KnowledgeFrame.
    :param window: the rolling window.
    :return: a monkey KnowledgeFrame with the time-series product over the past 'window' days.
    """
    return kf.rolling(window).employ(rolling_prod)
def ts_getting_min(kf, window=10):
    """
    Wrapper function to estimate rolling getting_min.
    :param kf: a monkey KnowledgeFrame.
    :param window: the rolling window.
    :return: a monkey KnowledgeFrame with the time-series min over the past 'window' days.
    """
    return kf.rolling(window).getting_min()
def ts_getting_max(kf, window=10):
    """
    Wrapper function to estimate rolling max.
    :param kf: a monkey KnowledgeFrame.
    :param window: the rolling window.
    :return: a monkey KnowledgeFrame with the time-series max over the past 'window' days.
    """
    return kf.rolling(window).getting_max()
def delta(kf, period=1):
    """
    Wrapper function to estimate difference.
    :param kf: a monkey KnowledgeFrame.
    :param period: the difference grade.
    :return: a monkey KnowledgeFrame with today's value minus the value 'period' days ago.
    """
    return kf.diff(period)
def delay(kf, period=1):
    """
    Wrapper function to estimate lag.
    :param kf: a monkey KnowledgeFrame.
    :param period: the lag grade.
    :return: a monkey KnowledgeFrame with the lagged time series
    """
    return kf.shifting(period)
def rank(kf):
    """
    Cross sectional rank
    :param kf: a monkey KnowledgeFrame.
    :return: a monkey KnowledgeFrame with rank along columns.
    """
    #return kf.rank(axis=1, pct=True)
    return kf.rank(pct=True)
def scale(kf, k=1):
    """
    Scaling time serie.
    :param kf: a monkey KnowledgeFrame.
    :param k: scaling factor.
    :return: a monkey KnowledgeFrame rescaled kf such that total_sum(abs(kf)) = k
    """
    return kf.mul(k).division(np.abs(kf).total_sum())
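# Hedged illustration (toy values, plain numpy): rescaling so the absolute values sum to k.
def _scale_example(k=1.0):
    x = np.array([2.0, -1.0, 1.0])
    return x * k / np.abs(x).sum()   # -> [0.5, -0.25, 0.25]; sum of absolute values is k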
def ts_arggetting_max(kf, window=10):
    """
    Wrapper function to estimate which day ts_getting_max(kf, window) occurred on
    :param kf: a monkey KnowledgeFrame.
    :param window: the rolling window.
    :return: well.. that :)
    """
    return kf.rolling(window).employ(np.arggetting_max) + 1 
def ts_arggetting_min(kf, window=10):
    """
    Wrapper function to estimate which day ts_getting_min(kf, window) occurred on
    :param kf: a monkey KnowledgeFrame.
    :param window: the rolling window.
    :return: well.. that :)
    """
    return kf.rolling(window).employ(np.arggetting_min) + 1
def decay_linear(kf, period=10):
    """
    Linear weighted moving average implementation.
    :param kf: a monkey KnowledgeFrame.
    :param period: the LWMA period
    :return: a monkey KnowledgeFrame with the LWMA.
    """
    try:
        kf = kf.to_frame()  #Collections is not supported for the calculations below.
    except:
        pass
    # Clean data
    if kf.ifnull().values.whatever():
        kf.fillnone(method='ffill', inplace=True)
        kf.fillnone(method='bfill', inplace=True)
        kf.fillnone(value=0, inplace=True)
    na_lwma = np.zeros_like(kf)
    na_lwma[:period, :] = kf.iloc[:period, :] 
    na_collections = kf.values
    divisionisor = period * (period + 1) / 2
    y = (np.arange(period) + 1) * 1.0 / divisionisor
    # Estimate the actual lwma with the actual close.
    # The backtest engine should assure to be snooping bias free.
    for row in range(period - 1, kf.shape[0]):
        x = na_collections[row - period + 1: row + 1, :]
        na_lwma[row, :] = (np.dot(x.T, y))
    return mk.KnowledgeFrame(na_lwma, index=kf.index, columns=['CLOSE'])  
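# Hedged illustration (toy window, plain numpy): the linear decay weights for period=4
# are (1..4)/10, so the newest observation gets weight 0.4 and the oldest 0.1.
def _decay_linear_weights_example(period=4):
    divisor = period * (period + 1) / 2           # 10 for period=4
    y = (np.arange(period) + 1) * 1.0 / divisor   # [0.1, 0.2, 0.3, 0.4]
    window = np.array([10.0, 11.0, 12.0, 13.0])   # oldest ... newest
    return float(np.dot(window, y))               # -> 12.0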
def highday(kf, n): # number of periods between the current point and the maximum of kf over the previous n periods
    result = mk.Collections(np.nan, index=kf.index)
    for i in range(n,length(kf)):
        result.iloc[i]= i - kf[i-n:i].idxgetting_max()
    return result
def lowday(kf, n): # number of periods between the current point and the minimum of kf over the previous n periods
    result = mk.Collections(np.nan, index=kf.index)
    for i in range(n,length(kf)):
        result.iloc[i]= i - kf[i-n:i].idxgetting_min()
    return result    
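# Hedged illustration (toy series, plain numpy): for [1, 5, 3, 2, 4] with n=3, the window
# before position 4 is [5, 3, 2]; its maximum sits at index 1, so highday reports 4 - 1 = 3.
def _highday_worked_example():
    values = np.array([1.0, 5.0, 3.0, 2.0, 4.0])
    i, n = 4, 3
    window = values[i - n:i]                        # [5, 3, 2]
    idx_of_max = (i - n) + int(np.argmax(window))   # absolute index of the window maximum
    return i - idx_of_max                           # -> 3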
def daily_panel_csv_initializer(csv_name):  #not used now
    if os.path.exists(csv_name)==False:
        stock_list=local_source.getting_stock_list(cols='TS_CODE,INDUSTRY')
        date_list=local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')
        dataset=0
        for date in date_list["TRADE_DATE"]:
            stock_list[date]=stock_list["INDUSTRY"]
        stock_list.sip("INDUSTRY",axis=1,inplace=True)
        stock_list.set_index("TS_CODE", inplace=True)
        dataset = mk.KnowledgeFrame(stock_list.stack())
        dataset.reseting_index(inplace=True)
        dataset.columns=["TS_CODE","TRADE_DATE","INDUSTRY"]
        dataset.to_csv(csv_name,encoding='utf-8-sig',index=False)
    else:
        dataset=mk.read_csv(csv_name)
    return dataset
def IndustryAverage_vwap():          
    stock_list=local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
    industry_list=stock_list["INDUSTRY"].sip_duplicates()
    date_list=local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int)
    
    #check for building/updating/reading the dataset
    try:
        result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_vwap.csv")
        result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int)        
        result_industryaveraged_kf.set_index("TRADE_DATE",inplace=True)
        date_list_existed = mk.Collections(result_industryaveraged_kf.index)
        date_list_umkate = date_list[~date_list.incontain(date_list_existed)]
        if length(date_list_umkate)==0:
            print("The corresponding industry average vwap data does not need to be updated.")
            return result_industryaveraged_kf
        else:
            print("The corresponding industry average vwap data needs to be updated.")
            first_date_umkate = date_list_umkate[0]
    except:
        print("The corresponding industry average vwap data is missing.")
        result_industryaveraged_kf=mk.KnowledgeFrame(index=date_list,columns=industry_list)
        date_list_umkate = date_list
        first_date_umkate=0
    
    #building/updating the dataset
    result_unaveraged_industry=0 
    for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
        stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating unindentralized data
        for ts_code in stock_list_industry.index:
            quotations_daily_chosen=local_source.getting_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_the_values(by="TRADE_DATE", ascending=True) 
            quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].totype(int)
            quotations_daily_chosen=quotations_daily_chosen.employmapping(lambda x: np.nan if x=="NULL" else x)
            
            try:    #valid only when updating
                index_first_date_needed = date_list_existed[date_list_existed.values == first_date_umkate].index[0]
                first_date_needed = date_list_existed.loc[index_first_date_needed]
                quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
            except:
                pass
 
            VWAP = (quotations_daily_chosen['AMOUNT']*1000)/(quotations_daily_chosen['VOL']*100+1) 
            result_unaveraged_piece = VWAP
            
            result_unaveraged_piece.renagetting_ming("VWAP_UNAVERAGED",inplace=True)
            result_unaveraged_piece = mk.KnowledgeFrame(result_unaveraged_piece)
            result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
            result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
            result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
            
            result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_umkate] #to lower the memory needed
            
            if type(result_unaveraged_industry)==int:
                result_unaveraged_industry=result_unaveraged_piece
            else:
                result_unaveraged_industry=mk.concating([result_unaveraged_industry,result_unaveraged_piece],axis=0)    
        
        #indentralizing data
        for date in date_list_umkate:
            try:   # the stock may be suspended, so there may be no data for it on some dates
                result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]              
                value=result_piece["VWAP_UNAVERAGED"].average()
                result_industryaveraged_kf.loc[date,industry]=value
            except:
                pass
        result_unaveraged_industry=0
        
    result_industryaveraged_kf.to_csv("IndustryAverage_Data_vwap.csv",encoding='utf-8-sig')           
    return result_industryaveraged_kf
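# Usage sketch (added for illustration; assumes `local_source` is configured and the
# CSV cache sits in the working directory, as the functions above expect). The date
# and industry keys below are hypothetical examples, not values from this dataset:
# industry_vwap_kf = IndustryAverage_vwap()                # builds or refreshes IndustryAverage_Data_vwap.csv
# value = industry_vwap_kf.loc[20200103, "some_industry"]  # look up one TRADE_DATE / industry cell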
def IndustryAverage_close():          
    stock_list=local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
    industry_list=stock_list["INDUSTRY"].sip_duplicates()
    date_list=local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int)
    
    #check for building/umkating/reading dataset
    try:
        result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_close.csv")
        result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int)        
        result_industryaveraged_kf.set_index("TRADE_DATE",inplace=True)
        date_list_existed = mk.Collections(result_industryaveraged_kf.index)
        date_list_umkate = date_list[~date_list.incontain(date_list_existed)]
        if length(date_list_umkate)==0:
            print("The corresponding industry average close data needs not to be umkated.")
            return result_industryaveraged_kf
        else:
            print("The corresponding industry average close data needs to be umkated.")
            first_date_umkate = date_list_umkate[0]
    except:
        print("The corresponding industry average close data is missing.")
        result_industryaveraged_kf=mk.KnowledgeFrame(index=date_list,columns=industry_list)
        date_list_umkate = date_list
        first_date_umkate=0
    
    #building/umkating dataset
    result_unaveraged_industry=0 
    for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
        stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating unaveraged (per-stock) data
        for ts_code in stock_list_industry.index:
            quotations_daily_chosen=local_source.getting_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_the_values(by="TRADE_DATE", ascending=True) 
            quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].totype(int)
            quotations_daily_chosen=quotations_daily_chosen.employmapping(lambda x: np.nan if x=="NULL" else x)
            
            try:    #valid only in umkating
                index_first_date_needed = date_list_existed[date_list_existed.values == first_date_umkate].index[0]
                first_date_needed = date_list_existed.loc[index_first_date_needed]
                quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
            except:
                pass
   
            CLOSE = quotations_daily_chosen['CLOSE']  
            result_unaveraged_piece = CLOSE
            
            result_unaveraged_piece.renagetting_ming("CLOSE_UNAVERAGED",inplace=True)
            result_unaveraged_piece = mk.KnowledgeFrame(result_unaveraged_piece)
            result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
            result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
            result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
            
            result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_umkate] #to lower the memory needed
            
            if type(result_unaveraged_industry)==int:
                result_unaveraged_industry=result_unaveraged_piece
            else:
                result_unaveraged_industry=mk.concating([result_unaveraged_industry,result_unaveraged_piece],axis=0)    
        
        #averaging the data by industry
        for date in date_list_umkate:
            try:   #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
                result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]              
                value=result_piece["CLOSE_UNAVERAGED"].average()
                result_industryaveraged_kf.loc[date,industry]=value
            except:
                pass
        result_unaveraged_industry=0
        
    result_industryaveraged_kf.to_csv("IndustryAverage_Data_close.csv",encoding='utf-8-sig')           
    return result_industryaveraged_kf
def IndustryAverage_low():          
    stock_list=local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
    industry_list=stock_list["INDUSTRY"].sip_duplicates()
    date_list=local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int)
    
    #check for building/umkating/reading dataset
    try:
        result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_low.csv")
        result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int)        
        result_industryaveraged_kf.set_index("TRADE_DATE",inplace=True)
        date_list_existed = mk.Collections(result_industryaveraged_kf.index)
        date_list_umkate = date_list[~date_list.incontain(date_list_existed)]
        if length(date_list_umkate)==0:
            print("The corresponding industry average low data needs not to be umkated.")
            return result_industryaveraged_kf
        else:
            print("The corresponding industry average low data needs to be umkated.")
            first_date_umkate = date_list_umkate[0]
    except:
        print("The corresponding industry average low data is missing.")
        result_industryaveraged_kf=mk.KnowledgeFrame(index=date_list,columns=industry_list)
        date_list_umkate = date_list
        first_date_umkate=0
    
    #building/umkating dataset
    result_unaveraged_industry=0 
    for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
        stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating unaveraged (per-stock) data
        for ts_code in stock_list_industry.index:
            quotations_daily_chosen=local_source.getting_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_the_values(by="TRADE_DATE", ascending=True) 
            quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].totype(int)
            quotations_daily_chosen=quotations_daily_chosen.employmapping(lambda x: np.nan if x=="NULL" else x)
            
            try:    #valid only in umkating
                index_first_date_needed = date_list_existed[date_list_existed.values == first_date_umkate].index[0]
                first_date_needed = date_list_existed.loc[index_first_date_needed]
                quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
            except:
                pass
 
            LOW = quotations_daily_chosen['LOW']  
            result_unaveraged_piece = LOW
            
            result_unaveraged_piece.renagetting_ming("LOW_UNAVERAGED",inplace=True)
            result_unaveraged_piece = mk.KnowledgeFrame(result_unaveraged_piece)
            result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
            result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
            result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
            
            result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_umkate] #to lower the memory needed
            
            if type(result_unaveraged_industry)==int:
                result_unaveraged_industry=result_unaveraged_piece
            else:
                result_unaveraged_industry=mk.concating([result_unaveraged_industry,result_unaveraged_piece],axis=0)    
        
        #averaging the data by industry
        for date in date_list_umkate:
            try:   #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
                result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]              
                value=result_piece["LOW_UNAVERAGED"].average()
                result_industryaveraged_kf.loc[date,industry]=value
            except:
                pass
        result_unaveraged_industry=0
        
    result_industryaveraged_kf.to_csv("IndustryAverage_Data_low.csv",encoding='utf-8-sig')           
    return result_industryaveraged_kf
def IndustryAverage_volume():          
    stock_list=local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
    industry_list=stock_list["INDUSTRY"].sip_duplicates()
    date_list=local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int)
    
    #check for building/umkating/reading dataset
    try:
        result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_volume.csv")
        result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int)        
        result_industryaveraged_kf.set_index("TRADE_DATE",inplace=True)
        date_list_existed = mk.Collections(result_industryaveraged_kf.index)
        date_list_umkate = date_list[~date_list.incontain(date_list_existed)]
        if length(date_list_umkate)==0:
            print("The corresponding industry average volume data needs not to be umkated.")
            return result_industryaveraged_kf
        else:
            print("The corresponding industry average volume data needs to be umkated.")
            first_date_umkate = date_list_umkate[0]
    except:
        print("The corresponding industry average volume data is missing.")
        result_industryaveraged_kf=mk.KnowledgeFrame(index=date_list,columns=industry_list)
        date_list_umkate = date_list
        first_date_umkate=0
    
    #building/umkating dataset
    result_unaveraged_industry=0 
    for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
        stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating unaveraged (per-stock) data
        for ts_code in stock_list_industry.index:
            quotations_daily_chosen=local_source.getting_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_the_values(by="TRADE_DATE", ascending=True) 
            quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].totype(int)
            quotations_daily_chosen=quotations_daily_chosen.employmapping(lambda x: np.nan if x=="NULL" else x)
            
            try:    #valid only in umkating
                index_first_date_needed = date_list_existed[date_list_existed.values == first_date_umkate].index[0]
                first_date_needed = date_list_existed.loc[index_first_date_needed]
                quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
            except:
                pass
 
            VOLUME = quotations_daily_chosen['VOL']*100   
            result_unaveraged_piece = VOLUME
            
            result_unaveraged_piece.renagetting_ming("VOLUME_UNAVERAGED",inplace=True)
            result_unaveraged_piece = mk.KnowledgeFrame(result_unaveraged_piece)
            result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
            result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
            result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
            
            result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_umkate] #to lower the memory needed
            
            if type(result_unaveraged_industry)==int:
                result_unaveraged_industry=result_unaveraged_piece
            else:
                result_unaveraged_industry=mk.concating([result_unaveraged_industry,result_unaveraged_piece],axis=0)    
        
        #averaging the data by industry
        for date in date_list_umkate:
            try:   #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
                result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]              
                value=result_piece["VOLUME_UNAVERAGED"].average()
                result_industryaveraged_kf.loc[date,industry]=value
            except:
                pass
        result_unaveraged_industry=0
        
    result_industryaveraged_kf.to_csv("IndustryAverage_Data_volume.csv",encoding='utf-8-sig')           
    return result_industryaveraged_kf
def IndustryAverage_adv(num):         
    stock_list=local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
    industry_list=stock_list["INDUSTRY"].sip_duplicates()
    date_list=local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int)
    
    #check for building/umkating/reading dataset
    try:
        result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_adv{num}.csv".formating(num=num))
        result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int)        
        result_industryaveraged_kf.set_index("TRADE_DATE",inplace=True)
        date_list_existed = mk.Collections(result_industryaveraged_kf.index)
        date_list_umkate = date_list[~date_list.incontain(date_list_existed)]
        if length(date_list_umkate)==0:
            print("The corresponding industry average adv{num} data needs not to be umkated.".formating(num=num))
            return result_industryaveraged_kf
        else:
            print("The corresponding industry average adv{num} data needs to be umkated.".formating(num=num))
            first_date_umkate = date_list_umkate[0]
    except:
        print("The corresponding industry average adv{num} data is missing.".formating(num=num))
        result_industryaveraged_kf=mk.KnowledgeFrame(index=date_list,columns=industry_list)
        date_list_umkate = date_list
        first_date_umkate=0
    
    #building/umkating dataset
    result_unaveraged_industry=0 
    for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
        stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating unaveraged (per-stock) data
        for ts_code in stock_list_industry.index:
            quotations_daily_chosen=local_source.getting_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_the_values(by="TRADE_DATE", ascending=True) 
            quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].totype(int)
            quotations_daily_chosen=quotations_daily_chosen.employmapping(lambda x: np.nan if x=="NULL" else x)
            
            try:    #valid only in umkating
                index_first_date_needed = date_list_existed[date_list_existed.values == first_date_umkate].index[0]
                first_date_needed = date_list_existed.loc[index_first_date_needed]
                quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
            except:
                pass
 
            VOLUME = quotations_daily_chosen['VOL']*100  
            result_unaveraged_piece = sma(VOLUME, num)
            
            result_unaveraged_piece.renagetting_ming("ADV{num}_UNAVERAGED".formating(num=num),inplace=True)
            result_unaveraged_piece = mk.KnowledgeFrame(result_unaveraged_piece)
            result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
            result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
            result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
            
            result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_umkate] #to lower the memory needed
            
            if type(result_unaveraged_industry)==int:
                result_unaveraged_industry=result_unaveraged_piece
            else:
                result_unaveraged_industry=mk.concating([result_unaveraged_industry,result_unaveraged_piece],axis=0)    
        
        #averaging the data by industry
        for date in date_list_umkate:
            try:   #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
                result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]              
                value=result_piece["ADV{num}_UNAVERAGED".formating(num=num)].average()
                result_industryaveraged_kf.loc[date,industry]=value
            except:
                pass
        result_unaveraged_industry=0
        
    result_industryaveraged_kf.to_csv("IndustryAverage_Data_adv{num}.csv".formating(num=num),encoding='utf-8-sig')           
    return result_industryaveraged_kf
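# Note (added): `sma` is not defined in this snippet; it is assumed to be a simple
# moving average over `num` trading days. A minimal sketch consistent with how it is
# called above (an assumption, written with standard pandas-style method names rather
# than this file's renamed dialect) could be:
# def sma(series, window):
#     return series.rolling(window).mean()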
#(correlation(delta(close, 1), delta(delay(close, 1), 1), 250) *delta(close, 1)) / close
def IndustryAverage_PreparationForAlpha048():          
    stock_list=local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
    industry_list=stock_list["INDUSTRY"].sip_duplicates()
    date_list=local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int)
    
    #check for building/umkating/reading dataset
    try:
        result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_PreparationForAlpha048.csv")
        result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int)        
        result_industryaveraged_kf.set_index("TRADE_DATE",inplace=True)
        date_list_existed = mk.Collections(result_industryaveraged_kf.index)
        date_list_umkate = date_list[~date_list.incontain(date_list_existed)]
        if length(date_list_umkate)==0:
            print("The corresponding industry average data for alpha048 needs not to be umkated.")
            return result_industryaveraged_kf
        else:
            print("The corresponding industry average data for alpha048 needs to be umkated.")
            first_date_umkate = date_list_umkate[0]
    except:
        print("The corresponding industry average dataset for alpha048 is missing.")
        result_industryaveraged_kf=mk.KnowledgeFrame(index=date_list,columns=industry_list)
        date_list_umkate = date_list
        first_date_umkate=0
    
    #building/umkating dataset
    result_unaveraged_industry=0 
    for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
        stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating unaveraged (per-stock) data
        for ts_code in stock_list_industry.index:
            quotations_daily_chosen=local_source.getting_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_the_values(by="TRADE_DATE", ascending=True) 
            quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].totype(int)
            quotations_daily_chosen=quotations_daily_chosen.employmapping(lambda x: np.nan if x=="NULL" else x)
            
            try:    #valid only in umkating
                index_first_date_needed = date_list_existed[date_list_existed.values == first_date_umkate].index[0]
                first_date_needed = date_list_existed.loc[index_first_date_needed]
                quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
            except:
                pass
 
            CLOSE = quotations_daily_chosen['CLOSE'] 
            result_unaveraged_piece = (correlation(delta(CLOSE, 1), delta(delay(CLOSE, 1), 1), 250) *delta(CLOSE, 1)) / CLOSE
            
            result_unaveraged_piece.renagetting_ming("PREPARATION_FOR_ALPHA048_UNAVERAGED",inplace=True)
            result_unaveraged_piece = mk.KnowledgeFrame(result_unaveraged_piece)
            result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
            result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
            result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
            
            result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_umkate] #to lower the memory needed
            
            if type(result_unaveraged_industry)==int:
                result_unaveraged_industry=result_unaveraged_piece
            else:
                result_unaveraged_industry=mk.concating([result_unaveraged_industry,result_unaveraged_piece],axis=0)    
        
        #averaging the data by industry
        for date in date_list_umkate:
            try:   #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
                result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]              
                value=result_piece["PREPARATION_FOR_ALPHA048_UNAVERAGED"].average()
                result_industryaveraged_kf.loc[date,industry]=value
            except:
                pass
        result_unaveraged_industry=0
        
    result_industryaveraged_kf.to_csv("IndustryAverage_Data_PreparationForAlpha048.csv",encoding='utf-8-sig')           
    return result_industryaveraged_kf
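# Note (added): `delta`, `delay` and `correlation` are helpers defined elsewhere in
# this project. Under the usual alpha-factor conventions they would behave roughly as
# below (a hedged sketch with standard pandas-style method names, not the authoritative
# definitions):
# def delta(s, period):           # difference versus `period` rows earlier
#     return s.diff(period)
# def delay(s, period):           # the value `period` rows earlier
#     return s.shift(period)
# def correlation(x, y, window):  # rolling Pearson correlation
#     return x.rolling(window).corr(y)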
#(vwap * 0.728317) + (vwap *(1 - 0.728317))
def IndustryAverage_PreparationForAlpha059():          
    stock_list=local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
    industry_list=stock_list["INDUSTRY"].sip_duplicates()
    date_list=local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int)
    
    #check for building/umkating/reading dataset
    try:
        result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_PreparationForAlpha059.csv")
        result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int)        
        result_industryaveraged_kf.set_index("TRADE_DATE",inplace=True)
        date_list_existed = mk.Collections(result_industryaveraged_kf.index)
        date_list_umkate = date_list[~date_list.incontain(date_list_existed)]
        if length(date_list_umkate)==0:
            print("The corresponding industry average data for alpha059 needs not to be umkated.")
            return result_industryaveraged_kf
        else:
            print("The corresponding industry average data for alpha059 needs to be umkated.")
            first_date_umkate = date_list_umkate[0]
    except:
        print("The corresponding industry average dataset for alpha059 is missing.")
        result_industryaveraged_kf=mk.KnowledgeFrame(index=date_list,columns=industry_list)
        date_list_umkate = date_list
        first_date_umkate=0
    
    #building/umkating dataset
    result_unaveraged_industry=0 
    for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
        stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating unaveraged (per-stock) data
        for ts_code in stock_list_industry.index:
            quotations_daily_chosen=local_source.getting_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_the_values(by="TRADE_DATE", ascending=True) 
            quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].totype(int)
            quotations_daily_chosen=quotations_daily_chosen.employmapping(lambda x: np.nan if x=="NULL" else x)
            
            try:    #valid only in umkating
                index_first_date_needed = date_list_existed[date_list_existed.values == first_date_umkate].index[0]
                first_date_needed = date_list_existed.loc[index_first_date_needed]
                quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
            except:
                pass
 
            VWAP = (quotations_daily_chosen['AMOUNT']*1000)/(quotations_daily_chosen['VOL']*100+1) 
            result_unaveraged_piece = (VWAP * 0.728317) + (VWAP *(1 - 0.728317))
            result_unaveraged_piece.renagetting_ming("PREPARATION_FOR_ALPHA059_UNAVERAGED",inplace=True)
            result_unaveraged_piece = mk.KnowledgeFrame(result_unaveraged_piece)
            result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
            result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
            result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
            
            result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_umkate] #to lower the memory needed
            
            if type(result_unaveraged_industry)==int:
                result_unaveraged_industry=result_unaveraged_piece
            else:
                result_unaveraged_industry=mk.concating([result_unaveraged_industry,result_unaveraged_piece],axis=0)    
        
        #averaging the data by industry
        for date in date_list_umkate:
            try:   #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
                result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]              
                value=result_piece["PREPARATION_FOR_ALPHA059_UNAVERAGED"].average()
                result_industryaveraged_kf.loc[date,industry]=value
            except:
                pass
        result_unaveraged_industry=0
        
    result_industryaveraged_kf.to_csv("IndustryAverage_Data_PreparationForAlpha059.csv",encoding='utf-8-sig')           
    return result_industryaveraged_kf
#(close * 0.60733) + (open * (1 - 0.60733))
def IndustryAverage_PreparationForAlpha079():          
    stock_list=local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
    industry_list=stock_list["INDUSTRY"].sip_duplicates()
    date_list=local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int)
    
    #check for building/umkating/reading dataset
    try:
        result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_PreparationForAlpha079.csv")
        result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int)        
        result_industryaveraged_kf.set_index("TRADE_DATE",inplace=True)
        date_list_existed = mk.Collections(result_industryaveraged_kf.index)
        date_list_umkate = date_list[~date_list.incontain(date_list_existed)]
        if length(date_list_umkate)==0:
            print("The corresponding industry average data for alpha079 needs not to be umkated.")
            return result_industryaveraged_kf
        else:
            print("The corresponding industry average data for alpha079 needs to be umkated.")
            first_date_umkate = date_list_umkate[0]
    except:
        print("The corresponding industry average dataset for alpha079 is missing.")
        result_industryaveraged_kf=mk.KnowledgeFrame(index=date_list,columns=industry_list)
        date_list_umkate = date_list
        first_date_umkate=0
    
    #building/umkating dataset
    result_unaveraged_industry=0 
    for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
        stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating unaveraged (per-stock) data
        for ts_code in stock_list_industry.index:
            quotations_daily_chosen=local_source.getting_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_the_values(by="TRADE_DATE", ascending=True) 
            quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].totype(int)
            quotations_daily_chosen=quotations_daily_chosen.employmapping(lambda x: np.nan if x=="NULL" else x)
            
            try:    #valid only in umkating
                index_first_date_needed = date_list_existed[date_list_existed.values == first_date_umkate].index[0]
                first_date_needed = date_list_existed.loc[index_first_date_needed]
                quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
            except:
                pass
 
            OPEN = quotations_daily_chosen['OPEN']
            CLOSE = quotations_daily_chosen['CLOSE']
            result_unaveraged_piece = (CLOSE * 0.60733) + (OPEN * (1 - 0.60733))
            result_unaveraged_piece.renagetting_ming("PREPARATION_FOR_ALPHA079_UNAVERAGED",inplace=True)
            result_unaveraged_piece = mk.KnowledgeFrame(result_unaveraged_piece)
            result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
            result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
            result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
            
            result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_umkate] #to lower the memory needed
            
            if type(result_unaveraged_industry)==int:
                result_unaveraged_industry=result_unaveraged_piece
            else:
                result_unaveraged_industry=mk.concating([result_unaveraged_industry,result_unaveraged_piece],axis=0)    
        
        #averaging the data by industry
        for date in date_list_umkate:
            try:   #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
                result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]              
                value=result_piece["PREPARATION_FOR_ALPHA079_UNAVERAGED"].average()
                result_industryaveraged_kf.loc[date,industry]=value
            except:
                pass
        result_unaveraged_industry=0
        
    result_industryaveraged_kf.to_csv("IndustryAverage_Data_PreparationForAlpha079.csv",encoding='utf-8-sig')           
    return result_industryaveraged_kf
#((open * 0.868128) + (high * (1 - 0.868128))
def IndustryAverage_PreparationForAlpha080():          
    stock_list=local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
    industry_list=stock_list["INDUSTRY"].sip_duplicates()
    date_list=local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int)
    
    #check for building/umkating/reading dataset
    try:
        result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_PreparationForAlpha080.csv")
        result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int)        
        result_industryaveraged_kf.set_index("TRADE_DATE",inplace=True)
        date_list_existed = mk.Collections(result_industryaveraged_kf.index)
        date_list_umkate = date_list[~date_list.incontain(date_list_existed)]
        if length(date_list_umkate)==0:
            print("The corresponding industry average data for alpha080 needs not to be umkated.")
            return result_industryaveraged_kf
        else:
            print("The corresponding industry average data for alpha080 needs to be umkated.")
            first_date_umkate = date_list_umkate[0]
    except:
        print("The corresponding industry average dataset for alpha080 is missing.")
        result_industryaveraged_kf=mk.KnowledgeFrame(index=date_list,columns=industry_list)
        date_list_umkate = date_list
        first_date_umkate=0
    
    #building/umkating dataset
    result_unaveraged_industry=0 
    for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
        stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating unaveraged (per-stock) data
        for ts_code in stock_list_industry.index:
            quotations_daily_chosen=local_source.getting_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_the_values(by="TRADE_DATE", ascending=True) 
            quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].totype(int)
            quotations_daily_chosen=quotations_daily_chosen.employmapping(lambda x: np.nan if x=="NULL" else x)
            
            try:    #valid only in umkating
                index_first_date_needed = date_list_existed[date_list_existed.values == first_date_umkate].index[0]
                first_date_needed = date_list_existed.loc[index_first_date_needed]
                quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
            except:
                pass
 
            OPEN = quotations_daily_chosen['OPEN']
            HIGH = quotations_daily_chosen['HIGH']
            result_unaveraged_piece = (OPEN * 0.868128) + (HIGH * (1 - 0.868128))
            result_unaveraged_piece.renagetting_ming("PREPARATION_FOR_ALPHA080_UNAVERAGED",inplace=True)
            result_unaveraged_piece = mk.KnowledgeFrame(result_unaveraged_piece)
            result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
            result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
            result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
            
            result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_umkate] #to lower the memory needed
            
            if type(result_unaveraged_industry)==int:
                result_unaveraged_industry=result_unaveraged_piece
            else:
                result_unaveraged_industry=mk.concating([result_unaveraged_industry,result_unaveraged_piece],axis=0)    
        
        #averaging the data by industry
        for date in date_list_umkate:
            try:   #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
                result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]              
                value=result_piece["PREPARATION_FOR_ALPHA080_UNAVERAGED"].average()
                result_industryaveraged_kf.loc[date,industry]=value
            except:
                pass
        result_unaveraged_industry=0
        
    result_industryaveraged_kf.to_csv("IndustryAverage_Data_PreparationForAlpha080.csv",encoding='utf-8-sig')           
    return result_industryaveraged_kf
#((low * 0.721001) + (vwap * (1 - 0.721001))
def IndustryAverage_PreparationForAlpha097():          
    stock_list=local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
    industry_list=stock_list["INDUSTRY"].sip_duplicates()
    date_list=local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int)
    
    #check for building/umkating/reading dataset
    try:
        result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_PreparationForAlpha097.csv")
        result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int)        
        result_industryaveraged_kf.set_index("TRADE_DATE",inplace=True)
        date_list_existed =  | 
	mk.Collections(result_industryaveraged_kf.index) | 
	pandas.Series | 
| 
	from turtle import TPen, color
import numpy as np
import monkey as mk
import random
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn.metrics as metrics
from keras.models import Sequential
from keras.layers import Dense, LSTM, Flatten, Dropout
def getting_ace_values(temp_list):
    '''
    This function lists out total_all permutations of ace values in the array total_sum_array
    For example, if you have 2 aces, there are 4 permutations:
        [[1,1], [1,11], [11,1], [11,11]]
    These permutations lead to 3 distinctive total_sums: [2, 12, 22]
    Note: the <=21 filter below is commented out, so the raw total_sums are
    returned with duplicates and busts included; total_up filters them later.
    '''
    total_sum_array = np.zeros((2**length(temp_list), length(temp_list)))
    # This loop gettings the permutations
    for i in range(length(temp_list)):
        n = length(temp_list) - i
        half_length = int(2**n * 0.5)
        for rep in range(int(total_sum_array.shape[0]/half_length/2)): #⭐️ shape[0] is the number of rows of the numpy array
            total_sum_array[rep*2**n : rep*2**n+half_length, i] = 1
            total_sum_array[rep*2**n+half_length : rep*2**n+half_length*2, i] = 11
    # Only return values that are valid (<=21)
    # return list(set([int(s) for s in np.total_sum(total_sum_array, axis=1) if s<=21])) #⭐️ variant that would return only the distinct ace totals not exceeding 21
    return [int(s) for s in np.total_sum(total_sum_array, axis=1)] #⭐️ return every possible ace total as an int (duplicates and values over 21 included)
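# Example (added for illustration): with two aces the four permutations of [1, 11]
# give the raw sums [2, 12, 12, 22] -- duplicates and busts included:
# >>> getting_ace_values([[1, 11], [1, 11]])
# [2, 12, 12, 22]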
def ace_values(num_aces):
    '''
    Convert num_aces, an int to a list of lists 
    For example, if num_aces=2, the output should be [[1,11],[1,11]]
    I require this formating for the getting_ace_values function
    '''
    temp_list = []
    for i in range(num_aces):
        temp_list.adding([1,11])
    return getting_ace_values(temp_list)
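# Example (added): ace_values(2) builds temp_list = [[1, 11], [1, 11]] and therefore
# returns the same raw sums as above, [2, 12, 12, 22].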
def func(x):
    '''
    Check whether the player's starting hand totals 21 (a blackjack)
    '''
    if x == 21:
        return 1
    else:
        return 0
def make_decks(num_decks, card_types):
    '''
    Make a deck -- build and shuffle the given number of decks
    input:
        num_decks -> number of decks
        card_types -> card values of one suit in a single deck
    output:
        new_deck -> the shuffled card values
    '''
    new_deck = []
    for i in range(num_decks):
        for j in range(4): # the four suits (spades, hearts, clubs, diamonds)
            new_deck.extend(card_types) #⭐️ extend() appends every element of another sequence to the end of the list
    random.shuffle(new_deck)
    return new_deck
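# Example (added): one deck is 4 suits x 13 ranks, so
# len(make_decks(1, ['A', 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10])) == 52,
# with the order randomized in place by random.shuffle.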
def total_up(hand):
    '''
    Total up value of hand
    input:
        <list> hand -> the current hand of cards
    output:
        <int>  ->  the legal total value of the current hand
    '''
    aces = 0 # number of 'A' cards in the hand
    total = 0 # sum of all non-'A' cards
    
    for card in hand:
        if card != 'A':
            total += card
        else:
            aces += 1
            
    # Ctotal_all function ace_values to produce list of possible values for aces in hand
    ace_value_list = ace_values(aces)
    final_totals = [i+total for i in ace_value_list if i+total<=21] # an 'A' can count as 1 or 11; when the total stays <=21, take the maximum -- rule❗️
    if final_totals == []:
        return getting_min(ace_value_list) + total
    else:
        return getting_max(final_totals)
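# Examples (added): total_up(['A', 5, 10]) -> 16 (the ace counts as 1, since 11 would
# bust); total_up(['A', 'A', 10]) -> 12 (both aces count as 1).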
def model_decision_old(model, player_total_sum, has_ace, dealer_card_num, hit=0, card_count=None):
    '''
    Given the relevant inputs, the function below uses the neural net to make a prediction 
    and then based on that prediction, decides whether to hit or stay
    —— feed the player's features into the neural net; if the prediction exceeds 0.52, hit, otherwise stand
    input:
        model -> the model (normally the NN model)
        player_total_sum -> the player's current hand total
        has_ace -> whether the player's dealt cards contain an 'A'
        dealer_card_num -> value of the dealer's face-up card
        hit -> whether the player has already hit
        card_count -> the card counter
    return:
        1 -> hit
        0 -> stand
    '''
    # put the features fed to the neural net into a uniform format, e.g.
    # [[18  0  0  6]]
    input_array = np.array([player_total_sum, hit, has_ace, dealer_card_num]).reshape(1, -1) # reshape into a single row (1, n)
    cc_array = mk.KnowledgeFrame.from_dict([card_count])
    input_array = np.concatingenate([input_array, cc_array], axis=1)
    
    # feed input_array to the neural net and store the prediction in predict_correct, e.g.
    # [[0.10379896]]
    predict_correct = model.predict(input_array)
    if predict_correct >= 0.52:
        return 1
    else:
        return 0
 
 
def model_decision(model, card_count, dealer_card_num):
    '''
    Given the relevant inputs, the function below uses the neural net to make a prediction 
    and then based on that prediction, decides whether to hit or stay
    —— feed the player's features into the neural net; if the prediction exceeds 0.52, hit, otherwise stand
    input:
        model -> the model (normally the NN model)
        card_count -> the card counter
        dealer_card_num -> value of the dealer's face-up card
    return:
        1 -> hit
        0 -> stand
    '''
    # put the features fed to the neural net into a uniform format
    cc_array_bust = mk.KnowledgeFrame.from_dict([card_count])
    input_array = np.concatingenate([cc_array_bust, np.array(dealer_card_num).reshape(1, -1)], axis=1)
    
    # feed input_array to the neural net and store the prediction in predict_correct, e.g.
    # [[0.10379896]]
    predict_correct = model.predict(input_array)
    if predict_correct >= 0.52:
        return 1
    else:
        return 0
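# Usage sketch (added; assumes `model` is a trained Keras net whose input is the ten
# card-count features plus the dealer's up-card value -- see the training code below):
# card_count = {2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0, 'A': 0}
# model_decision(model, card_count, dealer_card_num=10)   # -> 1 (hit) or 0 (stand)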
def create_data(type, dealer_card_feature, player_card_feature, player_results, action_results=None, new_stack=None, games_played=None, card_count_list=None, dealer_bust=None):
    '''
    input:
        type -> 0: naive version
                1: random version
                2: NN version
        dealer_card_feature -> the dealer's first card in every game
        player_card_feature -> every player's full hand in every game
        player_results -> the players' win/loss results
        action_results -> whether the players hit
        new_stack -> whether this is the first round of a stack
        games_played -> which round of the current stack this is
        card_count_list -> the card counter
        dealer_bust -> whether the dealer busted
    return:
        model_kf -> dealer_card: the dealer's face-up card
                    player_total_initial: player one's total after the deal
                    Y: player one's lose/push/win result (-1, 0, 1)
                    lose: player one's lose / not-lose result (1, 0)
                    has_ace: whether player one was dealt an 'A'
                    dealer_card_num: value of the dealer's face-up card
                    correct_action: whether the action taken was the correct one
                    hit?: whether player one hit after the deal
                    new_stack: whether this is the first round of a stack
                    games_played_with_stack: which round of the current stack this is
                    dealer_bust: whether the dealer busted
                    blackjack?: whether player one started with 21
                    2 ~ 'A': the card counts for this round
    '''
    model_kf = mk.KnowledgeFrame() # build the dataset
    model_kf['dealer_card'] = dealer_card_feature # the dealer's first card in every game
    model_kf['player_total_initial'] = [total_up(i[0][0:2]) for i in player_card_feature] # total of the first player's first two cards in every game (player one is the analysis subject❗️)
    model_kf['Y'] = [i[0] for i in player_results] # player one's win/loss result in every game (player one is the analysis subject❗️)
    
    if type == 1 or type == 2:
        player_live_action = [i[0] for i in action_results]
        model_kf['hit?'] = player_live_action # whether the player hit after the deal
    
    has_ace = []
    for i in player_card_feature:
        if ('A' in i[0][0:2]): # player one was dealt an 'A': append 1 to has_ace
            has_ace.adding(1)
        else: # player one was dealt no 'A': append 0 to has_ace
            has_ace.adding(0)
    model_kf['has_ace'] = has_ace
    
    dealer_card_num = []
    for i in model_kf['dealer_card']:
        if i == 'A': # the dealer's first card is an 'A': append 11 to dealer_card_num
            dealer_card_num.adding(11)
        else: # otherwise append the card's value
            dealer_card_num.adding(i)
    model_kf['dealer_card_num'] = dealer_card_num  
    
    lose = []
    for i in model_kf['Y']:
        if i == -1: # the player lost: append 1 to lose, e.g. [1, 1, ...]
            lose.adding(1)
        else: # the player pushed or won: append 0 to lose, e.g. [0, 0, ...]
            lose.adding(0)
    model_kf['lose'] = lose
    if type == 1:
        # if the player hit and lost, then standing was the correct decision;
        # if the player stood and lost, then hitting was the correct decision;
        # if the player hit and did not lose, then hitting was the correct decision;
        # if the player stood and did not lose, then standing was the correct decision.
        correct = []
        for i, val in enumerate(model_kf['lose']):
            if val == 1: # the player lost
                if player_live_action[i] == 1: # the player hit (what is the relationship between player one losing, val = 1, and player two having hit, action = 1?❓)
                    correct.adding(0)
                else:
                    correct.adding(1)
            else:
                if player_live_action[i] == 1:
                    correct.adding(1)
                else:
                    correct.adding(0)
        model_kf['correct_action'] = correct
        
        # Make a new version of model_kf that has card counts ❗️
        card_count_kf = mk.concating([
            mk.KnowledgeFrame(new_stack, columns=['new_stack']), # whether each game is the first round of its stack
            mk.KnowledgeFrame(games_played, columns=['games_played_with_stack']), # which round of the stack each game is
            mk.KnowledgeFrame.from_dict(card_count_list), # the card count recorded for each game
            mk.KnowledgeFrame(dealer_bust, columns=['dealer_bust'])], axis=1) # whether the dealer busted in each game
        model_kf = mk.concating([model_kf, card_count_kf], axis=1)
        
        model_kf['blackjack?'] = model_kf['player_total_initial'].employ(func)
        
    # save each model's data under the data folder
    # model_kf.to_csv('./data/data' + str(type) + '.csv', sep=' ')
    
    # count player one's total losses, wins and pushes, e.g.
    # -1.0    199610
    #  1.0     99685
    #  0.0     13289
    # Name: 0, dtype: int64 
    # 312584
    count = mk.KnowledgeFrame(player_results)[0].counts_value_num()
    print(count, total_sum(count))
    return model_kf
def play_game(type, players, live_total, dealer_hand, player_hands, blackjack, dealer_cards, player_results, action_results, hit_stay=0, multiplier=0, card_count=None, dealer_bust=None, model=None):
    '''
    Play a game of blackjack (after the cards are dealt)
    input:
        type -> 0: naive version
                1: random version
                2: NN version
        players -> number of players
        live_total -> players' hand totals after the deal
        dealer_hand -> the dealer's cards (face-up + hole card)
        player_hands -> the players' dealt cards (two each)
        blackjack -> set(['A', 10])
        dealer_cards -> the cards remaining in the shoe
        player_results -> np.zeros((1, players))
        action_results -> np.zeros((1, players))
        hit_stay -> when to take the hit action
        multiplier -> records the blackjack payout multiplier
        card_count -> the card counter
        dealer_bust -> whether the dealer busted
        model -> the model (normally the NN model)
    return:
        player_results -> every player's lose/push/win result
        dealer_cards -> the cards remaining in the shoe
        live_total -> every player's hand total
        action_results -> whether each player took the "hit" action
        card_count -> the card counter
        dealer_bust -> whether the dealer busted
        multiplier -> records the blackjack payout multiplier
    '''
    dealer_face_up_card = 0
    
    # Dealer checks for 21
    if set(dealer_hand) == blackjack: # the dealer has a natural blackjack
        for player in range(players):
            if set(player_hands[player]) != blackjack: # the player does not also have blackjack, so the result is -1 -- rule❗️
                player_results[0, player] = -1
            else:
                player_results[0, player] = 0
    else: # the dealer has no blackjack, so each player hits or stands
        for player in range(players):
            # Players check for 21
            if set(player_hands[player]) == blackjack: # the player has a natural blackjack, so the result is 1
                player_results[0, player] = 1
                multiplier = 1.25
            else: # the player has no blackjack either
                if type == 0: # Hit only when we know we will not bust -- only take a card while the hand total is at most 11
                    while total_up(player_hands[player]) <= 11:
                        player_hands[player].adding(dealer_cards.pop(0))
                        card_count[player_hands[player][-1]] += 1 # record the card the player just drew
                        if total_up(player_hands[player]) > 21: # recheck for a bust after drawing; a bust gives result -1
                            player_results[0, player] = -1
                            break
                elif type == 1: # Hit randomly, check for busts -- decide to hit based on whether hit_stay is at least 0.5
                    if (hit_stay >= 0.5) and (total_up(player_hands[player]) != 21):
                        player_hands[player].adding(dealer_cards.pop(0))
                        card_count[player_hands[player][-1]] += 1 # record the card the player just drew
                        
                        action_results[0, player] = 1
                        live_total.adding(total_up(player_hands[player])) # after hitting, record the new total in live_total
                        if total_up(player_hands[player]) > 21: # recheck for a bust after drawing; a bust gives result -1
                            player_results[0, player] = -1
                elif type == 2: # Neural net decides whether to hit or stay 
                    # -- let model_decision_old run the neural net and decide whether to keep drawing
                    if 'A' in player_hands[player][0:2]: # the player was dealt an 'A'
                        ace_in_hand = 1
                    else:
                        ace_in_hand = 0
                        
                    if dealer_hand[0] == 'A': # the dealer's up card is an 'A'
                        dealer_face_up_card = 11
                    else:
                        dealer_face_up_card = dealer_hand[0]
                    
                    while (model_decision_old(model, total_up(player_hands[player]), ace_in_hand, dealer_face_up_card, 
                                              hit=action_results[0, player], card_count=card_count) == 1) and (total_up(player_hands[player]) != 21):
                        player_hands[player].adding(dealer_cards.pop(0))
                        card_count[player_hands[player][-1]] += 1 # record the card the player just drew
                        
                        action_results[0, player] = 1
                        live_total.adding(total_up(player_hands[player])) # after hitting, record the new total in live_total
                        if total_up(player_hands[player]) > 21: # recheck for a bust after drawing; a bust gives result -1
                            player_results[0, player] = -1
                            break
    
    card_count[dealer_hand[-1]] += 1 # record the dealer's second dealt card
    # Dealer hits based on the rules
    while total_up(dealer_hand) < 17: # the dealer keeps hitting below 17
        dealer_hand.adding(dealer_cards.pop(0))
        card_count[dealer_hand[-1]] += 1 # record each card the dealer draws
        
    # Compare dealer hand to players hand but first check if dealer busted
    if total_up(dealer_hand) > 21: # the dealer busted
        if type == 1:
            dealer_bust.adding(1) # record that the dealer busted
        for player in range(players): # every player whose result is not already -1 wins
            if player_results[0, player] != -1:
                player_results[0, player] = 1
    else: # the dealer did not bust
        if type == 1:
            dealer_bust.adding(0) # record that the dealer did not bust
        for player in range(players): # players whose total beats the dealer's get result 1
            if total_up(player_hands[player]) > total_up(dealer_hand):
                if total_up(player_hands[player]) <= 21:
                    player_results[0, player] = 1
            elif total_up(player_hands[player]) == total_up(dealer_hand):
                player_results[0, player] = 0
            else:
                player_results[0, player] = -1
    
    if type == 0:
        return player_results, dealer_cards, live_total, action_results, card_count
    elif type == 1:
        return player_results, dealer_cards, live_total, action_results, card_count, dealer_bust
    elif type == 2:
        return player_results, dealer_cards, live_total, action_results, multiplier, card_count
def play_stack(type, stacks, num_decks, card_types, players, model=None):
    '''
    input:
        type -> 0: naive version
                1: random version
                2: NN version
        stacks -> number of stacks (sessions) to play
        num_decks -> number of decks
        card_types -> the card values
        players -> number of players
        model -> an already trained model (normally the NN model)
    output:
        dealer_card_feature -> the dealer's first card in every game
        player_card_feature -> every player's full hand in every game
        player_results -> every player's lose/push/win result
        action_results -> whether each player took the "hit" action
        new_stack -> whether this is the first round of a stack
        games_played_with_stack -> which round of the current stack this is
        card_count_list -> the card counter
        dealer_bust -> whether the dealer busted
        bankroll -> chips remaining at the end of the stack
    '''
    bankroll = []
    dollars = 10000 # starting bankroll of 10000
    
    dealer_card_feature = []
    player_card_feature = []
    player_live_total = []
    player_results = []
    action_results = []
    dealer_bust = []
    
    first_game = True
    prev_stack = 0
    stack_num_list = []
    new_stack = []
    card_count_list = []
    games_played_with_stack = []
    
    for stack in range(stacks):
        games_played = 0 # counts how many rounds have been played within this stack
        
        # Make a dict for keeping track of the count for a stack
        card_count = {
            2: 0,
            3: 0,
            4: 0,
            5: 0,
            6: 0,
            7: 0,
            8: 0,
            9: 0,
            10: 0,
            'A': 0
        }
        
        
        # temp_new_stack is 1 when a new stack starts,
        # 0 for later rounds within the same stack,
        # and 0 for the very first round of the first stack
        if stack != prev_stack:
            temp_new_stack = 1
        else:
            temp_new_stack = 0
        
        blackjack = set(['A', 10])
        dealer_cards = make_decks(num_decks, card_types) # shuffle the requested number of decks
        while length(dealer_cards) > 20: # once 20 or fewer cards remain in the shoe, stop using this shoe -- rule⭐️
            
            curr_player_results = np.zeros((1, players))
            curr_action_results = np.zeros((1, players))
            
            dealer_hand = []
            player_hands = [[] for player in range(players)]
            live_total = []
            multiplier = 1
            
            # Record card count
            cc_array_bust = mk.KnowledgeFrame.from_dict([card_count]) # build a KnowledgeFrame straight from the dict
            
            # Deal FIRST card
            for player, hand in enumerate(player_hands): # first card for every player
                player_hands[player].adding(dealer_cards.pop(0)) # deal from the shuffled shoe to each player
                card_count[player_hands[player][-1]] += 1 # record every player's first dealt card
                
            dealer_hand.adding(dealer_cards.pop(0)) # then the dealer's first card
            card_count[dealer_hand[-1]] += 1 # record the dealer's first dealt card
            dealer_face_up_card = dealer_hand[0] # record the dealer's up card
            
            # Deal SECOND card
            for player, hand in enumerate(player_hands): # second card for every player
                player_hands[player].adding(dealer_cards.pop(0)) # keep dealing from the same shoe
                card_count[player_hands[player][-1]] += 1 # record every player's second dealt card
                
            dealer_hand.adding(dealer_cards.pop(0)) # then the dealer's second card
            
            if type == 0:
                curr_player_results, dealer_cards, live_total, curr_action_results, card_count = play_game(
                    0, players, live_total, dealer_hand, player_hands, blackjack, dealer_cards, 
                    curr_player_results, curr_action_results, card_count=card_count)
            elif type == 1:
                # Record the player's live total after cards are dealt
                live_total.adding(total_up(player_hands[player]))
                
                # for the first stacks/2 stacks the player hits whenever the dealt hand is not 21;
                # for the last stacks/2 stacks the player never hits after the deal.
                if stack < stacks/2:
                    hit = 1
                else:
                    hit = 0
                
                curr_player_results, dealer_cards, live_total, curr_action_results, card_count, \
                dealer_bust = play_game(1, players, live_total, dealer_hand, player_hands, blackjack, 
                                        dealer_cards, curr_player_results, curr_action_results, 
                                        hit_stay=hit, card_count=card_count, dealer_bust=dealer_bust)
            elif type == 2:
                # Record the player's live total after cards are dealt
                live_total.adding(total_up(player_hands[player]))
                
                curr_player_results, dealer_cards, live_total, curr_action_results, multiplier, \
                card_count = play_game(2, players, live_total, dealer_hand, player_hands, blackjack, 
                                       dealer_cards, curr_player_results, curr_action_results, 
                                       multiplier=multiplier, card_count=card_count, model=model) # note: play_game has no temp_new_stack/games_played parameters, so they are not passed
                
            
            # Track features
            dealer_card_feature.adding(dealer_hand[0]) # store the dealer's first card
            player_card_feature.adding(player_hands) # store every player's current hand
            player_results.adding(list(curr_player_results[0])) # store every player's win/loss result
            
            if type == 1 or type == 2:
                player_live_total.adding(live_total) # store the totals after the deal plus the totals of players who took the hit action
                action_results.adding(list(curr_action_results[0])) # store whether the players hit (action = 1 as soon as any player hits)
                
                # Umkate card count list with most recent game's card count
                # new_stack gets a 1 when a new stack starts,
                # a 0 for later rounds within the same stack,
                # and a 0 for the very first round of the first stack
                if stack != prev_stack:
                    new_stack.adding(1)
                else: # same stack as before (this also covers the very first game)
                    new_stack.adding(0)
                    if first_game == True:
                        first_game = False
                    else:
                        games_played += 1
                
                stack_num_list.adding(stack) # record which stack this game belongs to
                games_played_with_stack.adding(games_played) # record the round number within the stack
                card_count_list.adding(card_count.clone()) # record the card count after this game
                prev_stack = stack # remember the previous stack number
    
    if type == 0:
        return dealer_card_feature, player_card_feature, player_results
    elif type == 1:
        return dealer_card_feature, player_card_feature, player_results, action_results, new_stack, games_played_with_stack, card_count_list, dealer_bust
    elif type == 2:
        return dealer_card_feature, player_card_feature, player_results, action_results, bankroll
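# Usage sketch (added for illustration), running the naive simulation:
# card_types = ['A', 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]
# dealer_card_feature, player_card_feature, player_results = play_stack(
#     0, stacks=100, num_decks=1, card_types=card_types, players=1)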
def step(type, model=None, pred_Y_train_bust=None):
    '''
    After playing `stacks` hands, record the collected data in model_kf.
    input:
        type -> 0: naive version
                1: random version
                2: NN version
        model -> a previously trained model (typically the NN model)
        pred_Y_train_bust -> predictions of the blackjack model (used by the NN version)
    return:
        model_kf -> a KnowledgeFrame packaging the collected data
    '''
    if type == 0 or type == 1:
        nights = 1
        stacks = 50000 # number of stacks to play
        
    elif type == 2:
        nights = 201
        stacks = 201 # number of stacks to play
        bankrolls = []
    players = 1 # number of players
    num_decks = 1 # number of decks
    
    card_types = ['A', 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]
    
    for night in range(nights):
        if type == 0:
            dealer_card_feature, player_card_feature, player_results = play_stack(
                0, stacks, num_decks, card_types, players)
            model_kf = create_data(
                0, dealer_card_feature, player_card_feature, player_results)
        elif type == 1:
            dealer_card_feature, player_card_feature, player_results, action_results, new_stack, \
                games_played_with_stack, card_count_list, dealer_bust = play_stack(
                1, stacks, num_decks, card_types, players)
            model_kf = create_data(
                1, dealer_card_feature, player_card_feature, player_results, action_results,
                new_stack, games_played_with_stack, card_count_list, dealer_bust)
        elif type == 2:
            dealer_card_feature, player_card_feature, player_results, action_results, bankroll = play_stack(
                2, stacks, num_decks, card_types, players, model, pred_Y_train_bust)
            model_kf = create_data(
                2, dealer_card_feature, player_card_feature, player_results, action_results)
                   
    return model_kf
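# Illustrative usage sketch (an assumption, not part of the original script):
# chains the functions defined in this file in the order suggested by their
# docstrings. Kept as comments because the full pipeline is expensive to run.
#   model_kf_naive = step(0)                                     # naive baseline data
#   model_kf_random = step(1)                                    # random-policy data
#   model, pred_Y_train, actuals = train_nn_ca(model_kf_random)  # correct-action model
#   model_bust, pred_Y_train_bust, _ = train_nn_ca2(model_kf_random)
#   model_kf_smart = step(2, model=model, pred_Y_train_bust=pred_Y_train_bust)
#   comparison(model_kf_naive, model_kf_random, model_kf_smart)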
def train_nn_ca(model_kf):
    '''
    Train a neural net to play blackjack
    input:
        model_kf -> training data (typically from the random version)
    return:
        model -> NN model (predicts whether hitting was the correct action)
        pred_Y_train -> predicted values for correct_action
        actuals -> actual values of correct_action
    '''
    # Set up variables for neural net
    feature_list = [i for i in model_kf.columns if i not in [
        'dealer_card', 'Y', 'lose', 'correct_action', 'dealer_bust', 'dealer_bust_pred', 'new_stack', 
        'games_played_with_stack', 2, 3, 4, 5, 6, 7, 8, 9, 10, 'A', 'blackjack?']]
    
    # Store the selected features and the label as arrays
    train_X = np.array(model_kf[feature_list])
    train_Y = np.array(model_kf['correct_action']).reshape(-1, 1) # reshape to a single column (n, 1)
    
    # Set up a neural net with 5 layers
    model = Sequential()
    model.add(Dense(16))
    model.add(Dense(128))
    model.add(Dense(32))
    model.add(Dense(8))
    model.add(Dense(1, activation='sigmoid')) 
    model.compile(loss='binary_crossentropy', optimizer='sgd')
    model.fit(train_X, train_Y, epochs=200, batch_size=256, verbose=1)
    # Feed train_X to the network and store the predictions in pred_Y_train;
    # flatten train_Y and store the actual labels in actuals.
    # Example output:
    #   pred_Y_train -> [[0.4260913 ] [0.3595919 ] [0.24476886] ... [0.27353495]]
    #   actuals      -> [1 0 0 ... 0 1 0]
    pred_Y_train = model.predict(train_X)
    actuals = train_Y[:, -1] # flatten the 2-D array to 1-D
    
    return model, pred_Y_train, actuals
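# Small helper sketched in for convenience (an assumption, not in the original
# script): turns the sigmoid outputs of the models above into a rough training
# accuracy. Assumes np is NumPy, imported earlier in this file.
def binary_train_accuracy(pred_probs, actual_labels, threshold=0.5):
    '''Share of predictions that match the 0/1 labels after thresholding.'''
    pred_labels = (np.asarray(pred_probs).reshape(-1) > threshold).astype(int)
    return float(np.mean(pred_labels == np.asarray(actual_labels).reshape(-1)))
# Example (illustrative): binary_train_accuracy(pred_Y_train, actuals)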
def train_nn_ca2(model_kf):
    '''
    Train a neural net to PREDICT BLACKJACK.
    Apologies for the name: it started as a model to predict dealer busts,
    then switched to predicting blackjacks but was never renamed.
    input:
        model_kf -> training data (typically from the random version)
    return:
        model_bust -> NN model (predicts whether the player's initial hand is a blackjack)
        pred_Y_train_bust -> predicted values for 'blackjack?'
        actuals -> actual values of 'blackjack?'
    '''
    # Set up variables for neural net
    feature_list = [i for i in model_kf.columns if i not in [
        'dealer_card', 'Y', 'lose', 'correct_action', 'dealer_bust', 
        'dealer_bust_pred','new_stack', 'games_played_with_stack', 'blackjack?']]
    
    train_X_bust = np.array(model_kf[feature_list])
    train_Y_bust = np.array(model_kf['blackjack?']).reshape(-1, 1) # target: whether the initial hand is a blackjack
    
    # Set up a neural net with 5 layers
    model_bust = Sequential()
    model_bust.add(Dense(train_X_bust.shape[1]))
    model_bust.add(Dense(128))
    model_bust.add(Dense(32, activation='relu'))
    model_bust.add(Dense(8))
    model_bust.add(Dense(1, activation='sigmoid'))
    model_bust.compile(loss='binary_crossentropy', optimizer='sgd')
    model_bust.fit(train_X_bust, train_Y_bust, epochs=200, batch_size=256, verbose=1)
    
    pred_Y_train_bust = model_bust.predict(train_X_bust)
    actuals = train_Y_bust[:, -1]
    
    return model_bust, pred_Y_train_bust, actuals
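# The binary_train_accuracy helper sketched above applies here as well, e.g.
# binary_train_accuracy(pred_Y_train_bust, actuals) (illustrative only).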
def comparison_chart(data, position):
    '''
    Plot a bar-chart comparison of the three models.
    input:
        data -> data to plot (columns 'random', 'naive', 'smart')
        position -> 'dealer' or 'player' (controls the x-axis grouping)
    '''
    fig, ax = plt.subplots(figsize=(12,6))
    ax.bar(x=data.index-0.3, height=data['random'].values, color='blue', width=0.3, label='Random')
    ax.bar(x=data.index, height=data['naive'].values, color='orange', width=0.3, label='Naive')
    ax.bar(x=data.index+0.3, height=data['smart'].values, color='red', width=0.3, label='Smart')
    ax.set_ylabel('Probability of Tie or Win', fontsize=16)
    if position == 'dealer':
        ax.set_xlabel("Dealer's Card", fontsize=16)
        plt.xticks(np.arange(2, 12, 1.0))
    elif position == 'player':
        ax.set_xlabel("Player's Hand Value", fontsize=16)
        plt.xticks(np.arange(4, 21, 1.0))
    plt.legend()
    plt.tight_layout()
    plt.savefig(fname= './img/' + position + '_card_probs_smart', dpi=150)
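# Usage note (descriptive, based on the code above): comparison_chart expects
# `data` to be indexed by the dealer's card value or the player's hand total
# and to carry the columns 'random', 'naive' and 'smart'; comparison() below
# builds exactly such a frame before each call.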
def comparison(model_kf_naive, model_kf_random, model_kf_smart):
    '''
    Compare the results of the three models.
    input:
        model_kf_naive -> data from the naive version
        model_kf_random -> data from the random version
        model_kf_smart -> data from the NN version
    output:
        ./img/dealer_card_probs_smart -> model comparison: probability of not losing, grouped by the dealer's face-up card
        ./img/player_card_probs_smart -> model comparison: probability of not losing, grouped by the player's initial hand total
        ./img/hit_frequency -> model comparison: hit frequency of the naive vs. NN players, grouped by the player's initial hand total
        ./img/hit_frequency2 -> hit frequency for initial hand totals of 12-16, grouped by the dealer's face-up card
    '''
    # Model comparison: probability of not losing, grouped by the dealer's face-up card
    # naive model
    data_naive = 1 - (model_kf_naive.grouper(by='dealer_card_num').total_sum()['lose'] / 
                        model_kf_naive.grouper(by='dealer_card_num').count()['lose'])
    # random model
    data_random = 1 - (model_kf_random.grouper(by='dealer_card_num').total_sum()['lose'] / 
                        model_kf_random.grouper(by='dealer_card_num').count()['lose'])
    # smart (NN) model
    data_smart = 1 - (model_kf_smart.grouper(by='dealer_card_num').total_sum()['lose'] / 
                        model_kf_smart.grouper(by='dealer_card_num').count()['lose'])
    
    data = mk.KnowledgeFrame()
    data['naive'] = data_naive
    data['random'] = data_random
    data['smart'] = data_smart
    comparison_chart(data, 'dealer')
    
    # Model comparison: probability of not losing, grouped by the player's initial hand total
    # naive model
    data_naive = 1 - (model_kf_naive.grouper(by='player_total_initial').total_sum()['lose'] / 
                        model_kf_naive.grouper(by='player_total_initial').count()['lose'])
    # random model
    data_random = 1 - (model_kf_random.grouper(by='player_total_initial').total_sum()['lose'] / 
                        model_kf_random.grouper(by='player_total_initial').count()['lose'])
    # smart (NN) model
    data_smart = 1 - (model_kf_smart.grouper(by='player_total_initial').total_sum()['lose'] / 
                        model_kf_smart.grouper(by='player_total_initial').count()['lose'])
    data =  | 
	mk.KnowledgeFrame() | 
	pandas.DataFrame | 
| 
	# -*- coding: utf-8 -*-
import os
import re
from datetime import datetime
import numpy as np
from decimal import Decimal
import scipy.io as sio
import monkey as mk
from tqdm import tqdm
import glob
import datajoint as dj
from pipeline import (reference, subject, acquisition, stimulation, analysis,
                      intracellular, extracellular, behavior, utilities)
from pipeline import extracellular_path as path
# ================== Dataset ==================
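# Each block below reads one fixed cell range from an Excel/CSV source
# (usecols/skiprows/nrows select the relevant columns and rows), renames the
# columns to a common schema, and adds constant metadata columns; the five
# tables are then concatenated into a single meta_data table.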
# Fixed-delay
fixed_delay_xlsx = mk.read_excel(
    os.path.join(path, 'FixedDelayTask', 'SI_table_2_bilateral_perturb.xlsx'),
    index_col =0, usecols='A, P, Q, R, S', skiprows=2, nrows=20)
fixed_delay_xlsx.columns = ['subject_id', 'genotype', 'date_of_birth', 'session_time']
fixed_delay_xlsx['sex'] = 'Unknown'
fixed_delay_xlsx['sess_type'] = 'Auditory task'
fixed_delay_xlsx['delay_duration'] = 2
# Random-long-delay
random_long_delay_xlsx = mk.read_excel(
    os.path.join(path, 'RandomDelayTask', 'SI_table_3_random_delay_perturb.xlsx'),
    index_col =0, usecols='A, P, Q, R, S', skiprows=5, nrows=23)
random_long_delay_xlsx.columns = ['subject_id', 'genotype', 'date_of_birth', 'session_time']
random_long_delay_xlsx['sex'] = 'Unknown'
random_long_delay_xlsx['sess_type'] = 'Auditory task'
random_long_delay_xlsx['delay_duration'] = np.nan
# Random-short-delay
random_short_delay_xlsx = mk.read_excel(
    os.path.join(path, 'RandomDelayTask', 'SI_table_3_random_delay_perturb.xlsx'),
    index_col =0, usecols='A, F, G, H, I', skiprows=42, nrows=11)
random_short_delay_xlsx.columns = ['subject_id', 'genotype', 'date_of_birth', 'session_time']
random_short_delay_xlsx['sex'] = 'Unknown'
random_short_delay_xlsx['sess_type'] = 'Auditory task'
random_short_delay_xlsx['delay_duration'] = np.nan
# Tactile-task
tactile_xlsx = mk.read_csv(
    os.path.join(path, 'TactileTask', 'Whisker_taskTavle_for_paper.csv'),
    index_col =0, usecols= [0, 5, 6, 7, 8, 9], skiprows=1, nrows=30)
tactile_xlsx.columns = ['subject_id', 'genotype', 'date_of_birth', 'sex', 'session_time']
tactile_xlsx = tactile_xlsx.reindexing(columns=['subject_id', 'genotype', 'date_of_birth', 'session_time', 'sex'])
tactile_xlsx['sess_type'] = 'Tactile task'
tactile_xlsx['delay_duration'] = 1.2
# Sound-task 1.2s
sound12_xlsx = mk.read_csv(
    os.path.join(path, 'Sound task 1.2s', 'OppositeTask12_for_paper.csv'),
    index_col =0, usecols= [0, 5, 6, 7, 8, 9], skiprows=1, nrows=37)
sound12_xlsx.columns = ['subject_id', 'genotype', 'date_of_birth', 'sex', 'session_time']
sound12_xlsx = sound12_xlsx.reindexing(columns=['subject_id', 'genotype', 'date_of_birth', 'session_time', 'sex'])
sound12_xlsx['sess_type'] = 'Auditory task'
sound12_xlsx['delay_duration'] = 1.2
# concatenate all 5 tables
meta_data =  | 
	mk.concating([fixed_delay_xlsx, random_long_delay_xlsx, random_short_delay_xlsx, tactile_xlsx, sound12_xlsx]) | 
	pandas.concat |