Dataset Viewer

Columns: prompt (string, lengths 19 to 1.03M) | completion (string, lengths 4 to 2.12k) | api (string, lengths 8 to 90)
#!/usr/bin/python3
import sys
import copy
from pathlib import Path
from datetime import datetime,timedelta
import re
import matplotlib.pyplot as plt
import math
import numpy as np
import random
import pandas as pd
import subprocess
from pickle import dump,load
from predictor.utility import msg2log
from clustgelDL.auxcfg  import D_LOGS,log2All
from canbus.BF import BF
DB_NAME="canbas"
""" DB in repository.
DB fields are following:  
"""
DT="Date Time"
DUMP="Dump"
MATCH_KEY="Match_key"
METHOD="Method"
PKL="PKL"
REPOSITORY="Repository"
MISC="Misc"
DB_COLS=[DT, DUMP, MATCH_KEY, METHOD, PKL, REPOSITORY, MISC]
# number of randomly generated 'no signal' bits in bit stream
INSERTED_NO_SIGNAL=5
# phy layer state
SIG_  = 0
SIG_0 = 1
SIG_1 = 2
# transitions between phy layer states
T__ = 0   # no signal to no signal SIG_ -> SIG_
T_0 = 1   # SIG_ -> SIG_0
T0_ = 2   # SIG_0 -> SIG_
T_1 = 3   # SIG_ -> SIG_1
T1_ = 4   # SIG_1 -> SIG_
T00 = 5   # SIG_0 -> SIG_0
T01 = 6   # SIG_0 -> SIG_1
T10 = 7   # SIG_1 -> SIG_0
T11 = 8   # SIG_1 -> SIG_1
TAN = 9
tr_names={T__:"no signal waveform",
          T_0 :"transition to zero",
          T0_ : "transition from zero",
          T_1 : "transition to one",
          T1_ : "transition from one",
          T00 : "transition zero-zero",
          T01 : "transition zero-one",
          T10 : "transition one-zero",
          T11 : "transition one-one",
          TAN : "possible anomaly"
          }
tr_labels={T__:"**", T_0:"*0",T0_:"0*",T_1:"*1",T1_:"1*",T00:"00",T01:"01",T10:"10",T11:"11",TAN:"XX"}
""" Linear interpolator for 'slope' part of waveform."""
def interpSlopeWF(fsample:float=16e+06,bitrate:float=125000.0,slope:float=0.1,left_y:float=0.0, right_y:float=1.0,
                  f:object=None)->np.array:
    """
    :param fsample:
    :param bitrate:
    :param slope:
    :param left_y:
    :param right_y:
    :param f:
    :return:
    """
    n0 = int(slope * (fsample / bitrate))
    x = [0, n0]
    y = [left_y, right_y]
    xnew = np.arange(0, n0, 1)
    yinterp = np.interp(xnew, x, y)
    pure = np.array([yinterp[i] for i in range(n0)] + [right_y for i in range(n0, int(fsample / bitrate))])
    return pure
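# Hedged usage sketch (illustrative values, not part of the original module): with the defaults
# fsample=16e+06 and bitrate=125000.0 one bit period holds int(16e+06 / 125000.0) == 128 samples;
# slope=0.1 makes the first int(0.1 * 128) == 12 samples ramp linearly from left_y to right_y
# and the remaining 116 samples hold right_y.
#   edge = interpSlopeWF(fsample=16e+06, bitrate=125000.0, slope=0.1, left_y=1.5, right_y=3.5)
#   assert len(edge) == 128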
def transitionsPng(fsample:float=16e+06,bitrate:float=125000.0,snr:float=30.0,slope:float=0.2,f:object=None):
    transition_list=[T__WF,T_0WF,T_1WF,T0_WF,T00WF,T01WF,T1_WF,T10WF,T11WF]
    SNR = snr   # use the SNR passed to the function instead of a hard-coded value
    x = np.arange(0,int(fsample / bitrate))
    suffics = '.png'
    name="simulated_waveforms"
    waveform_png = Path(D_LOGS['plot'] / Path(name)).with_suffix(suffics)
    title="Transition Waveform( SNR={} DB, slope ={}, Fsample={} MHz, Bitrate ={} K/sec)".format( SNR, slope,
                                                                fsample/10e+6, bitrate/1e+3)
    fig,ax_array =plt.subplots(nrows=3,ncols=3,figsize = (18,5),sharex=True, sharey=True)
    fig.subplots_adjust(wspace=0.5, hspace=0.5)
    fig.suptitle(title,fontsize=16)
    i=0
    for ax in np.ravel(ax_array):
        tobj=transition_list[i](fsample=fsample, bitrate=bitrate, slope=slope, SNR=SNR, f=f)
        tobj.genWF()
        auxTransitionsPng(ax, tobj, x)
        i=i+1
    plt.savefig(waveform_png)
    plt.close("all")
    return
def auxTransitionsPng(ax,tobj, x):
    # ln,=ax.plot(x, tobj.pure, x, tobj.signal)
    ln, = ax.plot(x, tobj.pure)
    ln, = ax.plot(x, tobj.signal)
    # ax[i, j].set_xlim(0, len(x) * 1 / fsample)
    ax.set_xlabel('time')
    ax.set_ylabel('Signal')
    ax.set_title(tobj.title)
    ax.grid(True)
    return ln
class canbusWF():
    """
    canbus
    """
    def __init__(self,fsample:float=16e+06,bitrate:float=125000.0,slope:float=0.1,SNR:int=3, f:object=None):
        self.fsample=fsample
        self.bitrate=bitrate
        self.slope=slope
        self.vcan_lD=1.5
        self.vcan_lR=2.5
        self.vcan_hR=2.5
        self.vcan_hD=3.5
        self.SNR=SNR    # SNR in dB: 10*log10(Psignal/Pnoise)
        self.signal=None
        self.pure = None
        self.title=""
        self.f =f
        #hist
        self.h_min=self.vcan_lD-0.7
        self.h_max=self.vcan_hD+0.7
        self.h_step=0.5
        self.bins=[float(w/10) for w in range( int(self.h_min*10),  int((self.h_max+self.h_step)*10),
                                               int(self.h_step *10))]
        self.hist = None
        self.density = None
        pass
    """ Additive white Gaussian noise  (awgn)"""
    def awgn(self,signal:np.array=None):
        sigpower = sum([math.pow(abs(signal[i]),2) for i in range (len(signal))])
        sigpower=sigpower/len(signal)
        noisepower = sigpower/(math.pow(10,self.SNR/10))
        noise=math.sqrt(noisepower)*(np.random.uniform(-1,1,size=len(signal)))
        return noise
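    # Illustrative note (added commentary, not from the original source): the noise power above
    # follows from SNR(dB) = 10*log10(P_signal / P_noise), i.e. P_noise = P_signal / 10**(SNR/10).
    # For example, SNR=20 dB gives a noise power equal to 1% of the measured signal power.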
    def histogram(self):
        self.hist,_ = np.histogram(self.signal, self.bins, density=False)
        self.density, _ = np.histogram(self.signal, self.bins, density=True)
        return
    """ Random  signal waveform shift along t-axisto simulate the random latency in  bit stream.
    Max. shift is 10% from bit waveform period. shift_n -the number of signal samples by which the shift occurs is randomly 
    generated. shift_direction - the direction of the shift forward or back is randomized too.
    """
    def rndshift(self):
        if self.signal is None:
            return
        n,=self.signal.shape
        n_dist=int(n*0.1)
        shift_n=int(np.random.randint(n_dist,size=1)[0])
        shift_direction = int(np.random.randint(3, size=1)[0])
        signal_list=self.signal.tolist()
        if shift_direction ==0: # left shift, append
            for i in range(shift_n):
                signal_list.pop(0)
                signal_list.append(signal_list[-1])
        elif shift_direction==1: #right shift, insert at 0
            for i in range(shift_n):
                signal_list.pop(-1)
                signal_list.insert(0,signal_list[0])
        elif shift_direction == 2:
            for i in range(shift_n):
                signal_list.pop(-1)
                signal_list.insert(0, self.vcan_lR )
        del self.signal
        self.signal=np.array(signal_list)
        return
class T__WF(canbusWF):
    def __init__(self,fsample:float=16e+06,bitrate:float=125000.0,slope:float=0.1,SNR:int=3, f:object=None):
        super().__init__(fsample=fsample,bitrate=bitrate,slope=0.0,SNR=SNR, f=f)
        self.title="Transition _->_"
    def genWF(self):
        self.pure=np.array([self.vcan_hR for i in range(int(self.fsample/self.bitrate))])
        self.signal=np.add(self.pure,self.awgn(self.pure))
class T_0WF(canbusWF):
    def __init__(self, fsample: float = 16e+06, bitrate: float = 125000.0, slope: float = 0.1, SNR: int = 3,
                 f: object = None):
        super().__init__(fsample=fsample, bitrate=bitrate, slope=slope, SNR=SNR, f=f)
        self.title = "Transition _->'0'"
    def genWF(self):
        self.pure = interpSlopeWF(fsample=self.fsample, bitrate=self.bitrate, slope=self.slope,
                                  left_y=self.vcan_lD, right_y=self.vcan_hD, f=self.f)
        self.signal = np.add(self.pure, self.awgn(self.pure))
class T_1WF(canbusWF):
    def __init__(self, fsample: float = 16e+06, bitrate: float = 125000.0, slope: float = 0.1, SNR: int = 3,
                 f: object = None):
        super().__init__(fsample=fsample, bitrate=bitrate, slope=slope, SNR=SNR, f=f)
        self.title = "Transition _->'1'"
    def genWF(self):
        self.pure = interpSlopeWF(fsample=self.fsample, bitrate=self.bitrate, slope=self.slope,
                                  left_y=self.vcan_lR, right_y=self.vcan_lD, f=self.f)
        self.signal = np.add(self.pure, self.awgn(self.pure))
class T0_WF(canbusWF):
    def __init__(self, fsample: float = 16e+06, bitrate: float = 125000.0, slope: float = 0.1, SNR: int = 3,
                 f: object = None):
        super().__init__(fsample=fsample, bitrate=bitrate, slope=slope, SNR=SNR, f=f)
        self.title = "Transition '0'->_"
    def genWF(self):
        self.pure = interpSlopeWF(fsample=self.fsample, bitrate=self.bitrate, slope=self.slope,
                                  left_y=self.vcan_hD, right_y=self.vcan_lD, f=self.f)
        self.signal = np.add(self.pure, self.awgn(self.pure))
class T1_WF(canbusWF):
    def __init__(self, fsample: float = 16e+06, bitrate: float = 125000.0, slope: float = 0.1, SNR: int = 3,
                 f: object = None):
        super().__init__(fsample=fsample, bitrate=bitrate, slope=slope, SNR=SNR, f=f)
        self.title = "Transition '1'->_"
    def genWF(self):
        self.pure= interpSlopeWF(fsample=self.fsample, bitrate=self.bitrate, slope=self.slope,
                                 left_y=self.vcan_lD, right_y=self.vcan_lR, f=self.f)
        self.signal=np.add(self.pure,self.awgn(self.pure))
class T00WF(canbusWF):
    def __init__(self, fsample: float = 16e+06, bitrate: float = 125000.0, slope: float = 0.1, SNR: int = 3,
                 f: object = None):
        super().__init__(fsample=fsample, bitrate=bitrate, slope=slope, SNR=SNR, f=f)
        self.title = "Transition '0'->'0'"
    def genWF(self):
        self.pure=np.array([self.vcan_hD for i in range(int(self.fsample/self.bitrate))])
        self.signal=np.add(self.pure,self.awgn(self.pure))
class T11WF(canbusWF):
    def __init__(self, fsample: float = 16e+06, bitrate: float = 125000.0, slope: float = 0.1, SNR: int = 3,
                 f: object = None):
        super().__init__(fsample=fsample, bitrate=bitrate, slope=slope, SNR=SNR, f=f)
        self.title = "Transition '1'->'1'"
    def genWF(self):
        self.pure=np.array([self.vcan_lD for i in range(int(self.fsample/self.bitrate))])
        self.signal=np.add(self.pure,self.awgn(self.pure))
class T10WF(canbusWF):
    def __init__(self, fsample: float = 16e+06, bitrate: float = 125000.0, slope: float = 0.1, SNR: int = 3,
                 f: object = None):
        super().__init__(fsample=fsample, bitrate=bitrate, slope=slope, SNR=SNR, f=f)
        self.title = "Transition '1'->'0'"
    def genWF(self):
        self.pure= interpSlopeWF(fsample=self.fsample, bitrate=self.bitrate, slope=self.slope,
                                 left_y=self.vcan_lD, right_y=self.vcan_hD, f=self.f)
        self.signal=np.add(self.pure,self.awgn(self.pure))
class T01WF(canbusWF):
    def __init__(self, fsample: float = 16e+06, bitrate: float = 125000.0, slope: float = 0.1, SNR: int = 3,
                 f: object = None):
        super().__init__(fsample=fsample, bitrate=bitrate, slope=slope, SNR=SNR, f=f)
        self.title = "Transition '0'->'1'"
    def genWF(self):
        self.pure= interpSlopeWF(fsample=self.fsample, bitrate=self.bitrate, slope=self.slope,
                                 left_y=self.vcan_hD, right_y=self.vcan_lD, f=self.f)
        self.signal=np.add(self.pure,self.awgn(self.pure))
""" Waveform per transition  dictionary """
TR_DICT={T__: T__WF,
         T_0: T_0WF,
         T_1: T_1WF,
         T0_: T0_WF,
         T00: T00WF,
         T01: T01WF,
         T1_: T1_WF,
         T10: T10WF,
         T11: T11WF}
""" Return list of  following dict 
{'DateTime':<Date Time>,
 'IF':<interface>>, 
 'ID':<canbus packet ID>,
 'Data':<canbus packet data>,
 'Packet':<ID | data> in hexa,
 'bitstr_list':<list of bits>
 'bit_str':<string of bits>
}
"""
def readChunkFromCanBusDump(offset_line:int =0, chunk_size:int=128,canbusdump:str=None, f:object=None)->list:
    parsed_list=[]
    if canbusdump is None or canbusdump=="" or not Path(canbusdump).exists():
        return parsed_list
    line_count=0
    last_line=offset_line + chunk_size
    with open(canbusdump,'r') as fcanbus:
        while line_count<offset_line:
            line = fcanbus.readline()
            if not line:
                return parsed_list
            line_count+=1
        while line_count<last_line:
            line = fcanbus.readline()
            if not line:
                return parsed_list
            line_count+=1
            parsed_list.append(parseCanBusLine(line))
    return parsed_list
"""This function parses string to 'DateTime', 'interface', 'packet ID' and 'packet Data'.
The concatenation of two elements 'ID' and 'Data' forms an additional return element 'packet'.
The packet string converts to list bit strings. Every two symbols are converted to the bit string.
All return items are merged into a dictionary. 
"""
def parseCanBusLine(line:str=None, f:object=None)->dict:
    if line is None:
        return {}
    aitems=line.split(' ')
    itemDateTime=re.search(r'\((.*?)\)',line).group(1)
    itemData=re.search(r'(?<=#)\w+',line).group(0)
    aitemID=aitems[2].split('#')
    itemID=aitemID[0]
    itemIF=aitems[1]
    if len(itemID)%2 != 0:
        itemID='0'+itemID
    if len(itemData)%2 !=0:
        itemData='0'+itemData
    itemPacket=itemID +itemData
    bitstr_list =packet2bits(packet=itemPacket,f=f)
    bit_str=''.join(bitstr_list)
    """ random generation 0-INSERTED_NO_SIGNAL 'no signal' bits marked as *"""
    nrnd=random.randrange(0,INSERTED_NO_SIGNAL+1)
    insnosigb=''.join(["*" for i in range(nrnd+1)])
    if len(insnosigb)>0:
        bit_str=bit_str+insnosigb
    return {'DateTime':itemDateTime,'IF':itemIF, 'ID':itemID,'Data':itemData,'Packet':itemPacket,
            'bitstr_list':bitstr_list,'bit_str':bit_str}
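# Hedged example (the dump line format is inferred from the parsing above and matches candump-style
# logs "(<timestamp>) <interface> <ID>#<DATA>"; the concrete values are made up):
#   parseCanBusLine("(1614159396.622123) can0 1A3#6B6B00FF")
# yields IF='can0', ID='01A3' (left-padded to an even number of hex digits), Data='6B6B00FF',
# Packet='01A36B6B00FF', the per-byte bit strings, and bit_str with 1..INSERTED_NO_SIGNAL+1
# trailing '*' (no signal) marks appended.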
""" This function forms a list of bits string from a packet data
Every two symbols (two nibbles  or byte) is hex number which is converted to bit array.
The function returns the list of bit strings.
For example, packet is '6B6B00FF'
'6B'=107 =>'1101011'
'6B'=107 =>'1101011'
'00'=0 =>'00000000' 
'FF'=255 => '11111111'
The result list contains ['1101011','1101011','00000000' ,'11111111']
"""
def packet2bits(packet:str=None,f:object=None)->list:
    start=0
    step=2
    bits_list=[]
    for i in range(start,len(packet),step):
        bss="{0:b}".format(int( packet[start:start+step], 16)).zfill(8)
        bits_list.append(bss)
        start=start +step
    return bits_list
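# Quick check, a minimal sketch matching the docstring above (not part of the original module):
#   assert packet2bits(packet='6B6B00FF') == ['01101011', '01101011', '00000000', '11111111']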
""" Transform bit to  the state, the type of waveform being be generated, according by current bit and previous state
                           st=R(bit, prev_st).
The set of states is {T__,T_0,T_1,T0_.T1_,T00,T01,T10,T11}, the current bit belongs to { '0' , '1', '*'-no signal}. 
"""
def transitionRules(prev_state:int, current_bit:str)->(int, int):
    """
    :param prev_state:
    :param current_bit:
    :return:
    """
    if prev_state==SIG_:
        if current_bit=='0':
            transition=T_0
        elif current_bit=='1':
            transition=T_1
        elif current_bit=='*':
            transition=T__
        else:
            transition=T__
    elif prev_state==SIG_0:
        if current_bit == '0':
            transition = T00
        elif current_bit == '1':
            transition = T01
        elif current_bit == '*':
            transition = T0_
        else:
            transition = T0_
    elif prev_state==SIG_1:
        if current_bit == '0':
            transition = T10
        elif current_bit == '1':
            transition = T11
        elif current_bit == '*':
            transition = T1_
        else:
            transition = T1_
    if current_bit=='0':
        new_state=SIG_0
    elif current_bit=='1':
        new_state=SIG_1
    elif current_bit == '*':
        new_state=SIG_
    else:
        new_state=SIG_
    return transition, new_state
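# Hedged examples of the rule st = R(bit, prev_st) implemented above (not part of the original module):
#   transitionRules(SIG_,  '0') -> (T_0, SIG_0)   # 'no signal' followed by a zero bit
#   transitionRules(SIG_0, '1') -> (T01, SIG_1)   # zero bit followed by a one bit
#   transitionRules(SIG_1, '*') -> (T1_, SIG_)    # one bit followed by 'no signal'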
""" Transform bit to transition according by rules 
transition=R(bit,prev_state), 
where states are { SIG_-no signal, SIG_0- zero signal, SIG_1- one signal} and
transition belongs to {T__, T_0, T_1, T0_ , T00, T01, T1_, T10, T11 }.
Return list of transition and new prev_state for next packet."""
def genTransition(prev_st:int=SIG_, bit_str:str=None, f:object=None)->(list,int):
    """ transition array generation"""
    transition=[]
    st=prev_st
    for i in range(len(bit_str)):
        tr,st=transitionRules(st, bit_str[i])
        transition.append(tr)
    prev_st=SIG_
    return transition,prev_st
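# Hedged example (assumed call, consistent with the functions above; not part of the original module):
#   genTransition(prev_st=SIG_, bit_str='01*')
#   -> ([T_0, T01, T1_], SIG_)    # the returned state is always reset to SIG_ for the next packet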
def logPackets(ld:list,offset_line:int=0,chunk_size:int=16):
    msg = "\nChunk start: {} Chunk size: {}\n".format(offset_line,chunk_size)
    msg2log(None,msg,D_LOGS['block'])
    msg = "{:<30s} {:<9s} {:^8s} {:^8s} {:<16s} ".format('Date Time','Interface','ID', 'Data','Packet')
    for dct in ld:
        msg="{:<30s} {:<9s} {:<8s} {:<8s} {:<16s} ".format(dct['DateTime'], dct['IF'], dct['ID'], dct['Data'],
                                                           dct['Packet'])
        msg2log(None,msg,D_LOGS['block'])
    return
""" For chunk generate list of trasitions."""
def trstreamGen(canbusdump:str="", offset_line:int=0, chunk_size:int=16, prev_state:int=SIG_, f:object=None)->list:
    transition_stream = []
    ld = readChunkFromCanBusDump(offset_line=offset_line, chunk_size=chunk_size, canbusdump=canbusdump, f=f)
    if not ld:
        return transition_stream
    logPackets(ld=ld,offset_line=offset_line,chunk_size=chunk_size)
    for dct in ld:
        transition, prev_state = genTransition(prev_st=prev_state, bit_str=dct['bit_str'], f=f)
        transition_stream.append(transition)
    return transition_stream
""" Generation of the waveforma according to the bit stream. 
A statistical estimate of the histogram is calculated for each waveform.
At the training stage  within one packet (frame), histograms are averaged over the type of bit transitions.
The resulting histogram concatenated with type of the bit is added to Blooom Filter. (T.B.D. - add to DB too).
At the test stage no averaging. The histogram concatenated with the type of the bit is checked with BF. If no matc there
is an anomaly symptom. 
"""
def wfstreamGen(mode:str='train',transition_stream:list=[],fsample:float=16e+6,bitrate:float=125000.0, slope:float=0.1,
                snr:float=20, trwf_d:dict=TR_DICT,bf:BF=None, title:str="", repository:str="", f:object=None)->dict:
    """
    :param mode:
    :param transition_stream:
    :param fsample:
    :param bitrate:
    :param slope:
    :param snr:
    :param trwf_d:
    :param bf:
    :param title:
    :param f:
    :return:
    """
    packet_in_stream = -1
    anomaly_d={}
    loggedSignal = np.array([])
    loggedHist   = []
    numberLoggedBit = 16
    subtitle="Fsample={} MHz Bitrate={} Kbit/sec SNR={} Db".format(round(fsample/10e+6,3), round(bitrate/10e3,2),
                                                                   round(snr,0))
    """ random number for logged packet in the stream """
    loggedPacket=random.randrange(0,len(transition_stream))
    sum_match_train  = 0
    sum_no_match_train = 0
    sum_match_test   = 0
    sum_no_match_test  = 0
    for packet in transition_stream:
        packet_in_stream+=1
        # accumulated histograms per transition type, in the structure {transition: list of histograms}
        tr_hist ={T__: [],  T_0: [],T0_: [],T_1: [], T1_: [],T00: [],T01: [],T10: [], T11: []}
        n_match_train=0
        no_match_train=0
        n_match_test = 0
        no_match_test = 0
        startLoggedBit=-1
        endLoggedBit = -1
        """ logged bits in the packet """
        if packet_in_stream == loggedPacket:
            startLoggedBit=random.randrange(0,len(packet))
            endLoggedBit  =startLoggedBit + numberLoggedBit
        bit_in_packet=-1
        for transit in packet:
            bit_in_packet+=1
            cb=trwf_d[transit](fsample=fsample,bitrate=bitrate, slope=slope, SNR=snr, f=f)
            cb.genWF()
            cb.histogram()
            """ select signals for charting """
            if bit_in_packet>=startLoggedBit and bit_in_packet<endLoggedBit:
                loggedSignal=np.concatenate((loggedSignal,cb.signal))
                loggedHist.append(cb.hist)
            if mode=='train':
                tr_hist[transit].append(cb.hist)
                continue
            """ hist to word """
            if bf is None:
                continue
            word=hex(transit).lstrip("0x")+"_"
            word = word + ''.join([hex(vv).lstrip("0x").rstrip("L") for vv in cb.hist.tolist()])
            if not bf.check_item(word):
                msg="no match in DB for {} transition  in {} packet".format(transit, packet_in_stream)
                msg2log("Warning!",msg,D_LOGS['predict'])
                msg2log("Warning!", msg, f)
                anomaly_d[packet_in_stream]={transit:tr_names[transit]}
                no_match_test += 1
            else:
                msg2log(None, "Match for {} transition  in {} packet".format(transit, packet_in_stream), D_LOGS['predict'])
                n_match_test += 1
        if mode=='test':
            msg2log(None, "\nTest\nmatch: {} no match: {}".format(n_match_test,no_match_test), D_LOGS['predict'])
        if mode=='train':
            """ histogram averaging """
            for key,val in tr_hist.items():
                if not val:
                    continue
                allhists=np.array(val)
                avehist=np.average(allhists,axis=0)
                if bf is None:
                    continue
                word = hex(key).lstrip("0x") + "_"
                word = word + ''.join([hex(int(vv)).lstrip("0x").rstrip("L") for vv in avehist.tolist()])
                if bf.check_item(word):
                    msg2log(None,"Match for {} transition  in {} packet".format(key,packet_in_stream),D_LOGS['train'])
                    n_match_train+=1
                else:
                    bf.add_item(word)
                    no_match_train+=1
            msg2log(None,"\nTrain\nmatch: {} no match:{}".format(n_match_train,no_match_train),D_LOGS['train'])
        sum_match_train +=n_match_train
        sum_no_match_train +=no_match_train
        sum_match_test +=n_match_test
        sum_no_match_test +=no_match_test
    if mode=="train":
        msg2log(None, "\nTrain summary for SNR={} DB\nmatch: {} no match:{}".format(snr, sum_match_train, sum_no_match_train),
                D_LOGS['train'])
        bf.save(repository)
    if mode=="test":
        msg2log(None, "\nTest summary for SNR = {} DB\nmatch: {} no match:{}".format(snr,sum_match_test, sum_no_match_test),
                    D_LOGS['predict'])
    log2All()
    if len(loggedSignal)>0:
        plotSignal(mode=mode, signal=loggedSignal, packetNumber=0, fsample=fsample, startBit=startLoggedBit,
                       title=title, subtitle=subtitle)
    return anomaly_d
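# Hedged usage sketch (the dump path and the BF() construction are assumptions; the call pattern follows
# the functions above):
#   stream = trstreamGen(canbusdump="candump.log", offset_line=0, chunk_size=16, prev_state=SIG_)
#   bf = BF()                                    # Bloom filter from canbus.BF
#   wfstreamGen(mode='train', transition_stream=stream, snr=20, bf=bf,
#               title="train_waveforms", repository="./repository")
#   anomalies = wfstreamGen(mode='test', transition_stream=stream, snr=20, bf=bf,
#                           title="test_waveforms", repository="./repository")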
def plotSignal(mode:str="train", signal:np.array=None, fsample:float=1.0, packetNumber:int=0, startBit:int=0,
               title:str="",subtitle:str=""):
    suffics = '.png'
    signal_png = Path(D_LOGS['plot'] / Path(title)).with_suffix(suffics)
    delta=1.0/fsample
    t=np.arange(0.0, (len(signal)-1)*delta, delta)
    n=min(len(t),len(signal))
    fig, ax = plt.subplots(figsize=(18, 5))
    ax.plot(t[:n],signal[:n], color='r')
    ax.set_xlabel('time')
    ax.set_ylabel('Signal waveform')
    ax.set_title(title)
    ax.grid(True)
    plt.savefig(signal_png)
    plt.close("all")
    return
""" Get number of lines in dump file.
This function is executed ib the subprocess"""
def file_len(fname)->int:
    n=-1
    if Path(fname).exists():
        if sys.platform.startswith('linux'):
            try:
                p = subprocess.Popen(['wc', '-l', fname], stdout=subprocess.PIPE,
                                                          stderr=subprocess.PIPE)
                result, err = p.communicate()
                if p.returncode != 0:
                    # we do not raise any exception here (e.g. raise IOError(err))
                    n = -2
                else:
                    n = int(result.strip().split()[0])
            except:
                pass
            finally:
                pass
        elif sys.platform.startswith('win'):
            fr=open(fname,'r')
            n=0
            while 1:
                line=fr.readline()
                if not line:
                    break
                n+=1
            fr.close()
        else:
            n=-3
    return n
def dict2csv(d:dict=None, folder:str="", title:str="", dset_name:str=None, match_key:str='ID', f:object=None):
    if d is None:
        msg2log(None,"No dictionary {} {} for saving".format(title,match_key),f)
        return
    if dset_name is None or len(dset_name)<1 or ".csv" not in dset_name:
        msg2log(None,"Dtaset name is not set correctly {}".format(dset_name),f)
        return
    df=pd.DataFrame(d)
    df.to_csv(dset_name)
    msg2log(None,"{} dictionary for {} saved in {}".format(title,match_key,dset_name),f)
    return
def dict2pkl(d:dict=None, folder:str="", title:str="", match_key:str='ID', f:object=None)->(str,str):
    if d is None:
        msg2log(None,"No dictionary {} {} for saving".format(title,match_key),f)
        return
    file_pkl=Path(Path(folder)/Path("{}_{}".format(title,match_key))).with_suffix(".pkl")
    with open(str(file_pkl),"wb") as f_pkl:
        dump(d,f_pkl)
    msg2log(None,"{} dictionary for {} saved in {}".format(title,match_key,str(file_pkl)),f)
    pkl_stem=file_pkl.stem
    return pkl_stem, str(file_pkl)
def pkl2dict( folder: str = "", title: str = "", match_key: str = 'ID', pkl_stem:str="", f: object = None):
    file_pkl = Path(Path(folder) / Path("{}".format(pkl_stem))).with_suffix(".pkl")
    if not file_pkl.exists():
        msg="Serialized dictionary {} for {} -match key was not found in {} repository".format(pkl_stem,
                                                                                               match_key,folder)
        msg2log(None,msg,f)
        return None
    with open(str(file_pkl), "rb") as f_pkl:
        d=load(f_pkl)
    msg2log(None, "{} dictionary for {} loaded from {}".format(title, match_key, str(file_pkl)), f)
    return d
"""" statistical estimation for observed data"""
def mleexp(target_dict:dict=None, mleexp_dict:dict=None, n_min:int=5, title:str="Train path", f:object=None):
    msg="{}\n,Rare packets, no maximum likelihood estimation for exponential distribution of time gaps between " +\
        " packets appearing.".format(title)
    msg2log(None, msg, D_LOGS['cluster'])
    for key,vlist in target_dict.items():
        if len(vlist)<n_min:
            msg=f"""Packet with matched mey: {key}  is rare event: {len(vlist)} appearings"""
            msg2log(None,msg,D_LOGS['cluster'])
            continue
        l_duration=[vlist[i]-vlist[i-1] for i in range(1,len(vlist))]
        n=len(l_duration)
        sum_items=float(sum(l_duration))/1e06   # in seconds
        mle_lambda=float(n)/sum_items
        mle_var_lambda=(mle_lambda*mle_lambda)/float(n)
        mleexp_dict[key]={"n":n,"mle":mle_lambda,"var":mle_var_lambda,"sample":l_duration}
    return
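# Worked example (illustrative numbers, not from the original source): for one matched key with packet
# arrival times [0, 2e6, 5e6, 9e6] microseconds the gaps are [2e6, 3e6, 4e6] us = 9 s in total, so n=3,
# mle = n/sum = 3/9 = 0.333 packets/sec and var(mle) = mle**2/n = 0.037.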
def KL_decision(train_mleexp:dict=None, test_mleexp:dict=None, title:str="Anomaly packet",f:object=None)->list:
    trainSet=set(train_mleexp)
    testSet=set(test_mleexp)
    anomaly_list=[]
    chi2_1_05=3.84
    for key in trainSet.intersection(testSet):
        anomaly_counter=0
        train_val=train_mleexp[key]
        test_val=test_mleexp[key]
        lst_val=train_val['sample']+test_val['sample']
        xmean=np.array(lst_val).mean()
        xtrain=np.array(train_val['sample']).mean()
        xtest = np.array(test_val['sample']).mean()
        ntrain=train_val['n']
        ntest = test_val['n']
        KL2I12=ntrain*(xtrain-xmean)*(xtrain-xmean)/xmean + ntest*(xtest-xmean)*(xtest-xmean)/xmean
        KLJ12 =0.5*KL2I12 + 0.5 *( ntrain * (xtrain - xmean) * (xtrain - xmean) / xtrain + ntest * (xtest - xmean) * (
                    xtest - xmean) / xtest)
        if KL2I12>chi2_1_05 or KLJ12 > chi2_1_05:
            anomaly_counter+=1
            anomaly_list.append({'matched_key':key,"2I(1:2)":KL2I12,"J(1,2)":KLJ12, "chi2(1,0.05)":chi2_1_05,
                                 "train":train_val,"test":test_val,})
    return anomaly_list
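# Hedged note (added commentary, not new behaviour): 2I(1:2) and J(1,2) above are Kullback-Leibler
# divergence statistics comparing the train and test mean gaps; under the hypothesis of equal means
# they are asymptotically chi-square distributed with one degree of freedom, hence the 5% critical
# value chi2(1, 0.05) = 3.84 used as the decision threshold.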
def manageDB(repository:str=None, db:str=None,op:str='select',d_query:dict={}, f:object=None)->dict:
    file_db=Path(Path(repository)/Path(db)).with_suffix(".csv")
    if not file_db.exists():
        createDB(file_db=file_db, f=f)
    d_res = {}
    if op=='select':
        d_res = selectDB(file_db=file_db, d_query=d_query,f=f)
    elif op=='insert':
        d_res = insertDB(file_db= file_db, d_query = d_query, f = f)
        pass
    elif op=='update':
        pass
    elif op=='log':
        pass
    else:
        pass
    return d_res
def createDB(file_db:str=None, f:object=None):
    df=pd.DataFrame(columns=DB_COLS)
    df.to_csv(file_db,index=False)
    msg2log(None,"DB created {}".format(file_db),)
    return
def selectDB(file_db:str=None, d_query:dict=None,f:object=None)->dict:
    if file_db is None or not Path(file_db).exists() or d_query is None or len(d_query)==0:
        return None
    d_res={}
    l_res=[]   #list of dict
    df=pd.read_csv(file_db)
    for index,row in df.iterrows():
        keys=list(row.keys())
        if dictIndict(row,d_query,f=f):
            l_res.append(row)
    if len(l_res)>0:
        msg=f"""
Query: {d_query}
Selected: {l_res}
"""
        msg2log(None,msg,f)
        d_res=dict(l_res[-1])  # select the last item in the list; it has Series type, so it is cast to dict
    return d_res
def insertDB(file_db:str=None, d_query:dict=None,f:object=None)->dict:
    if file_db is None or not Path(file_db).exists() or d_query is None or len(d_query)==0:
        return None
    keys=list(d_query.keys())
    d_insert={item:"" for item in DB_COLS}
    for item in keys:
        d_insert[item]=d_query[item]
    d_insert[DT]= 
completion: pd.Timestamp.now()
api: pandas.Timestamp.now
#!/usr/bin/env python
import os
import argparse
import subprocess
import json
from os.path import isfile, join, basename
import time
import pandas as pd 
from datetime import datetime
import tempfile
import sys
sys.path.append(
    os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'instance_generator')))
import route_gen
def main():
    '''
    The benchmark algorithm works as follows:
        For a certain number of iterations:
            generate an instance with the default generator values
            for each encoding inside the subfolders of ../encoding (one folder per encoding):
                start a timer
                solve with clingo
                stop the timer
                test the solution:
                    if legal:
                        add the time to a CSV
                    else:
                        add sys.maxsize as the time
                        print an error message
    '''
    parser = argparse.ArgumentParser(description='Benchmark ! :D')
    parser.add_argument('--runs', type=int, help="the number of runs of the benchmark")
    parser.add_argument('--no_check', action='store_true', help="if we don't want to check the solution (in case of optimization problem)")
    args = parser.parse_args()
    number_of_run = args.runs
    print("Start of the benchmarks")
    encodings = [x for x in os.listdir("../encoding/")]
    print("Encodings to test:")
    for encoding in encodings:
        print("\t-{}".format(encoding))
    results = []
    costs_run = []
    for i in range(number_of_run):
        print("Iteration {}".format(i + 1))
        result_iteration = dict()
        cost_iteration = dict()
        instance, minimal_cost = route_gen.instance_generator()
        # we get the upper bound of the solution generated by the generator
        cost_iteration["Benchmark_Cost"] = minimal_cost
        correct_solution = True
        instance_temp = tempfile.NamedTemporaryFile(mode="w+", suffix='.lp', dir=".", delete=False)
        instance_temp.write(repr(instance))
        instance_temp.flush()
        for encoding in encodings:
            print("Encoding {}:".format(encoding))
            files_encoding = ["../encoding/" + encoding + "/" + f for f in os.listdir("../encoding/" + encoding) if isfile(join("../encoding/" + encoding, f))]
            start = time.time()
            try:
                if 'parallel' == encoding:
                    clingo = subprocess.Popen(["clingo"] + files_encoding + [basename(instance_temp.name)] + ["--outf=2"] + ['-t 8compete'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                else:
                    clingo = subprocess.Popen(["clingo"] + files_encoding + [basename(instance_temp.name)] + ["--outf=2"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                (stdoutdata, stderrdata) = clingo.communicate(timeout=3600)
                clingo.wait()
                end = time.time()
                duration = end - start
                json_answers = json.loads(stdoutdata)
                cost = float('inf')
                answer = []
                # we need to check all solution and get the best one
                for call_current in json_answers["Call"]:
                    if "Witnesses" in call_current:
                        answer_current = call_current["Witnesses"][-1]
                        if "Costs" in answer_current:
                            current_cost = sum(answer_current["Costs"])
                            if current_cost < cost:
                                answer = answer_current["Value"]
                                cost = current_cost
                        else:
                            cost = 0
                            answer = answer_current["Value"]
                # we append "" just to get the last . when we join latter
                answer = answer + [""]
                answer_str = ".".join(answer)
                answer_temp = tempfile.NamedTemporaryFile(mode="w+", suffix='.lp', dir=".", delete=False)
                answer_temp.write(answer_str)
                # flush so the file is completely written before clingo reads it
                answer_temp.flush()
                clingo_check = subprocess.Popen(
                    ["clingo"] + ["../test_solution/test_solution.lp"] + [basename(answer_temp.name)] + [
                        basename(instance_temp.name)] + ["--outf=2"] + ["-q"], stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE)
                (stdoutdata_check, stderrdata_check) = clingo_check.communicate()
                clingo_check.wait()
                json_check = json.loads(stdoutdata_check)
                answer_temp.close()
                os.remove(answer_temp.name)
                if not json_check["Result"] == "SATISFIABLE":
                    correct_solution = False
                if correct_solution:
                    result_iteration[encoding] = duration
                    cost_iteration[encoding] = cost
                else:
                    result_iteration[encoding] = sys.maxsize
                    cost_iteration[encoding] = float("inf")
                print("\tSatisfiable {}".format(correct_solution))
                print("\tDuration {} seconds".format(result_iteration[encoding]))
                print("\tBest solution {}".format(cost))
                print("\tBenchmark cost {}".format(minimal_cost))
            except Exception as excep:
                print("\tError: {}".format(excep))
                result_iteration[encoding] = sys.maxsize
                cost_iteration[encoding] = float('inf')
        results.append(result_iteration)
        costs_run.append(cost_iteration)
        instance_temp.close()
        os.remove(basename(instance_temp.name))
    df =  
completion: pd.DataFrame(results)
api: pandas.DataFrame
import os
from typing import List, Tuple, Union
import numpy as np
import pandas as pd
DATASET_DIR: str = "data/"
# https://www.kaggle.com/rakannimer/air-passengers
def read_air_passengers() -> Tuple[pd.DataFrame, np.ndarray]:
    indexes = [6, 33, 36, 51, 60, 100, 135]
    values = [205, 600, 150, 315, 150, 190, 620]
    return _add_outliers_set_datetime(
         
completion: pd.read_csv(f"{DATASET_DIR}air_passengers.csv")
api: pandas.read_csv
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File    :   ioutil.py
@Desc    :   Input and output data function.
'''
# here put the import lib
import os
import sys
import pandas as pd
import numpy as np
from . import TensorData
import csv
from .basicutil import set_trace
class File():
    def __init__(self, filename, mode, idxtypes):
        self.filename = filename
        self.mode = mode
        self.idxtypes = idxtypes
        self.dtypes = None
        self.sep = None
    def get_sep_of_file(self):
        '''
        return the separator of the line.
        :param infn: input file
        '''
        sep = None
        fp = open(self.filename, self.mode)
        for line in fp:
            line = line.decode(
                'utf-8') if isinstance(line, bytes) else line
            if (line.startswith("%") or line.startswith("#")):
                continue
            line = line.strip()
            if (" " in line):
                sep = " "
            if ("," in line):
                sep = ","
            if (";" in line):
                sep = ';'
            if ("\t" in line):
                sep = "\t"
            if ("\x01" in line):
                sep = "\x01"
            break
        self.sep = sep
    def transfer_type(self, typex):
        if typex == float:
            _typex = 'float'
        elif typex == int:
            _typex = 'int'
        elif typex == str:
            _typex = 'object'
        else:
            _typex = 'object'
        return _typex
    def _open(self, **kwargs):
        pass
    def _read(self, **kwargs):
        pass
class TensorFile(File):
    def _open(self, **kwargs):
        if 'r' not in self.mode:
            self.mode += 'r'
        f = open(self.filename, self.mode)
        pos = 0
        cur_line = f.readline()
        while cur_line.startswith("#"):
            pos = f.tell()
            cur_line = f.readline()
        f.seek(pos)
        _f = open(self.filename, self.mode)
        _f.seek(pos)
        fin = pd.read_csv(f, sep=self.sep, **kwargs)
        column_names = fin.columns
        self.dtypes = {}
        if not self.idxtypes is None:
            for idx, typex in self.idxtypes:
                self.dtypes[column_names[idx]] = self.transfer_type(typex)
            fin = pd.read_csv(_f, dtype=self.dtypes, sep=self.sep, **kwargs)
        else:
            fin = pd.read_csv(_f, sep=self.sep, **kwargs)
        return fin
    def _read(self, **kwargs):
        tensorlist = []
        self.get_sep_of_file()
        _file = self._open(**kwargs)
        if not self.idxtypes is None:
            idx = [i[0] for i in self.idxtypes]
            tensorlist = _file[idx]
        else:
            tensorlist = _file
        return tensorlist
class CSVFile(File):
    def _open(self, **kwargs):
        f = pd.read_csv(self.filename, **kwargs)
        column_names = list(f.columns)
        self.dtypes = {}
        if not self.idxtypes is None:
            for idx, typex in self.idxtypes:
                self.dtypes[column_names[idx]] = self.transfer_type(typex)
            f = pd.read_csv(self.filename, dtype=self.dtypes, **kwargs)
        else:
            f = pd.read_csv(self.filename, **kwargs)
        return f
    def _read(self, **kwargs):
        tensorlist =  
completion: pd.DataFrame()
api: pandas.DataFrame
import logging
import os
import pickle
import tarfile
from typing import Tuple
import numpy as np
import pandas as pd
import scipy.io as sp_io
import shutil
from scipy.sparse import csr_matrix, issparse
from scMVP.dataset.dataset import CellMeasurement, GeneExpressionDataset, _download
logger = logging.getLogger(__name__)
class ATACDataset(GeneExpressionDataset):
    """Loads a file from `10x`_ website.
    :param dataset_name: Name of the dataset file. Has to be one of:
        "CellLineMixture", "AdBrainCortex", "P0_BrainCortex".
    :param save_path: Location to use when saving/loading the data.
    :param type: Either `filtered` data or `raw` data.
    :param dense: Whether to load as dense or sparse.
        If False, data is cast to sparse using ``scipy.sparse.csr_matrix``.
    :param measurement_names_column: column in which to find measurement names in the corresponding `.tsv` file.
    :param remove_extracted_data: Whether to remove extracted archives after populating the dataset.
    :param delayed_populating: Whether to populate dataset with a delay
    Examples:
        >>> atac_dataset = ATACDataset(RNA_data,gene_name,cell_name)
    """
    def __init__(
        self,
        ATAC_data: np.matrix = None,
        ATAC_name: pd.DataFrame = None,
        cell_name: pd.DataFrame = None,
        delayed_populating: bool = False,
        is_filter = True,
        datatype="atac_seq",
    ):
        if ATAC_data is None:
            raise Exception("Invalid Input, the gene expression matrix is empty!")
        self.ATAC_data = ATAC_data
        self.ATAC_name = ATAC_name
        self.cell_name = cell_name
        self.is_filter = is_filter
        self.datatype = datatype
        self.cell_name_formulation = None
        self.atac_name_formulation = None
        if not isinstance(self.ATAC_name, pd.DataFrame):
            self.ATAC_name =  
completion: pd.DataFrame(self.ATAC_name)
api: pandas.DataFrame
from flask import Flask, render_template, jsonify, request
from flask_pymongo import PyMongo
from flask_cors import CORS, cross_origin
import json
import copy
import warnings
import re
import pandas as pd 
pd.set_option('use_inf_as_na', True)
import numpy as np
from joblib import Memory
from xgboost import XGBClassifier
from sklearn import model_selection
from bayes_opt import BayesianOptimization
from sklearn.model_selection import cross_validate
from sklearn.model_selection import cross_val_predict
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import classification_report
from sklearn.feature_selection import mutual_info_classif
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
from sklearn.feature_selection import RFECV
from sklearn.linear_model import LogisticRegression
from eli5.sklearn import PermutationImportance
from joblib import Parallel, delayed
import multiprocessing
from statsmodels.stats.outliers_influence import variance_inflation_factor
from statsmodels.tools.tools import add_constant
# this block of code is for the connection between the server, the database, and the client (plus routing)
# access MongoDB 
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/mydb"
mongo = PyMongo(app)
cors = CORS(app, resources={r"/data/*": {"origins": "*"}})
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/Reset', methods=["GET", "POST"])
def reset():
    global DataRawLength
    global DataResultsRaw
    global previousState
    previousState = []
    global StanceTest
    StanceTest = False
    global filterActionFinal
    filterActionFinal = ''
    global keySpecInternal
    keySpecInternal = 1
    global RANDOM_SEED
    RANDOM_SEED = 42
    global keyData
    keyData = 0
    global keepOriginalFeatures
    keepOriginalFeatures = []
    global XData
    XData = []
    global yData
    yData = []
    global XDataNoRemoval
    XDataNoRemoval = []
    global XDataNoRemovalOrig
    XDataNoRemovalOrig = []
    global XDataStored
    XDataStored = []
    global yDataStored
    yDataStored = []
    
    global finalResultsData
    finalResultsData = []
    global detailsParams
    detailsParams = []
    global algorithmList
    algorithmList = []
    global ClassifierIDsList
    ClassifierIDsList = ''
    global RetrieveModelsList
    RetrieveModelsList = []
    global allParametersPerfCrossMutr
    allParametersPerfCrossMutr = []
    global all_classifiers
    all_classifiers = []
    global crossValidation
    crossValidation = 8
    #crossValidation = 5
    #crossValidation = 3
    global resultsMetrics
    resultsMetrics = []
    global parametersSelData
    parametersSelData = []
    global target_names
    target_names = []
    global keyFirstTime
    keyFirstTime = True
    global target_namesLoc
    target_namesLoc = []
    global featureCompareData
    featureCompareData = []
    global columnsKeep
    columnsKeep = []
    global columnsNewGen
    columnsNewGen = []
    global columnsNames
    columnsNames = []
    global fileName
    fileName = []
    global listofTransformations
    listofTransformations = ["r","b","zs","mms","l2","l1p","l10","e2","em1","p2","p3","p4"]
    return 'The reset was done!'
# retrieve data from client and select the correct data set
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequest', methods=["GET", "POST"])
def retrieveFileName():
    global DataRawLength
    global DataResultsRaw
    global DataResultsRawTest
    global DataRawLengthTest
    global DataResultsRawExternal
    global DataRawLengthExternal
    global fileName
    fileName = []
    fileName = request.get_data().decode('utf8').replace("'", '"')
    global keySpecInternal
    keySpecInternal = 1
    global filterActionFinal
    filterActionFinal = ''
    global dataSpacePointsIDs
    dataSpacePointsIDs = []
    global RANDOM_SEED
    RANDOM_SEED = 42
    global keyData
    keyData = 0
    global keepOriginalFeatures
    keepOriginalFeatures = []
    global XData
    XData = []
    global XDataNoRemoval
    XDataNoRemoval = []
    global XDataNoRemovalOrig
    XDataNoRemovalOrig = []
    global previousState
    previousState = []
    global yData
    yData = []
    global XDataStored
    XDataStored = []
    global yDataStored
    yDataStored = []
    global finalResultsData
    finalResultsData = []
    global ClassifierIDsList
    ClassifierIDsList = ''
    global algorithmList
    algorithmList = []
    global detailsParams
    detailsParams = []
    # Initializing models
    global RetrieveModelsList
    RetrieveModelsList = []
    global resultsList
    resultsList = []
    global allParametersPerfCrossMutr
    allParametersPerfCrossMutr = []
    global HistoryPreservation
    HistoryPreservation = []
    global all_classifiers
    all_classifiers = []
    global crossValidation
    crossValidation = 8
    #crossValidation = 5
    #crossValidation = 3
    global parametersSelData
    parametersSelData = []
    global StanceTest
    StanceTest = False
    global target_names
    
    target_names = []
    global keyFirstTime
    keyFirstTime = True
    global target_namesLoc
    target_namesLoc = []
    global featureCompareData
    featureCompareData = []
    global columnsKeep
    columnsKeep = []
    global columnsNewGen
    columnsNewGen = []
    global columnsNames
    columnsNames = []
    global listofTransformations
    listofTransformations = ["r","b","zs","mms","l2","l1p","l10","e2","em1","p2","p3","p4"]
    DataRawLength = -1
    DataRawLengthTest = -1
    data = json.loads(fileName)  
    if data['fileName'] == 'HeartC':
        CollectionDB = mongo.db.HeartC.find()
        target_names.append('Healthy')
        target_names.append('Diseased')
    elif data['fileName'] == 'biodegC':
        StanceTest = True
        CollectionDB = mongo.db.biodegC.find()
        CollectionDBTest = mongo.db.biodegCTest.find()
        CollectionDBExternal = mongo.db.biodegCExt.find()
        target_names.append('Non-biodegr.')
        target_names.append('Biodegr.')
    elif data['fileName'] == 'BreastC':
        CollectionDB = mongo.db.breastC.find()
    elif data['fileName'] == 'DiabetesC':
        CollectionDB = mongo.db.diabetesC.find()
        target_names.append('Negative')
        target_names.append('Positive')
    elif data['fileName'] == 'MaterialC':
        CollectionDB = mongo.db.MaterialC.find()
        target_names.append('Cylinder')
        target_names.append('Disk')
        target_names.append('Flatellipsold')
        target_names.append('Longellipsold')
        target_names.append('Sphere')
    elif data['fileName'] == 'ContraceptiveC':
        CollectionDB = mongo.db.ContraceptiveC.find()
        target_names.append('No-use')
        target_names.append('Long-term')
        target_names.append('Short-term')
    elif data['fileName'] == 'VehicleC':
        CollectionDB = mongo.db.VehicleC.find()
        target_names.append('Van')
        target_names.append('Car')
        target_names.append('Bus')
    elif data['fileName'] == 'WineC':
        CollectionDB = mongo.db.WineC.find()
        target_names.append('Fine')
        target_names.append('Superior')
        target_names.append('Inferior')
    else:
        CollectionDB = mongo.db.IrisC.find()
    DataResultsRaw = []
    for index, item in enumerate(CollectionDB):
        item['_id'] = str(item['_id'])
        item['InstanceID'] = index
        DataResultsRaw.append(item)
    DataRawLength = len(DataResultsRaw)
    DataResultsRawTest = []
    DataResultsRawExternal = []
    if (StanceTest):
        for index, item in enumerate(CollectionDBTest):
            item['_id'] = str(item['_id'])
            item['InstanceID'] = index
            DataResultsRawTest.append(item)
        DataRawLengthTest = len(DataResultsRawTest)
        for index, item in enumerate(CollectionDBExternal):
            item['_id'] = str(item['_id'])
            item['InstanceID'] = index
            DataResultsRawExternal.append(item)
        DataRawLengthExternal = len(DataResultsRawExternal)
    dataSetSelection()
    return 'Everything is okay'
# Retrieve data set from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendtoSeverDataSet', methods=["GET", "POST"])
def sendToServerData():
    uploadedData = request.get_data().decode('utf8').replace("'", '"')
    uploadedDataParsed = json.loads(uploadedData)
    DataResultsRaw = uploadedDataParsed['uploadedData']
    DataResults = copy.deepcopy(DataResultsRaw)
    for dictionary in DataResultsRaw:
        for key in dictionary.keys():
            if (key.find('*') != -1):
                target = key
                continue
        continue
    DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
    DataResults.sort(key=lambda x: x[target], reverse=True)
    for dictionary in DataResults:
        del dictionary[target]
    global AllTargets
    global target_names
    global target_namesLoc
    AllTargets = [o[target] for o in DataResultsRaw]
    AllTargetsFloatValues = []
    global fileName
    data = json.loads(fileName) 
    previous = None
    Class = 0
    for i, value in enumerate(AllTargets):
        if (i == 0):
            previous = value
            if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
                target_names.append(value)
            else:
                pass
        if (value == previous):
            AllTargetsFloatValues.append(Class)
        else:
            Class = Class + 1
            if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
                target_names.append(value)
            else:
                pass
            AllTargetsFloatValues.append(Class)
            previous = value
    ArrayDataResults = pd.DataFrame.from_dict(DataResults)
    global XData, yData, RANDOM_SEED
    XData, yData = ArrayDataResults, AllTargetsFloatValues
    global XDataStored, yDataStored
    XDataStored = XData.copy()
    yDataStored = yData.copy()
    global XDataStoredOriginal
    XDataStoredOriginal = XData.copy()
    global finalResultsData
    finalResultsData = XData.copy()
    global XDataNoRemoval 
    XDataNoRemoval = XData.copy()
    global XDataNoRemovalOrig
    XDataNoRemovalOrig = XData.copy()
    return 'Processed uploaded data set'
def dataSetSelection():
    global XDataTest, yDataTest
    XDataTest = pd.DataFrame()
    global XDataExternal, yDataExternal
    XDataExternal = pd.DataFrame()
    global StanceTest
    global AllTargets
    global target_names
    target_namesLoc = []
    if (StanceTest):
        DataResultsTest = copy.deepcopy(DataResultsRawTest)
        for dictionary in DataResultsRawTest:
            for key in dictionary.keys():
                if (key.find('*') != -1):
                    target = key
                    continue
            continue
        DataResultsRawTest.sort(key=lambda x: x[target], reverse=True)
        DataResultsTest.sort(key=lambda x: x[target], reverse=True)
        for dictionary in DataResultsTest:
            del dictionary['_id']
            del dictionary['InstanceID']
            del dictionary[target]
        AllTargetsTest = [o[target] for o in DataResultsRawTest]
        AllTargetsFloatValuesTest = []
        previous = None
        Class = 0
        for i, value in enumerate(AllTargetsTest):
            if (i == 0):
                previous = value
                target_namesLoc.append(value)
            if (value == previous):
                AllTargetsFloatValuesTest.append(Class)
            else:
                Class = Class + 1
                target_namesLoc.append(value)
                AllTargetsFloatValuesTest.append(Class)
                previous = value
        ArrayDataResultsTest = pd.DataFrame.from_dict(DataResultsTest)
        XDataTest, yDataTest = ArrayDataResultsTest, AllTargetsFloatValuesTest
        DataResultsExternal = copy.deepcopy(DataResultsRawExternal)
        for dictionary in DataResultsRawExternal:
            for key in dictionary.keys():
                if (key.find('*') != -1):
                    target = key
                    continue
            continue
        DataResultsRawExternal.sort(key=lambda x: x[target], reverse=True)
        DataResultsExternal.sort(key=lambda x: x[target], reverse=True)
        for dictionary in DataResultsExternal:
            del dictionary['_id']
            del dictionary['InstanceID']
            del dictionary[target]
        AllTargetsExternal = [o[target] for o in DataResultsRawExternal]
        AllTargetsFloatValuesExternal = []
        previous = None
        Class = 0
        for i, value in enumerate(AllTargetsExternal):
            if (i == 0):
                previous = value
                target_namesLoc.append(value)
            if (value == previous):
                AllTargetsFloatValuesExternal.append(Class)
            else:
                Class = Class + 1
                target_namesLoc.append(value)
                AllTargetsFloatValuesExternal.append(Class)
                previous = value
        ArrayDataResultsExternal = pd.DataFrame.from_dict(DataResultsExternal)
        XDataExternal, yDataExternal = ArrayDataResultsExternal, AllTargetsFloatValuesExternal
    DataResults = copy.deepcopy(DataResultsRaw)
    for dictionary in DataResultsRaw:
        for key in dictionary.keys():
            if (key.find('*') != -1):
                target = key
                continue
        continue
    DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
    DataResults.sort(key=lambda x: x[target], reverse=True)
    for dictionary in DataResults:
        del dictionary['_id']
        del dictionary['InstanceID']
        del dictionary[target]
    AllTargets = [o[target] for o in DataResultsRaw]
    AllTargetsFloatValues = []
    global fileName
    data = json.loads(fileName) 
    previous = None
    Class = 0
    for i, value in enumerate(AllTargets):
        if (i == 0):
            previous = value
            if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
                target_names.append(value)
            else:
                pass
        if (value == previous):
            AllTargetsFloatValues.append(Class)
        else:
            Class = Class + 1
            if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
                target_names.append(value)
            else:
                pass
            AllTargetsFloatValues.append(Class)
            previous = value
    dfRaw = pd.DataFrame.from_dict(DataResultsRaw)
    
    # OneTimeTemp = copy.deepcopy(dfRaw)
    # OneTimeTemp.drop(columns=['_id', 'InstanceID'])
    # column_names = ['volAc',  'chlorides',  'density',  'fixAc' , 'totalSuDi' , 'citAc',  'resSu'  ,  'pH' , 'sulphates', 'freeSulDi' ,'alcohol', 'quality*']
    # OneTimeTemp = OneTimeTemp.reindex(columns=column_names)
    # OneTimeTemp.to_csv('dataExport.csv', index=False)
    ArrayDataResults = pd.DataFrame.from_dict(DataResults)
    global XData, yData, RANDOM_SEED
    XData, yData = ArrayDataResults, AllTargetsFloatValues
    global keepOriginalFeatures
    global OrignList
    if (data['fileName'] == 'biodegC'):
        keepOriginalFeatures = XData.copy()
        storeNewColumns = []
        for col in keepOriginalFeatures.columns:
            newCol = col.replace("-", "_")
            storeNewColumns.append(newCol.replace("_",""))
        keepOriginalFeatures.columns = [str(col) + ' F'+str(idx+1)+'' for idx, col in enumerate(storeNewColumns)]
        columnsNewGen = keepOriginalFeatures.columns.values.tolist()
        OrignList = keepOriginalFeatures.columns.values.tolist()   
    else:
        keepOriginalFeatures = XData.copy()
        keepOriginalFeatures.columns = [str(col) + ' F'+str(idx+1)+'' for idx, col in enumerate(keepOriginalFeatures.columns)]
        columnsNewGen = keepOriginalFeatures.columns.values.tolist()
        OrignList = keepOriginalFeatures.columns.values.tolist()
    XData.columns = ['F'+str(idx+1) for idx, col in enumerate(XData.columns)]
    XDataTest.columns = ['F'+str(idx+1) for idx, col in enumerate(XDataTest.columns)]
    XDataExternal.columns = ['F'+str(idx+1) for idx, col in enumerate(XDataExternal.columns)]
    global XDataStored, yDataStored
    XDataStored = XData.copy()
    yDataStored = yData.copy()
    global XDataStoredOriginal
    XDataStoredOriginal = XData.copy()
    global finalResultsData
    finalResultsData = XData.copy()
    global XDataNoRemoval 
    XDataNoRemoval = XData.copy()
    global XDataNoRemovalOrig
    XDataNoRemovalOrig = XData.copy()
    warnings.simplefilter('ignore')
    executeModel([], 0, '')
    
    return 'Everything is okay'
def create_global_function():
    global estimator
    location = './cachedir'
    memory = Memory(location, verbose=0)
    # cached objective for the Bayesian optimization below: returns the cross-validated accuracy of an XGBoost classifier for the given hyperparameters
    @memory.cache
    def estimator(n_estimators, eta, max_depth, subsample, colsample_bytree):
        # initialize model
        print('loopModels')
        n_estimators = int(n_estimators)
        max_depth = int(max_depth)
        model = XGBClassifier(n_estimators=n_estimators, eta=eta, max_depth=max_depth, subsample=subsample, colsample_bytree=colsample_bytree, n_jobs=-1, random_state=RANDOM_SEED, silent=True, verbosity = 0, use_label_encoder=False)
        # set in cross-validation
        result = cross_validate(model, XData, yData, cv=crossValidation, scoring='accuracy')
        # result is mean of test_score
        return np.mean(result['test_score'])
# check this issue later because we are not getting the same results
def executeModel(exeCall, flagEx, nodeTransfName):
    global XDataTest, yDataTest
    global XDataExternal, yDataExternal
    global keyFirstTime
    global estimator
    global yPredictProb
    global scores
    global featureImportanceData
    global XData
    global XDataStored
    global previousState
    global columnsNewGen
    global columnsNames
    global listofTransformations
    global XDataStoredOriginal
    global finalResultsData
    global OrignList
    global tracker
    global XDataNoRemoval
    global XDataNoRemovalOrig
    columnsNames = []
    scores = []
    if (len(exeCall) == 0):
        if (flagEx == 3):
            XDataStored = XData.copy()
            XDataNoRemovalOrig = XDataNoRemoval.copy()
            OrignList = columnsNewGen
        elif (flagEx == 2):
            XData = XDataStored.copy()
            XDataStoredOriginal = XDataStored.copy()
            XDataNoRemoval = XDataNoRemovalOrig.copy()
            columnsNewGen = OrignList
        else:
            XData = XDataStored.copy()
            XDataNoRemoval = XDataNoRemovalOrig.copy()
            XDataStoredOriginal = XDataStored.copy()
    else:
        if (flagEx == 4):
            XDataStored = XData.copy()
            XDataNoRemovalOrig = XDataNoRemoval.copy()
            #XDataStoredOriginal = XDataStored.copy()
        elif (flagEx == 2):
            XData = XDataStored.copy()
            XDataStoredOriginal = XDataStored.copy()
            XDataNoRemoval = XDataNoRemovalOrig.copy()
            columnsNewGen = OrignList
        else:    
            XData = XDataStored.copy()
            #XDataNoRemoval = XDataNoRemovalOrig.copy()
            XDataStoredOriginal = XDataStored.copy()
    # Bayesian Optimization CHANGE INIT_POINTS!
    if (keyFirstTime):
        create_global_function()
        params = {"n_estimators": (5, 200), "eta": (0.05, 0.3), "max_depth": (6,12), "subsample": (0.8,1), "colsample_bytree": (0.8,1)}
        bayesopt = BayesianOptimization(estimator, params, random_state=RANDOM_SEED)
        bayesopt.maximize(init_points=20, n_iter=5, acq='ucb') # 20 and 5
        bestParams = bayesopt.max['params']
        estimator = XGBClassifier(n_estimators=int(bestParams.get('n_estimators')), eta=bestParams.get('eta'), max_depth=int(bestParams.get('max_depth')), subsample=bestParams.get('subsample'), colsample_bytree=bestParams.get('colsample_bytree'), probability=True, random_state=RANDOM_SEED, silent=True, verbosity = 0, use_label_encoder=False)
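        # the best hyperparameters found above are frozen into a single global XGBClassifier;
        # it is refit on every call of executeModel but never re-tuned (keyFirstTime guards this block)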
        columnsNewGen = OrignList
    if (len(exeCall) != 0):
        if (flagEx == 1):
            currentColumnsDeleted = []
            for uniqueValue in exeCall:
                currentColumnsDeleted.append(tracker[uniqueValue])
            for column in XData.columns:
                if (column in currentColumnsDeleted):
                    XData = XData.drop(column, axis=1)
                    XDataStoredOriginal = XDataStoredOriginal.drop(column, axis=1)
        elif (flagEx == 2):
            columnsKeepNew = []
            columns = XDataGen.columns.values.tolist()
            for indx, col in enumerate(columns):
                if indx in exeCall:
                    columnsKeepNew.append(col)
                    columnsNewGen.append(col)
            XDataTemp = XDataGen[columnsKeepNew]
            XData[columnsKeepNew] = XDataTemp.values
            XDataStoredOriginal[columnsKeepNew] = XDataTemp.values
            XDataNoRemoval[columnsKeepNew] = XDataTemp.values
        elif (flagEx == 4):
            splittedCol = nodeTransfName.split('_')
            for col in XDataNoRemoval.columns:
                splitCol = col.split('_')
                if ((splittedCol[0] in splitCol[0])):
                    newSplitted = re.sub("[^0-9]", "", splittedCol[0])
                    newCol = re.sub("[^0-9]", "", splitCol[0])
                    if (newSplitted == newCol):
                        storeRenamedColumn = col
            XData.rename(columns={ storeRenamedColumn: nodeTransfName }, inplace = True)
            XDataNoRemoval.rename(columns={ storeRenamedColumn: nodeTransfName }, inplace = True)
            currentColumn = columnsNewGen[exeCall[0]]
            subString = currentColumn[currentColumn.find("(")+1:currentColumn.find(")")]
            replacement = currentColumn.replace(subString, nodeTransfName)
            for ind, column in enumerate(columnsNewGen):
                splitCol = column.split('_')
                if ((splittedCol[0] in splitCol[0])):
                    newSplitted = re.sub("[^0-9]", "", splittedCol[0])
                    newCol = re.sub("[^0-9]", "", splitCol[0])
                    if (newSplitted == newCol):
                        columnsNewGen[ind] = columnsNewGen[ind].replace(storeRenamedColumn, nodeTransfName)
            if (len(splittedCol) == 1):
                XData[nodeTransfName] = XDataStoredOriginal[nodeTransfName]
                XDataNoRemoval[nodeTransfName] = XDataStoredOriginal[nodeTransfName]
            else:
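                # transformation suffix codes: r=round, b=auto-binning, zs=z-score, mms=min-max scaling,
                # l2/l1p/l10=log2/log1p/log10, e2/em1=exp2/expm1, p2/p3=square/cube, anything else=fourth power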
                if (splittedCol[1] == 'r'):
                    XData[nodeTransfName] = XData[nodeTransfName].round()
                elif (splittedCol[1] == 'b'):
                    number_of_bins = np.histogram_bin_edges(XData[nodeTransfName], bins='auto')
                    emptyLabels = []
                    for index, number in enumerate(number_of_bins):
                        if (index == 0):
                            pass
                        else:
                            emptyLabels.append(index)
                    XData[nodeTransfName] = pd.cut(XData[nodeTransfName], bins=number_of_bins, labels=emptyLabels, include_lowest=True, right=True)
                    XData[nodeTransfName] = pd.to_numeric(XData[nodeTransfName], downcast='signed')
                elif (splittedCol[1] == 'zs'):
                    XData[nodeTransfName] = (XData[nodeTransfName]-XData[nodeTransfName].mean())/XData[nodeTransfName].std()
                elif (splittedCol[1] == 'mms'):
                    XData[nodeTransfName] = (XData[nodeTransfName]-XData[nodeTransfName].min())/(XData[nodeTransfName].max()-XData[nodeTransfName].min())
                elif (splittedCol[1] == 'l2'):
                    dfTemp = []
                    dfTemp = np.log2(XData[nodeTransfName])
                    dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
                    dfTemp = dfTemp.fillna(0)
                    XData[nodeTransfName] = dfTemp
                elif (splittedCol[1] == 'l1p'):
                    dfTemp = []
                    dfTemp = np.log1p(XData[nodeTransfName])
                    dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
                    dfTemp = dfTemp.fillna(0)
                    XData[nodeTransfName] = dfTemp       
                elif (splittedCol[1] == 'l10'):
                    dfTemp = []
                    dfTemp = np.log10(XData[nodeTransfName])
                    dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
                    dfTemp = dfTemp.fillna(0)
                    XData[nodeTransfName] = dfTemp
                elif (splittedCol[1] == 'e2'):
                    dfTemp = []
                    dfTemp = np.exp2(XData[nodeTransfName])
                    dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
                    dfTemp = dfTemp.fillna(0)
                    XData[nodeTransfName] = dfTemp
                elif (splittedCol[1] == 'em1'):
                    dfTemp = []
                    dfTemp = np.expm1(XData[nodeTransfName])
                    dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
                    dfTemp = dfTemp.fillna(0)
                    XData[nodeTransfName] = dfTemp
                elif (splittedCol[1] == 'p2'):
                    XData[nodeTransfName] = np.power(XData[nodeTransfName], 2)
                elif (splittedCol[1] == 'p3'):
                    XData[nodeTransfName] = np.power(XData[nodeTransfName], 3)
                else:
                    XData[nodeTransfName] = np.power(XData[nodeTransfName], 4)
                XDataNoRemoval[nodeTransfName] = XData[nodeTransfName]
            XDataStored = XData.copy()
            XDataNoRemovalOrig = XDataNoRemoval.copy()
            
    columnsNamesLoc = XData.columns.values.tolist()
    for col in columnsNamesLoc:
        splittedCol = col.split('_')
        if (len(splittedCol) == 1):
            for tran in listofTransformations:
                columnsNames.append(splittedCol[0]+'_'+tran)
        else:
            for tran in listofTransformations:
                if (splittedCol[1] == tran):
                    columnsNames.append(splittedCol[0])
                else:
                    columnsNames.append(splittedCol[0]+'_'+tran)
    featureImportanceData = estimatorFeatureSelection(XDataNoRemoval, estimator)
    tracker = []
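    # tracker keeps, for every entry of columnsNewGen, its short 'F<idx>' identifier (or the name itself
    # when no suffix is present) so that indices sent from the front end can be mapped back to XData columns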
    for value in columnsNewGen:
        value = value.split(' ')
        if (len(value) > 1):
            tracker.append(value[1])
        else:
            tracker.append(value[0])
    estimator.fit(XData, yData)
    yPredict = estimator.predict(XData)
    yPredictProb = cross_val_predict(estimator, XData, yData, cv=crossValidation, method='predict_proba')
    num_cores = multiprocessing.cpu_count()
    inputsSc = ['accuracy','precision_weighted','recall_weighted']
    flat_results = Parallel(n_jobs=num_cores)(delayed(solve)(estimator,XData,yData,crossValidation,item,index) for index, item in enumerate(inputsSc))
    scoresAct = [item for sublist in flat_results for item in sublist]
    #print(scoresAct)
    # if (StanceTest):
    #     y_pred = estimator.predict(XDataTest)
    #     print('Test data set')
    #     print(classification_report(yDataTest, y_pred))
    #     y_pred = estimator.predict(XDataExternal)
    #     print('External data set')
    #     print(classification_report(yDataExternal, y_pred))
    howMany = 0
    if (keyFirstTime):
        previousState = scoresAct
        keyFirstTime = False
        howMany = 3
    
    if (((scoresAct[0]-scoresAct[1]) + (scoresAct[2]-scoresAct[3]) + (scoresAct[4]-scoresAct[5])) >= ((previousState[0]-previousState[1]) + (previousState[2]-previousState[3]) + (previousState[4]-previousState[5]))):
        finalResultsData = XData.copy()
    if (keyFirstTime == False):
        if (((scoresAct[0]-scoresAct[1]) + (scoresAct[2]-scoresAct[3]) + (scoresAct[4]-scoresAct[5])) >= ((previousState[0]-previousState[1]) + (previousState[2]-previousState[3]) + (previousState[4]-previousState[5]))):
            previousState[0] = scoresAct[0]
            previousState[1] = scoresAct[1]
            howMany = 3
        #elif ((scoresAct[2]-scoresAct[3]) > (previousState[2]-previousState[3])):
            previousState[2] = scoresAct[2]
            previousState[3] = scoresAct[3]
            #howMany = howMany + 1
        #elif ((scoresAct[4]-scoresAct[5]) > (previousState[4]-previousState[5])):
            previousState[4] = scoresAct[4]
            previousState[5] = scoresAct[5]
            #howMany = howMany + 1
        #else:
            #pass
    scores = scoresAct + previousState
    if (howMany == 3):
        scores.append(1)
    else:
        scores.append(0)
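    # scores = current [mean, std] pairs for accuracy, weighted precision and weighted recall,
    # followed by the previous best state and a trailing 1/0 flag marking whether that state was just updated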
    return 'Everything Okay'
@app.route('/data/RequestBestFeatures', methods=["GET", "POST"])
def BestFeat():
    global finalResultsData
    finalResultsDataJSON = finalResultsData.to_json()
    response = {    
        'finalResultsData': finalResultsDataJSON
    }
    return jsonify(response)
def featFun (clfLocalPar,DataLocalPar,yDataLocalPar):
    PerFeatureAccuracyLocalPar = []
    scores = model_selection.cross_val_score(clfLocalPar, DataLocalPar, yDataLocalPar, cv=None, n_jobs=-1)
    PerFeatureAccuracyLocalPar.append(scores.mean())
    return PerFeatureAccuracyLocalPar
location = './cachedir'
memory = Memory(location, verbose=0)
# cached computation of the feature-selection views: normalized impurity importance, RFECV ranking, permutation importance, per-feature accuracy, and ANOVA F-scores
@memory.cache
def estimatorFeatureSelection(Data, clf):
    resultsFS = []
    permList = []
    PerFeatureAccuracy = []
    PerFeatureAccuracyAll = []
    ImpurityFS = []
    RankingFS = []
    estim = clf.fit(Data, yData)
    importances = clf.feature_importances_
    # std = np.std([tree.feature_importances_ for tree in estim.feature_importances_],
    #             axis=0)
    maxList = max(importances)
    minList = min(importances)
    for f in range(Data.shape[1]):
        ImpurityFS.append((importances[f] - minList) / (maxList - minList))
    estim = LogisticRegression(n_jobs = -1, random_state=RANDOM_SEED)
    selector = RFECV(estimator=estim, n_jobs = -1, step=1, cv=crossValidation)
    selector = selector.fit(Data, yData)
    RFEImp = selector.ranking_
    for f in range(Data.shape[1]):
        if (RFEImp[f] == 1):
            RankingFS.append(0.95)
        elif (RFEImp[f] == 2):
            RankingFS.append(0.85)
        elif (RFEImp[f] == 3):
            RankingFS.append(0.75)
        elif (RFEImp[f] == 4):
            RankingFS.append(0.65)
        elif (RFEImp[f] == 5):
            RankingFS.append(0.55)
        elif (RFEImp[f] == 6):
            RankingFS.append(0.45)
        elif (RFEImp[f] == 7):
            RankingFS.append(0.35)
        elif (RFEImp[f] == 8):
            RankingFS.append(0.25)
        elif (RFEImp[f] == 9):
            RankingFS.append(0.15)
        else: 
            RankingFS.append(0.05)
    perm = PermutationImportance(clf, cv=None, refit = True, n_iter = 25).fit(Data, yData)
    permList.append(perm.feature_importances_)
    n_feats = Data.shape[1]
    num_cores = multiprocessing.cpu_count()
    print("Parallelization Initilization")
    flat_results = Parallel(n_jobs=num_cores)(delayed(featFun)(clf,Data.values[:, i].reshape(-1, 1),yData) for i in range(n_feats))
    PerFeatureAccuracy = [item for sublist in flat_results for item in sublist]
    # for i in range(n_feats):
    #     scoresHere = model_selection.cross_val_score(clf, Data.values[:, i].reshape(-1, 1), yData, cv=None, n_jobs=-1)
    #     PerFeatureAccuracy.append(scoresHere.mean())
    PerFeatureAccuracyAll.append(PerFeatureAccuracy)
    clf.fit(Data, yData) 
    yPredict = clf.predict(Data)
    yPredict = np.nan_to_num(yPredict)
    RankingFSDF = pd.DataFrame(RankingFS)
    RankingFSDF = RankingFSDF.to_json()
    ImpurityFSDF = pd.DataFrame(ImpurityFS)
    ImpurityFSDF = ImpurityFSDF.to_json()
    perm_imp_eli5PD = pd.DataFrame(permList)
    if (perm_imp_eli5PD.empty):
        for col in Data.columns:
            perm_imp_eli5PD = perm_imp_eli5PD.append({0: 0}, ignore_index=True)
    perm_imp_eli5PD = perm_imp_eli5PD.to_json()
    PerFeatureAccuracyPandas = pd.DataFrame(PerFeatureAccuracyAll)
    PerFeatureAccuracyPandas = PerFeatureAccuracyPandas.to_json()
    bestfeatures = SelectKBest(score_func=f_classif, k='all')
    fit = bestfeatures.fit(Data,yData)
    dfscores = pd.DataFrame(fit.scores_)
    dfcolumns = pd.DataFrame(Data.columns)
    featureScores = pd.concat([dfcolumns,dfscores],axis=1)
    featureScores.columns = ['Specs','Score']  #naming the dataframe columns
    featureScores = featureScores.to_json()
    resultsFS.append(featureScores) 
    resultsFS.append(ImpurityFSDF)
    resultsFS.append(perm_imp_eli5PD) 
    resultsFS.append(PerFeatureAccuracyPandas)
    resultsFS.append(RankingFSDF) 
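    # resultsFS layout (in order): ANOVA F-scores, normalized impurity importance, permutation importance,
    # per-feature cross-validated accuracy, and the RFECV-derived ranking mapped to the 0.05-0.95 scale above;
    # every entry is serialized to JSON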
    return resultsFS
@app.route('/data/sendFeatImp', methods=["GET", "POST"])
def sendFeatureImportance():
    global featureImportanceData
    response = {    
        'Importance': featureImportanceData
    }
    return jsonify(response)
@app.route('/data/sendFeatImpComp', methods=["GET", "POST"])
def sendFeatureImportanceComp():
    global featureCompareData
    global columnsKeep
    response = {    
        'ImportanceCompare': featureCompareData,
        'FeatureNames': columnsKeep
    }
    return jsonify(response)
def solve(sclf,XData,yData,crossValidation,scoringIn,loop):
    scoresLoc = []
    temp = model_selection.cross_val_score(sclf, XData, yData, cv=crossValidation, scoring=scoringIn, n_jobs=-1)
    scoresLoc.append(temp.mean())
    scoresLoc.append(temp.std())
    return scoresLoc
@app.route('/data/sendResults', methods=["GET", "POST"])
def sendFinalResults():
    global scores
    response = {    
        'ValidResults': scores
    }
    return jsonify(response)
def Transformation(quadrant1, quadrant2, quadrant3, quadrant4, quadrant5):
    # XDataNumericColumn = XData.select_dtypes(include='number')
    XDataNumeric = XDataStoredOriginal.select_dtypes(include='number')
    columns = list(XDataNumeric)  
    global packCorrTransformed
    packCorrTransformed = []
    for count, i in enumerate(columns): 
        dicTransf = {}
        
        splittedCol = columnsNames[(count)*len(listofTransformations)+0].split('_')
        if(len(splittedCol) == 1):
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.copy()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf1"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        else:
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.copy()
            XDataNumericCopy[i] = XDataNumericCopy[i].round()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf1"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)          
        splittedCol = columnsNames[(count)*len(listofTransformations)+1].split('_')
        if(len(splittedCol) == 1):
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.copy()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf2"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        else:
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.copy()
            number_of_bins = np.histogram_bin_edges(XDataNumericCopy[i], bins='auto')
            emptyLabels = []
            for index, number in enumerate(number_of_bins):
                if (index == 0):
                    pass
                else:
                    emptyLabels.append(index)
            XDataNumericCopy[i] = pd.cut(XDataNumericCopy[i], bins=number_of_bins, labels=emptyLabels, include_lowest=True, right=True)
            XDataNumericCopy[i] = pd.to_numeric(XDataNumericCopy[i], downcast='signed')
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf2"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)          
        splittedCol = columnsNames[(count)*len(listofTransformations)+2].split('_')        
        if(len(splittedCol) == 1):
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.copy()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf3"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        else:
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.copy()
            XDataNumericCopy[i] = (XDataNumericCopy[i]-XDataNumericCopy[i].mean())/XDataNumericCopy[i].std()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf3"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)          
        splittedCol = columnsNames[(count)*len(listofTransformations)+3].split('_')        
        if(len(splittedCol) == 1):
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.copy()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf4"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        else:
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.copy()
            XDataNumericCopy[i] = (XDataNumericCopy[i]-XDataNumericCopy[i].min())/(XDataNumericCopy[i].max()-XDataNumericCopy[i].min())
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf4"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)          
        splittedCol = columnsNames[(count)*len(listofTransformations)+4].split('_')
        if(len(splittedCol) == 1):
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.copy()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf5"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        else:
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.copy()
            dfTemp = []
            dfTemp = np.log2(XDataNumericCopy[i])
            dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
            dfTemp = dfTemp.fillna(0)
            XDataNumericCopy[i] = dfTemp
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf5"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        splittedCol = columnsNames[(count)*len(listofTransformations)+5].split('_')
        if(len(splittedCol) == 1):
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.copy()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf6"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        else:
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.copy()
            dfTemp = []
            dfTemp = np.log1p(XDataNumericCopy[i])
            dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
            dfTemp = dfTemp.fillna(0)
            XDataNumericCopy[i] = dfTemp
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf6"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        splittedCol = columnsNames[(count)*len(listofTransformations)+6].split('_')
        if(len(splittedCol) == 1):
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.copy()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf7"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        else:
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.copy()
            dfTemp = []
            dfTemp = np.log10(XDataNumericCopy[i])
            dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
            dfTemp = dfTemp.fillna(0)
            XDataNumericCopy[i] = dfTemp
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf7"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        splittedCol = columnsNames[(count)*len(listofTransformations)+7].split('_')   
        if(len(splittedCol) == 1):
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.copy()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf8"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        else:
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.copy()
            dfTemp = []
            dfTemp = np.exp2(XDataNumericCopy[i])
            dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
            dfTemp = dfTemp.fillna(0)
            XDataNumericCopy[i] = dfTemp
            if (np.isinf(dfTemp.var())):
                flagInf = True
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf8"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        splittedCol = columnsNames[(count)*len(listofTransformations)+8].split('_')   
        if(len(splittedCol) == 1):
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.copy()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf9"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        else:
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.copy()
            dfTemp = []
            dfTemp = np.expm1(XDataNumericCopy[i])
            dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
            dfTemp = dfTemp.fillna(0)
            XDataNumericCopy[i] = dfTemp
            if (np.isinf(dfTemp.var())):
                flagInf = True
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf9"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        splittedCol = columnsNames[(count)*len(listofTransformations)+9].split('_')   
        if(len(splittedCol) == 1):
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.copy()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf10"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        else:
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.copy()
            XDataNumericCopy[i] = np.power(XDataNumericCopy[i], 2)
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf10"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        splittedCol = columnsNames[(count)*len(listofTransformations)+10].split('_')   
        if(len(splittedCol) == 1):
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.copy()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf11"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        else:
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.copy()
            XDataNumericCopy[i] = np.power(XDataNumericCopy[i], 3)
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf11"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        splittedCol = columnsNames[(count)*len(listofTransformations)+11].split('_')   
        if(len(splittedCol) == 1):
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.copy()
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf12"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        else:
            d={}
            flagInf = False
            XDataNumericCopy = XDataNumeric.copy()
            XDataNumericCopy[i] = np.power(XDataNumericCopy[i], 4)
            for number in range(1,6):
                quadrantVariable = str('quadrant%s' % number)
                illusion = locals()[quadrantVariable]
                d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
            dicTransf["transf12"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
        packCorrTransformed.append(dicTransf)
    return 'Everything Okay'
def NewComputationTransf(DataRows1, DataRows2, DataRows3, DataRows4, DataRows5, quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, feature, count, flagInf):
    corrMatrix1 = DataRows1.corr()
    corrMatrix1 = corrMatrix1.abs()
    corrMatrix2 = DataRows2.corr()
    corrMatrix2 = corrMatrix2.abs()
    corrMatrix3 = DataRows3.corr()
    corrMatrix3 = corrMatrix3.abs()
    corrMatrix4 = DataRows4.corr()
    corrMatrix4 = corrMatrix4.abs()
    corrMatrix5 = DataRows5.corr()
    corrMatrix5 = corrMatrix5.abs()
    corrMatrix1 = corrMatrix1.loc[[feature]]
    corrMatrix2 = corrMatrix2.loc[[feature]]
    corrMatrix3  = corrMatrix3.loc[[feature]]
    corrMatrix4 = corrMatrix4.loc[[feature]]
    corrMatrix5 = corrMatrix5.loc[[feature]]
    DataRows1 = DataRows1.reset_index(drop=True)
    DataRows2 = DataRows2.reset_index(drop=True)
    DataRows3 = DataRows3.reset_index(drop=True)
    DataRows4 = DataRows4.reset_index(drop=True)
    DataRows5 = DataRows5.reset_index(drop=True)
    targetRows1 = [yData[i] for i in quadrant1] 
    targetRows2 = [yData[i] for i in quadrant2] 
    targetRows3 = [yData[i] for i in quadrant3] 
    targetRows4 = [yData[i] for i in quadrant4] 
    targetRows5 = [yData[i] for i in quadrant5] 
    targetRows1Arr = np.array(targetRows1)
    targetRows2Arr = np.array(targetRows2)
    targetRows3Arr = np.array(targetRows3)
    targetRows4Arr = np.array(targetRows4)
    targetRows5Arr = np.array(targetRows5)
    uniqueTarget1 = unique(targetRows1)
    uniqueTarget2 = unique(targetRows2)
    uniqueTarget3 = unique(targetRows3)
    uniqueTarget4 = unique(targetRows4)
    uniqueTarget5 = unique(targetRows5)
    if (len(targetRows1Arr) > 0):
        onehotEncoder1 = OneHotEncoder(sparse=False)
        targetRows1Arr = targetRows1Arr.reshape(len(targetRows1Arr), 1)
        onehotEncoder1 = onehotEncoder1.fit_transform(targetRows1Arr)
        hotEncoderDF1 = pd.DataFrame(onehotEncoder1)
        concatDF1 = pd.concat([DataRows1, hotEncoderDF1], axis=1)
        corrMatrixComb1 = concatDF1.corr()
        corrMatrixComb1 = corrMatrixComb1.abs()
        corrMatrixComb1 = corrMatrixComb1.iloc[:,-len(uniqueTarget1):]
        DataRows1 = DataRows1.replace([np.inf, -np.inf], np.nan)
        DataRows1 = DataRows1.fillna(0)
        X1 = add_constant(DataRows1)
        X1 = X1.replace([np.inf, -np.inf], np.nan)
        X1 = X1.fillna(0)
        VIF1 = pd.Series([variance_inflation_factor(X1.values, i) 
            for i in range(X1.shape[1])], 
            index=X1.columns)
        if (flagInf == False):
            VIF1 = VIF1.replace([np.inf, -np.inf], np.nan)
            VIF1 = VIF1.fillna(0)
            VIF1 = VIF1.loc[[feature]]
        else:
            VIF1 = pd.Series()
        if ((len(targetRows1Arr) > 2) and (flagInf == False)):
            MI1 = mutual_info_classif(DataRows1, targetRows1Arr, n_neighbors=3, random_state=RANDOM_SEED)
            MI1List = MI1.tolist()
            MI1List = MI1List[count]
        else:
            MI1List = []
    else:
        corrMatrixComb1 = pd.DataFrame()
        VIF1 = pd.Series()
        MI1List = []
    if (len(targetRows2Arr) > 0):
        onehotEncoder2 = OneHotEncoder(sparse=False)
        targetRows2Arr = targetRows2Arr.reshape(len(targetRows2Arr), 1)
        onehotEncoder2 = onehotEncoder2.fit_transform(targetRows2Arr)
        hotEncoderDF2 = pd.DataFrame(onehotEncoder2)
        concatDF2 = pd.concat([DataRows2, hotEncoderDF2], axis=1)
        corrMatrixComb2 = concatDF2.corr()
        corrMatrixComb2 = corrMatrixComb2.abs()
        corrMatrixComb2 = corrMatrixComb2.iloc[:,-len(uniqueTarget2):]
        DataRows2 = DataRows2.replace([np.inf, -np.inf], np.nan)
        DataRows2 = DataRows2.fillna(0)
        X2 = add_constant(DataRows2)
        X2 = X2.replace([np.inf, -np.inf], np.nan)
        X2 = X2.fillna(0)
        VIF2 = pd.Series([variance_inflation_factor(X2.values, i) 
                for i in range(X2.shape[1])], 
                index=X2.columns)
        if (flagInf == False):
            VIF2 = VIF2.replace([np.inf, -np.inf], np.nan)
            VIF2 = VIF2.fillna(0)
            VIF2 = VIF2.loc[[feature]]
        else:
            VIF2 = pd.Series()
        if ((len(targetRows2Arr) > 2) and (flagInf == False)):
            MI2 = mutual_info_classif(DataRows2, targetRows2Arr, n_neighbors=3, random_state=RANDOM_SEED)
            MI2List = MI2.tolist()
            MI2List = MI2List[count]
        else:
            MI2List = []
    else:
        corrMatrixComb2 = pd.DataFrame()
        VIF2 = pd.Series()
        MI2List = []
    if (len(targetRows3Arr) > 0):
        onehotEncoder3 = OneHotEncoder(sparse=False)
        targetRows3Arr = targetRows3Arr.reshape(len(targetRows3Arr), 1)
        onehotEncoder3 = onehotEncoder3.fit_transform(targetRows3Arr)
        hotEncoderDF3 = pd.DataFrame(onehotEncoder3)
        concatDF3 = pd.concat([DataRows3, hotEncoderDF3], axis=1)
        corrMatrixComb3 = concatDF3.corr()
        corrMatrixComb3 = corrMatrixComb3.abs()
        corrMatrixComb3 = corrMatrixComb3.iloc[:,-len(uniqueTarget3):]
        DataRows3 = DataRows3.replace([np.inf, -np.inf], np.nan)
        DataRows3 = DataRows3.fillna(0)
        X3 = add_constant(DataRows3)
        X3 = X3.replace([np.inf, -np.inf], np.nan)
        X3 = X3.fillna(0)
        if (flagInf == False):
            VIF3 = pd.Series([variance_inflation_factor(X3.values, i) 
                    for i in range(X3.shape[1])], 
                    index=X3.columns)
            VIF3 = VIF3.replace([np.inf, -np.inf], np.nan)
            VIF3 = VIF3.fillna(0)
            VIF3 = VIF3.loc[[feature]]
        else:
            VIF3 = pd.Series()
        if ((len(targetRows3Arr) > 2) and (flagInf == False)):
            MI3 = mutual_info_classif(DataRows3, targetRows3Arr, n_neighbors=3, random_state=RANDOM_SEED)
            MI3List = MI3.tolist()
            MI3List = MI3List[count]
        else:
            MI3List = []
    else:
        corrMatrixComb3 = pd.DataFrame()
        VIF3 = pd.Series()
        MI3List = []
    if (len(targetRows4Arr) > 0):
        onehotEncoder4 = OneHotEncoder(sparse=False)
        targetRows4Arr = targetRows4Arr.reshape(len(targetRows4Arr), 1)
        onehotEncoder4 = onehotEncoder4.fit_transform(targetRows4Arr)
        hotEncoderDF4 = pd.DataFrame(onehotEncoder4)
        concatDF4 = pd.concat([DataRows4, hotEncoderDF4], axis=1)
        corrMatrixComb4 = concatDF4.corr()
        corrMatrixComb4 = corrMatrixComb4.abs()
        corrMatrixComb4 = corrMatrixComb4.iloc[:,-len(uniqueTarget4):]
        DataRows4 = DataRows4.replace([np.inf, -np.inf], np.nan)
        DataRows4 = DataRows4.fillna(0)
        X4 = add_constant(DataRows4)
        X4 = X4.replace([np.inf, -np.inf], np.nan)
        X4 = X4.fillna(0)
        if (flagInf == False):
            VIF4 = pd.Series([variance_inflation_factor(X4.values, i) 
                    for i in range(X4.shape[1])], 
                    index=X4.columns)
            VIF4 = VIF4.replace([np.inf, -np.inf], np.nan)
            VIF4 = VIF4.fillna(0)
            VIF4 = VIF4.loc[[feature]]
        else:
            VIF4 = pd.Series()
        if ((len(targetRows4Arr) > 2) and (flagInf == False)):
            MI4 = mutual_info_classif(DataRows4, targetRows4Arr, n_neighbors=3, random_state=RANDOM_SEED)
            MI4List = MI4.tolist()
            MI4List = MI4List[count]
        else:
            MI4List = []
    else:
        corrMatrixComb4 = pd.DataFrame()
        VIF4 = pd.Series()
        MI4List = []
    if (len(targetRows5Arr) > 0):
        onehotEncoder5 = OneHotEncoder(sparse=False)
        targetRows5Arr = targetRows5Arr.reshape(len(targetRows5Arr), 1)
        onehotEncoder5 = onehotEncoder5.fit_transform(targetRows5Arr)
        hotEncoderDF5 = pd.DataFrame(onehotEncoder5)
        concatDF5 = pd.concat([DataRows5, hotEncoderDF5], axis=1)
        corrMatrixComb5 = concatDF5.corr()
        corrMatrixComb5 = corrMatrixComb5.abs()
        corrMatrixComb5 = corrMatrixComb5.iloc[:,-len(uniqueTarget5):]
        DataRows5 = DataRows5.replace([np.inf, -np.inf], np.nan)
        DataRows5 = DataRows5.fillna(0)
        X5 = add_constant(DataRows5)
        X5 = X5.replace([np.inf, -np.inf], np.nan)
        X5 = X5.fillna(0)
        if (flagInf == False):
            VIF5 = pd.Series([variance_inflation_factor(X5.values, i) 
                    for i in range(X5.shape[1])], 
                    index=X5.columns)
            VIF5 = VIF5.replace([np.inf, -np.inf], np.nan)
            VIF5 = VIF5.fillna(0)
            VIF5 = VIF5.loc[[feature]]
        else:
            VIF5 = pd.Series()
        if ((len(targetRows5Arr) > 2) and (flagInf == False)):
            MI5 = mutual_info_classif(DataRows5, targetRows5Arr, n_neighbors=3, random_state=RANDOM_SEED)
            MI5List = MI5.tolist()
            MI5List = MI5List[count]
        else:
            MI5List = []
    else:
        corrMatrixComb5 = pd.DataFrame()
        VIF5 = pd.Series()
        MI5List = []
    if(corrMatrixComb1.empty):
        corrMatrixComb1 = pd.DataFrame()
    else:
        corrMatrixComb1 = corrMatrixComb1.loc[[feature]]
    if(corrMatrixComb2.empty):
        corrMatrixComb2 = pd.DataFrame()
    else:
        corrMatrixComb2 = corrMatrixComb2.loc[[feature]]
    if(corrMatrixComb3.empty):
        corrMatrixComb3 = pd.DataFrame()
    else:
        corrMatrixComb3 = corrMatrixComb3.loc[[feature]]
    if(corrMatrixComb4.empty):
        corrMatrixComb4 = pd.DataFrame()
    else:
        corrMatrixComb4 = corrMatrixComb4.loc[[feature]]
    if(corrMatrixComb5.empty):
        corrMatrixComb5 = pd.DataFrame()
    else:
        corrMatrixComb5 = corrMatrixComb5.loc[[feature]]
    targetRows1ArrDF = pd.DataFrame(targetRows1Arr)
    targetRows2ArrDF = pd.DataFrame(targetRows2Arr)
    targetRows3ArrDF = pd.DataFrame(targetRows3Arr)
    targetRows4ArrDF = pd.DataFrame(targetRows4Arr)
    targetRows5ArrDF = pd.DataFrame(targetRows5Arr)
    concatAllDF1 = pd.concat([DataRows1, targetRows1ArrDF], axis=1)
    concatAllDF2 = pd.concat([DataRows2, targetRows2ArrDF], axis=1)
    concatAllDF3 = pd.concat([DataRows3, targetRows3ArrDF], axis=1)
    concatAllDF4 = pd.concat([DataRows4, targetRows4ArrDF], axis=1)
    concatAllDF5 = pd.concat([DataRows5, targetRows5ArrDF], axis=1)
    corrMatrixCombTotal1 = concatAllDF1.corr()
    corrMatrixCombTotal1 = corrMatrixCombTotal1.abs()
    corrMatrixCombTotal2 = concatAllDF2.corr()
    corrMatrixCombTotal2 = corrMatrixCombTotal2.abs()
    corrMatrixCombTotal3 = concatAllDF3.corr()
    corrMatrixCombTotal3 = corrMatrixCombTotal3.abs()
    corrMatrixCombTotal4 = concatAllDF4.corr()
    corrMatrixCombTotal4 = corrMatrixCombTotal4.abs()
    corrMatrixCombTotal5 = concatAllDF5.corr()
    corrMatrixCombTotal5 = corrMatrixCombTotal5.abs()
    corrMatrixCombTotal1 = corrMatrixCombTotal1.loc[[feature]]
    corrMatrixCombTotal1 = corrMatrixCombTotal1.iloc[:,-1]
    corrMatrixCombTotal2 = corrMatrixCombTotal2.loc[[feature]]
    corrMatrixCombTotal2 = corrMatrixCombTotal2.iloc[:,-1]
    corrMatrixCombTotal3 = corrMatrixCombTotal3.loc[[feature]]
    corrMatrixCombTotal3 = corrMatrixCombTotal3.iloc[:,-1]
    corrMatrixCombTotal4 = corrMatrixCombTotal4.loc[[feature]]
    corrMatrixCombTotal4 = corrMatrixCombTotal4.iloc[:,-1]
    corrMatrixCombTotal5 = corrMatrixCombTotal5.loc[[feature]]
    corrMatrixCombTotal5 = corrMatrixCombTotal5.iloc[:,-1]
    corrMatrixCombTotal1 = pd.concat([corrMatrixCombTotal1.tail(1)])
    corrMatrixCombTotal2 = pd.concat([corrMatrixCombTotal2.tail(1)])
    corrMatrixCombTotal3 = pd.concat([corrMatrixCombTotal3.tail(1)])
    corrMatrixCombTotal4 = pd.concat([corrMatrixCombTotal4.tail(1)])
    corrMatrixCombTotal5 = pd.concat([corrMatrixCombTotal5.tail(1)])
    packCorrLoc = []
    packCorrLoc.append(corrMatrix1.to_json())
    packCorrLoc.append(corrMatrix2.to_json())
    packCorrLoc.append(corrMatrix3.to_json())
    packCorrLoc.append(corrMatrix4.to_json())
    packCorrLoc.append(corrMatrix5.to_json())
    packCorrLoc.append(corrMatrixComb1.to_json())
    packCorrLoc.append(corrMatrixComb2.to_json())
    packCorrLoc.append(corrMatrixComb3.to_json())
    packCorrLoc.append(corrMatrixComb4.to_json())
    packCorrLoc.append(corrMatrixComb5.to_json())
    packCorrLoc.append(corrMatrixCombTotal1.to_json())
    packCorrLoc.append(corrMatrixCombTotal2.to_json())
    packCorrLoc.append(corrMatrixCombTotal3.to_json())
    packCorrLoc.append(corrMatrixCombTotal4.to_json())
    packCorrLoc.append(corrMatrixCombTotal5.to_json())
    packCorrLoc.append(VIF1.to_json())
    packCorrLoc.append(VIF2.to_json())
    packCorrLoc.append(VIF3.to_json())
    packCorrLoc.append(VIF4.to_json())
    packCorrLoc.append(VIF5.to_json())
    packCorrLoc.append(json.dumps(MI1List))
    packCorrLoc.append(json.dumps(MI2List))
    packCorrLoc.append(json.dumps(MI3List))
    packCorrLoc.append(json.dumps(MI4List))
    packCorrLoc.append(json.dumps(MI5List))
    return packCorrLoc
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/thresholdDataSpace', methods=["GET", "POST"])
def Seperation():
    thresholds = request.get_data().decode('utf8').replace("'", '"')
    thresholds = json.loads(thresholds)
    thresholdsPos = thresholds['PositiveValue']
    thresholdsNeg = thresholds['NegativeValue']
    getCorrectPrediction = []
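    # per-instance probability (in %) that the cross-validated model assigned to the true class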
    for index, value in enumerate(yPredictProb):
        getCorrectPrediction.append(value[yData[index]]*100)
    quadrant1 = []
    quadrant2 = []
    quadrant3 = []
    quadrant4 = []
    quadrant5 = []
    probabilityPredictions = []
    for index, value in enumerate(getCorrectPrediction):
        if (value > 50 and value > thresholdsPos):
            quadrant1.append(index)
        elif (value > 50 and value <= thresholdsPos):
            quadrant2.append(index)
        elif (value <= 50 and value > thresholdsNeg):
            quadrant3.append(index)
        else:
            quadrant4.append(index)
        quadrant5.append(index)
        probabilityPredictions.append(value)
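    # quadrant1: correct and above the positive threshold, quadrant2: correct but below it,
    # quadrant3: misclassified yet above the negative threshold, quadrant4: the remaining misclassified,
    # quadrant5: every instance (used as the overall reference)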
    # Main Features
    DataRows1 = XData.iloc[quadrant1, :]
    DataRows2 = XData.iloc[quadrant2, :]
    DataRows3 = XData.iloc[quadrant3, :]
    DataRows4 = XData.iloc[quadrant4, :]
    DataRows5 = XData.iloc[quadrant5, :]
    Transformation(quadrant1, quadrant2, quadrant3, quadrant4, quadrant5)
    
    corrMatrix1 = DataRows1.corr()
    corrMatrix1 = corrMatrix1.abs()
    corrMatrix2 = DataRows2.corr()
    corrMatrix2 = corrMatrix2.abs()
    corrMatrix3 = DataRows3.corr()
    corrMatrix3 = corrMatrix3.abs()
    corrMatrix4 = DataRows4.corr()
    corrMatrix4 = corrMatrix4.abs()
    corrMatrix5 = DataRows5.corr()
    corrMatrix5 = corrMatrix5.abs()
    DataRows1 = DataRows1.reset_index(drop=True)
    DataRows2 = DataRows2.reset_index(drop=True)
    DataRows3 = DataRows3.reset_index(drop=True)
    DataRows4 = DataRows4.reset_index(drop=True)
    DataRows5 = DataRows5.reset_index(drop=True)
    targetRows1 = [yData[i] for i in quadrant1] 
    targetRows2 = [yData[i] for i in quadrant2] 
    targetRows3 = [yData[i] for i in quadrant3] 
    targetRows4 = [yData[i] for i in quadrant4] 
    targetRows5 = [yData[i] for i in quadrant5] 
    targetRows1Arr = np.array(targetRows1)
    targetRows2Arr = np.array(targetRows2)
    targetRows3Arr = np.array(targetRows3)
    targetRows4Arr = np.array(targetRows4)
    targetRows5Arr = np.array(targetRows5)
    uniqueTarget1 = unique(targetRows1)
    uniqueTarget2 = unique(targetRows2)
    uniqueTarget3 = unique(targetRows3)
    uniqueTarget4 = unique(targetRows4)
    uniqueTarget5 = unique(targetRows5)
    if (len(targetRows1Arr) > 0):
        onehotEncoder1 = OneHotEncoder(sparse=False)
        targetRows1Arr = targetRows1Arr.reshape(len(targetRows1Arr), 1)
        onehotEncoder1 = onehotEncoder1.fit_transform(targetRows1Arr)
        hotEncoderDF1 = pd.DataFrame(onehotEncoder1)
        concatDF1 = pd.concat([DataRows1, hotEncoderDF1], axis=1)
        corrMatrixComb1 = concatDF1.corr()
        corrMatrixComb1 = corrMatrixComb1.abs()
        corrMatrixComb1 = corrMatrixComb1.iloc[:,-len(uniqueTarget1):]
        DataRows1 = DataRows1.replace([np.inf, -np.inf], np.nan)
        DataRows1 = DataRows1.fillna(0)
        X1 = add_constant(DataRows1)
        X1 = X1.replace([np.inf, -np.inf], np.nan)
        X1 = X1.fillna(0)
        VIF1 = pd.Series([variance_inflation_factor(X1.values, i) 
            for i in range(X1.shape[1])], 
            index=X1.columns)
        VIF1 = VIF1.replace([np.inf, -np.inf], np.nan)
        VIF1 = VIF1.fillna(0)
        if (len(targetRows1Arr) > 2):
            MI1 = mutual_info_classif(DataRows1, targetRows1Arr, n_neighbors=3, random_state=RANDOM_SEED)
            MI1List = MI1.tolist()
        else:
            MI1List = []
    else:
        corrMatrixComb1 = pd.DataFrame()
        VIF1 = pd.Series()
        MI1List = []
    if (len(targetRows2Arr) > 0):
        onehotEncoder2 = OneHotEncoder(sparse=False)
        targetRows2Arr = targetRows2Arr.reshape(len(targetRows2Arr), 1)
        onehotEncoder2 = onehotEncoder2.fit_transform(targetRows2Arr)
        hotEncoderDF2 = pd.DataFrame(onehotEncoder2)
        concatDF2 =
completion: pd.concat([DataRows2, hotEncoderDF2], axis=1)
api: pandas.concat

	import pandas as pd
import numpy as np
import json
PROCESS_FILE_NAME_LIST = ["taxi_sort_01", "taxi_sort_001", "taxi_sort_002", "taxi_sort_003", "taxi_sort_004", "taxi_sort_005", "taxi_sort_006", "taxi_sort_007", "taxi_sort_008", "taxi_sort_009", "taxi_sort_0006", "taxi_sort_0007", "taxi_sort_0008", "taxi_sort_0009"]
PROCESS_FILE_SUFFIX_LIST  = [".csv" for _ in range(len(PROCESS_FILE_NAME_LIST))]
for process_file_name, process_file_suffix in zip(PROCESS_FILE_NAME_LIST, PROCESS_FILE_SUFFIX_LIST):
    df = pd.read_csv(process_file_name + process_file_suffix, index_col=False)
    df_precinct_center =
completion: pd.read_csv("precinct_center.csv", index_col=False)
api: pandas.read_csv

	import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
target = 'scale'
# IP
plot_mode = 'all_in_one'
obj = 'occ'
# Port
flow_dir = 'all'
port_dir = 'sys'
user_plot_pr = ['TCP']
user_plot_pr = ['UDP']
port_hist = pd.DataFrame({'A' : []})
user_port_hist = pd.DataFrame({'A' : []})
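# acf: lag 0..length-1 autocorrelation of a series, with the lag-0 term fixed at 1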
def acf(x, length=10):
  return np.array([1]+[np.corrcoef(x[:-i], x[i:])[0,1]  \
      for i in range(1, length)])
def scale_check(data_idx, plot=False):
    files = ['stanc', 'arcnn_f90', 'wpgan', 'ctgan', 'bsl1', 'bsl2', 'real']
    names = ['stan_b', 'stan_a', 'wpgan', 'ctgan', 'bsl1', 'bsl2', 'real']
 
    if files[data_idx] == 'real':
        df = pd.read_csv("./postprocessed_data/%s/day2_90user.csv" % files[data_idx])
    elif files[data_idx] == 'stanc' or files[data_idx] == 'stan':
        df = pd.read_csv("./postprocessed_data/%s/%s_piece%d.csv" % (files[data_idx], files[data_idx], 0)) 
    else:
        df = pd.read_csv("./postprocessed_data/%s/%s_piece%d.csv" % (files[data_idx], files[data_idx], 0), index_col=None) 
        li = [df]
        for piece_idx in range(1, 5):
            df = pd.read_csv("./postprocessed_data/%s/%s_piece%d.csv" % (files[data_idx], files[data_idx], piece_idx), index_col=None, header=0)
            li.append(df)
        df = pd.concat(li, axis=0, ignore_index=True)
    
    scale_list = []
    for col in ['byt', 'pkt']:
        scale_list.append(col)
        scale_list.append(str(np.min(df[col])))
        scale_list.append(str(np.log(np.max(df[col]))))
        scale_list.append(';')
    print(files[data_idx], ':', (' '.join(scale_list)))
def pr_distribution(data_idx, plot=False):
    files = ['stan','stanc', 'arcnn_f90', 'wpgan', 'ctgan', 'bsl1', 'bsl2', 'real']
    names = ['stan_fwd','stan_b', 'stan_a', 'wpgan', 'ctgan', 'bsl1', 'bsl2', 'real']
 
    if files[data_idx] == 'real':
        df = pd.read_csv("./postprocessed_data/%s/day2_90user.csv" % files[data_idx])
    elif files[data_idx] == 'stanc' or files[data_idx] == 'stan':
        df = pd.read_csv("./postprocessed_data/%s/%s_piece%d.csv" % (files[data_idx], files[data_idx], 0)) 
    else:
        df = pd.read_csv("./postprocessed_data/%s/%s_piece%d.csv" % (files[data_idx], files[data_idx], 0), index_col=None) 
        li = [df]
        for piece_idx in range(1, 5):
            df =
completion: pd.read_csv("./postprocessed_data/%s/%s_piece%d.csv" % (files[data_idx], files[data_idx], piece_idx), index_col=None, header=0)
api: pandas.read_csv

	# %% [markdown]
# This Python script takes audio files from "filedata" from sonicboom, runs each audio file through a
# Fast Fourier Transform, plots the FFT image, splits the FFT'd images into train, test, and validation sets,
# and pastes them into their respective folders.
# Import Dependencies
import numpy as np
import pandas as pd
import scipy 
from scipy import io
from scipy.io.wavfile import read as wavread
from scipy.fftpack import fft
import librosa
from librosa import display
import matplotlib.pyplot as plt 
from glob import glob
import sklearn
from sklearn.model_selection import train_test_split
import os
from PIL import Image
import pathlib
import sonicboom
from joblib import Parallel, delayed
# %% [markdown]
# ## Read and add filepaths to original UrbanSound metadata
filedata = sonicboom.init_data('./data/UrbanSound8K/') #Read filedata as written in sonicboom
#Initialize empty dataframes to later enable saving the images into their respective folders
train =
completion: pd.DataFrame()
api: pandas.DataFrame

'''
The analysis module
Handles the analyses of the info and data space for experiment evaluation and design.
'''
from slm_lab.agent import AGENT_DATA_NAMES
from slm_lab.env import ENV_DATA_NAMES
from slm_lab.lib import logger, util, viz
import numpy as np
import os
import pandas as pd
import pydash as ps
import shutil
DATA_AGG_FNS = {
    't': 'sum',
    'reward': 'sum',
    'loss': 'mean',
    'explore_var': 'mean',
}
FITNESS_COLS = ['strength', 'speed', 'stability', 'consistency']
# TODO improve to make it work with any reward mean
FITNESS_STD = util.read('slm_lab/spec/_fitness_std.json')
NOISE_WINDOW = 0.05
MA_WINDOW = 100
logger = logger.get_logger(__name__)
'''
Fitness analysis
'''
def calc_strength(aeb_df, rand_epi_reward, std_epi_reward):
    '''
    For each episode, use the total rewards to calculate the strength as
    strength_epi = (reward_epi - reward_rand) / (reward_std - reward_rand)
    **Properties:**
    - random agent has strength 0, standard agent has strength 1.
    - if an agent achieves 2x the rewards, the strength is ~2x, and so on.
    - the strength of a learning agent always tends toward positive regardless of the sign of rewards (some environments use negative rewards)
    - the scale of strength is always standard at 1 and its multiples, regardless of the scale of actual rewards. Strength stays invariant even as the reward gets rescaled.
    This allows for standard comparison between agents on the same problem using an intuitive measurement of strength. With proper scaling by a difficulty factor, we can compare across problems of different difficulties.
    '''
    # clip at 0 so that reward noise dipping slightly below the random baseline does not yield negative strength
    return (aeb_df['reward'] - rand_epi_reward).clip(0.) / (std_epi_reward - rand_epi_reward)
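# Worked sketch (illustrative numbers, not taken from the library): with rand_epi_reward = 10,
# std_epi_reward = 110 and an episode reward of 60,
#   strength_epi = clip(60 - 10, 0) / (110 - 10) = 50 / 100 = 0.5
# i.e. the agent sits halfway between the random and the standard agent.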
def calc_stable_idx(aeb_df, min_strength_ma):
    '''Calculate the index (epi) when strength first becomes stable (using moving mean and working backward)'''
    above_std_strength_sr = (aeb_df['strength_ma'] >= min_strength_ma)
    if above_std_strength_sr.any():
        # if the moving average reached min_strength_ma at some point, take the index where it first did
        std_strength_ra_idx = above_std_strength_sr.idxmax()
        stable_idx = std_strength_ra_idx - (MA_WINDOW - 1)
    else:
        stable_idx = np.nan
    return stable_idx
def calc_std_strength_timestep(aeb_df):
    '''
    Calculate the timestep needed to achieve stable (within NOISE_WINDOW) std_strength.
    For an agent that fails to achieve std_strength 1, it is meaningless to measure speed or to give a false interpolation, so it is set to inf (never).
    '''
    std_strength = 1.
    stable_idx = calc_stable_idx(aeb_df, min_strength_ma=std_strength - NOISE_WINDOW)
    if np.isnan(stable_idx):
        std_strength_timestep = np.inf
    else:
        std_strength_timestep = aeb_df.loc[stable_idx, 'total_t'] / std_strength
    return std_strength_timestep
def calc_speed(aeb_df, std_timestep):
    '''
    For each session, measure the moving average for strength with interval = 100 episodes.
    Next, measure the total timesteps up to the first episode that first surpasses standard strength, allowing for noise of 0.05.
    Finally, calculate speed as
    speed = timestep_std / timestep_solved
    **Properties:**
    - random agent has speed 0, standard agent has speed 1.
    - if an agent takes 2x the timesteps to exceed standard strength, we can say it is 2x slower.
    - the speed of a learning agent always tends toward positive regardless of the shape of the rewards curve
    - the scale of speed is always standard at 1 and its multiples, regardless of the absolute timesteps.
    For an agent that fails to achieve standard strength 1, it is meaningless to measure speed or to give a false interpolation, so the speed is 0.
    This allows an intuitive measurement of learning speed and the standard comparison between agents on the same problem.
    '''
    agent_timestep = calc_std_strength_timestep(aeb_df)
    speed = std_timestep / agent_timestep
    return speed
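# Worked sketch (illustrative numbers): with std_timestep = 100000 and an agent that first holds
# std_strength (within the noise window) after 200000 timesteps,
#   speed = 100000 / 200000 = 0.5, i.e. the agent is 2x slower than the standard agent.
# An agent that never reaches std_strength has timestep = inf and therefore speed = 0.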
def is_noisy_mono_inc(sr):
    '''Check if sr is monotonically increasing within the noise tolerance NOISE_WINDOW = 5%, i.e. noise = 5% * std_strength = 0.05 * 1'''
    zero_noise = -NOISE_WINDOW
    mono_inc_sr = np.diff(sr) >= zero_noise
    # restore sr to same length
    mono_inc_sr = np.insert(mono_inc_sr, 0, np.nan)
    return mono_inc_sr
def calc_stability(aeb_df):
    '''
    Find a baseline =
    - 0. + noise for a very weak solution
    - max(strength_ma_epi) - noise for a partial (weak) solution
    - 1. - noise for solution achieving standard strength and beyond
    So we get:
    - weak_baseline = 0. + noise
    - strong_baseline = min(max(strength_ma_epi), 1.) - noise
    - baseline = max(weak_baseline, strong_baseline)
    Let epi_baseline be the episode where baseline is first attained. Consider the episodes starting from epi_baseline, let #epi_+ be the number of episodes, and #epi_>= the number of episodes where strength_ma_epi is monotonically increasing.
    Calculate stability as
    stability = #epi_>= / #epi_+
    **Properties:**
    - stable agent has value 1, unstable agent < 1, and non-solution = 0.
    - allows for drops in strength MA of up to 5% to account for noise, which is invariant to the scale of rewards
    - if strength is monotonically increasing (with 5% noise), then it is stable
    - sharp gain in strength is considered stable
    - monotonically increasing implies strength can keep growing and as long as it does not fall much, it is considered stable
    '''
    weak_baseline = 0. + NOISE_WINDOW
    strong_baseline = min(aeb_df['strength_ma'].max(), 1.) - NOISE_WINDOW
    baseline = max(weak_baseline, strong_baseline)
    stable_idx = calc_stable_idx(aeb_df, min_strength_ma=baseline)
    if np.isnan(stable_idx):
        stability = 0.
    else:
        stable_df = aeb_df.loc[stable_idx:, 'strength_mono_inc']
        stability = stable_df.sum() / len(stable_df)
    return stability
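# Worked sketch (illustrative numbers): with NOISE_WINDOW = 0.05 and max(strength_ma) = 0.8,
# baseline = max(0.05, 0.8 - 0.05) = 0.75. If 40 episodes remain from the first stable episode
# onward and the per-episode strength is non-decreasing (within the 5% noise) in 36 of them,
#   stability = 36 / 40 = 0.9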
def calc_consistency(aeb_fitness_df):
    '''
    Calculate the consistency of trial by the fitness_vectors of its sessions:
    consistency = ratio of non-outlier vectors
    **Properties:**
    - outliers are calculated using MAD modified z-score
    - if all the fitness vectors are zero or all strength are zero, consistency = 0
    - works for all sorts of session fitness vectors, with the standard scale
    When an agent fails to achieve standard strength, it is meaningless to measure consistency or give false interpolation, so consistency is 0.
    '''
    fitness_vecs = aeb_fitness_df.values
    if ~np.any(fitness_vecs) or ~np.any(aeb_fitness_df['strength']):
        # no consistency if vectors all 0
        consistency = 0.
    elif len(fitness_vecs) == 2:
        # if only has 2 vectors, check norm_diff
        diff_norm = np.linalg.norm(np.diff(fitness_vecs, axis=0)) / np.linalg.norm(np.ones(len(fitness_vecs[0])))
        consistency = diff_norm <= NOISE_WINDOW
    else:
        is_outlier_arr = util.is_outlier(fitness_vecs)
        consistency = (~is_outlier_arr).sum() / len(is_outlier_arr)
    return consistency
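# Worked sketch for the two-session branch (illustrative numbers): fitness vectors
# [1.0, 1.0, 1.0] and [0.96, 1.0, 1.0] give
#   diff_norm = ||[-0.04, 0, 0]|| / ||[1, 1, 1]|| = 0.04 / sqrt(3) ~= 0.023 <= NOISE_WINDOW,
# so the trial is treated as consistent (consistency evaluates to True, i.e. 1).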
def calc_epi_reward_ma(aeb_df):
    '''Calculates the episode reward moving average with the MA_WINDOW'''
    rewards = aeb_df['reward']
    aeb_df['reward_ma'] = rewards.rolling(window=MA_WINDOW, min_periods=0, center=False).mean()
    return aeb_df
def calc_fitness(fitness_vec):
    '''
    Takes a vector of qualifying standardized dimensions of fitness and computes the normalized length as fitness.
    The L2 norm is used because it diminishes lower values but amplifies higher values for comparison.
    '''
    if isinstance(fitness_vec, pd.Series):
        fitness_vec = fitness_vec.values
    elif isinstance(fitness_vec, pd.DataFrame):
        fitness_vec = fitness_vec.iloc[0].values
    std_fitness_vector = np.ones(len(fitness_vec))
    fitness = np.linalg.norm(fitness_vec) / np.linalg.norm(std_fitness_vector)
    return fitness
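# Worked sketch: a 3-dimensional fitness vector [1, 1, 1] matches the standard vector, so
# fitness = sqrt(3) / sqrt(3) = 1; the vector [2, 0, 0] gives fitness = 2 / sqrt(3) ~= 1.15,
# illustrating how the L2 norm amplifies a single strong dimension.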
def calc_aeb_fitness_sr(aeb_df, env_name):
    '''Top level method to calculate fitness vector for AEB level data (strength, speed, stability)'''
    no_fitness_sr = pd.Series({
        'strength': 0., 'speed': 0., 'stability': 0.})
    if len(aeb_df) < MA_WINDOW:
        logger.warn(f'Run more than {MA_WINDOW} episodes to compute proper fitness')
        return no_fitness_sr
    std = FITNESS_STD.get(env_name)
    if std is None:
        std = FITNESS_STD.get('template')
        logger.warn(f'The fitness standard for env {env_name} is not built yet. Contact author. Using a template standard for now.')
    aeb_df['total_t'] = aeb_df['t'].cumsum()
    aeb_df['strength'] = calc_strength(aeb_df, std['rand_epi_reward'], std['std_epi_reward'])
    aeb_df['strength_ma'] = aeb_df['strength'].rolling(MA_WINDOW).mean()
    aeb_df['strength_mono_inc'] = is_noisy_mono_inc(aeb_df['strength']).astype(int)
    strength = aeb_df['strength_ma'].max()
    speed = calc_speed(aeb_df, std['std_timestep'])
    stability = calc_stability(aeb_df)
    aeb_fitness_sr = pd.Series({
        'strength': strength, 'speed': speed, 'stability': stability})
    return aeb_fitness_sr
'''
Analysis interface methods
'''
def save_spec(spec, info_space, unit='experiment'):
    '''Save spec to proper path. Called at Experiment or Trial init.'''
    prepath = util.get_prepath(spec, info_space, unit)
    util.write(spec, f'{prepath}_spec.json')
def calc_mean_fitness(fitness_df):
    '''Method to calculated mean over all bodies for a fitness_df'''
    return fitness_df.mean(axis=1, level=3)
def get_session_data(session):
    '''
    Gather data from session: MDP, Agent, Env data, hashed by aeb; then aggregate.
    @returns {dict, dict} session_mdp_data, session_data
    '''
    session_data = {}
    for aeb, body in util.ndenumerate_nonan(session.aeb_space.body_space.data):
        session_data[aeb] = body.df.copy()
    return session_data
def calc_session_fitness_df(session, session_data):
    '''Calculate the session fitness df'''
    session_fitness_data = {}
    for aeb in session_data:
        aeb_df = session_data[aeb]
        aeb_df = calc_epi_reward_ma(aeb_df)
        util.downcast_float32(aeb_df)
        body = session.aeb_space.body_space.data[aeb]
        aeb_fitness_sr = calc_aeb_fitness_sr(aeb_df, body.env.name)
        aeb_fitness_df = pd.DataFrame([aeb_fitness_sr], index=[session.index])
        aeb_fitness_df = aeb_fitness_df.reindex(FITNESS_COLS[:3], axis=1)
        session_fitness_data[aeb] = aeb_fitness_df
    # form multi_index df, then take mean across all bodies
    session_fitness_df =
completion: pd.concat(session_fitness_data, axis=1)
api: pandas.concat

#!/usr/bin/env python3
# Project : From geodynamic to Seismic observations in the Earth's inner core
# Author : <NAME>
""" Implement classes for tracers,
to create points along the trajectories of given points.
"""
import numpy as np
import pandas as pd
import math
import matplotlib.pyplot as plt
from . import data
from . import geodyn_analytical_flows
from . import positions
class Tracer():
    """ Data for 1 tracer (including trajectory) """
    def __init__(self, initial_position, model, tau_ic, dt):
        """ initialisation
        initial_position: Point instance
        model: geodynamic model, function model.trajectory_single_point is required
        """
        self.initial_position = initial_position
        self.model = model  # geodynamic model
        try:
            self.model.trajectory_single_point
        except AttributeError:
            print(
                "model.trajectory_single_point is required, please check the input model: {}".format(model))
        point = [initial_position.x, initial_position.y, initial_position.z]
        self.crystallization_time = self.model.crystallisation_time(point, tau_ic)
        num_t = max(2, math.floor((tau_ic - self.crystallization_time) / dt))
        # print(tau_ic, self.crystallization_time, num_t)
        self.num_t = num_t
        if num_t ==0:
            print("oups")
        # need to find cristallisation time of the particle
        # then calculate the number of steps, based on the required dt
        # then calculate the trajectory
        else:
            self.traj_x, self.traj_y, self.traj_z = self.model.trajectory_single_point(
                self.initial_position, tau_ic,  self.crystallization_time, num_t)
            self.time = np.linspace(tau_ic, self.crystallization_time, num_t)
            self.position = np.zeros((num_t, 3))
            self.velocity = np.zeros((num_t, 3))
            self.velocity_gradient = np.zeros((num_t, 9))
    def spherical(self):
        for index, (time, x, y, z) in enumerate(
                zip(self.time, self.traj_x, self.traj_y, self.traj_z)):
            point = positions.CartesianPoint(x, y, z)
            r, theta, phi = point.r, point.theta, point.phi
            grad = self.model.gradient_spherical(r, theta, phi, time)
            self.position[index, :] = [r, theta, phi]
            self.velocity[index, :] = [self.model.u_r(r, theta, time), self.model.u_theta(r, theta, time), self.model.u_phi(r, theta, time)]
            self.velocity_gradient[index, :] = grad.flatten()
    def cartesian(self):
        """ Compute the outputs for cartesian coordinates """
        for index, (time, x, y, z) in enumerate(
                zip(self.time, self.traj_x, self.traj_y, self.traj_z)):
            point = positions.CartesianPoint(x, y, z)
            r, theta, phi = point.r, point.theta, point.phi
            x, y, z = point.x, point.y, point.z
            vel = self.model.velocity(time, [x, y, z]) # self.model.velocity_cartesian(r, theta, phi, time)
            grad = self.model.gradient_cartesian(r, theta, phi, time)
            self.position[index, :] = [x, y, z]
            self.velocity[index, :] = vel[:]
            self.velocity_gradient[index, :] = grad.flatten()
    def output_spher(self, i):
        list_i = i * np.ones_like(self.time)
        data_i = pd.DataFrame(data=list_i, columns=["i"])
        data_time = pd.DataFrame(data=self.time, columns=["time"])
        dt = np.append(np.abs(np.diff(self.time)), [0])
        data_dt = pd.DataFrame(data=dt, columns=["dt"])
        data_pos = pd.DataFrame(data=self.position, columns=["r", "theta", "phi"])
        data_velo = pd.DataFrame(data=self.velocity, columns=["v_r", "v_theta", "v_phi"])
        data_strain = pd.DataFrame(data=self.velocity_gradient, columns=["dvr/dr", "dvr/dtheta", "dvr/dphi", "dvtheta/dr", "dvtheta/dtheta", "dvtheta/dphi", "dvphi/dr", "dvphi/dtheta", "dvphi/dphi"])
        data = pd.concat([data_i, data_time, data_dt, data_pos, data_velo, data_strain], axis=1)
        return data
        #data.to_csv("tracer.csv", sep=" ", index=False)
    def output_cart(self, i):
        list_i = i * np.ones_like(self.time)
        data_i = pd.DataFrame(data=list_i, columns=["i"])
        data_time = pd.DataFrame(data=self.time, columns=["time"])
        dt = np.append([0], np.diff(self.time))
        data_dt = pd.DataFrame(data=dt, columns=["dt"])
        data_pos = pd.DataFrame(data=self.position, columns=["x", "y", "z"])
        data_velo = pd.DataFrame(data=self.velocity, columns=["v_x", "v_y", "v_z"])
        data_strain =
completion: pd.DataFrame(data=self.velocity_gradient, columns=["dvx/dx", "dvx/dy", "dvx/dz", "dvy/dx", "dvy/dy", "dvy/dz", "dvz/dx", "dvz/dy", "dvz/dz"])
api: pandas.DataFrame

#!/usr/bin/env python
import sys, time, code
import numpy as np
import pickle as pickle
from pandas import DataFrame, read_pickle, get_dummies, cut
import statsmodels.formula.api as sm
from sklearn.externals import joblib
from sklearn.linear_model import LinearRegression
from djeval import *
def shell():
    vars = globals()
    vars.update(locals())
    shell = code.InteractiveConsole(vars)
    shell.interact()
def fix_colname(cn):
    return cn.translate(None, ' ()[],')
msg("Hi, reading yy_df.")
yy_df = read_pickle(sys.argv[1])
# clean up column names
colnames = list(yy_df.columns.values)
colnames = [fix_colname(cn) for cn in colnames]
yy_df.columns = colnames
# change the gamenum and side from being part of the index to being normal columns
yy_df.reset_index(inplace=True)
msg("Getting subset ready.")
# TODO save the dummies along with yy_df
categorical_features = ['opening_feature']
dummies =
completion: get_dummies(yy_df[categorical_features])
api: pandas.get_dummies

import os
import numpy as np
import pandas as pd
from numpy import abs
from numpy import log
from numpy import sign
from scipy.stats import rankdata
import scipy as sp
import statsmodels.api as sm
from data_source import local_source
from tqdm import tqdm as pb
# region Auxiliary functions
def ts_sum(df, window=10):
    """
    Wrapper function to estimate rolling sum.
    :param df: a pandas DataFrame.
    :param window: the rolling window.
    :return: a pandas DataFrame with the time-series sum over the past 'window' days.
    """
    
    return df.rolling(window).sum()
def ts_prod(df, window=10):
    """
    Wrapper function to estimate rolling product.
    :param df: a pandas DataFrame.
    :param window: the rolling window.
    :return: a pandas DataFrame with the time-series product over the past 'window' days.
    """
    
    return df.rolling(window).prod()
def sma(df, window=10): #simple moving average
    """
    Wrapper function to estimate SMA.
    :param df: a pandas DataFrame.
    :param window: the rolling window.
    :return: a pandas DataFrame with the time-series SMA over the past 'window' days.
    """
    return df.rolling(window).mean()
def ema(df, n, m): #exponential moving average
    """
    Wrapper function to estimate EMA.
    :param df: a pandas DataFrame.
    :return: ema_{t}=(m/n)*a_{t}+((n-m)/n)*ema_{t-1}
    """   
    result = df.copy()
    for i in range(1,len(df)):
        result.iloc[i] = (m*df.iloc[i] + (n-m)*result.iloc[i-1]) / n
    return result
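# Minimal usage sketch (illustrative, assuming a pandas Series input and the recursion given in the docstring):
#   ema(pd.Series([1.0, 2.0, 3.0]), n=3, m=1)
# gives ema_0 = 1.0, ema_1 = (1*2.0 + 2*1.0)/3 = 4/3, ema_2 = (1*3.0 + 2*(4/3))/3 = 17/9.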
def wma(df, n):
    """
    Wrapper function to estimate WMA.
    :param df: a pandas DataFrame.
    :return: wma_{t}=0.9*a_{t}+1.8*a_{t-1}+...+0.9*n*a_{t-n+1}
    """   
    weights = pd.Series(0.9*np.flipud(np.arange(1,n+1)))
    result = pd.Series(np.nan, index=df.index)
    for i in range(n-1,len(df)):
        result.iloc[i]= sum(df[i-n+1:i+1].reset_index(drop=True)*weights.reset_index(drop=True))
    return result
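# Worked sketch: for n = 3 the weights are 0.9 * [3, 2, 1] = [2.7, 1.8, 0.9], applied from oldest to
# newest, e.g. the window [10, 11, 13] gives 2.7*10 + 1.8*11 + 0.9*13 = 58.5.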
def stddev(df, window=10):
    """
    Wrapper function to estimate rolling standard deviation.
    :param df: a pandas DataFrame.
    :param window: the rolling window.
    :return: a pandas DataFrame with the time-series standard deviation over the past 'window' days.
    """
    return df.rolling(window).std()
def correlation(x, y, window=10):
    """
    Wrapper function to estimate rolling correlation.
    :param x: a pandas DataFrame.
    :param y: a pandas DataFrame.
    :param window: the rolling window.
    :return: a pandas DataFrame with the rolling correlation of x and y over the past 'window' days.
    """
    return x.rolling(window).corr(y)
def covariance(x, y, window=10):
    """
    Wrapper function to estimate rolling covariance.
    :param x: a pandas DataFrame.
    :param y: a pandas DataFrame.
    :param window: the rolling window.
    :return: a pandas DataFrame with the rolling covariance of x and y over the past 'window' days.
    """
    return x.rolling(window).cov(y)
def rolling_rank(na):
    """
    Auxiliary function to be used in pd.rolling_apply
    :param na: numpy array.
    :return: The rank of the last value in the array.
    """
    return rankdata(na)[-1]
def ts_rank(df, window=10):
    """
    Wrapper function to estimate rolling rank.
    :param df: a pandas DataFrame.
    :param window: the rolling window.
    :return: a pandas DataFrame with the time-series rank over the past window days.
    """
    return df.rolling(window).apply(rolling_rank)
def rolling_prod(na):
    """
    Auxiliary function to be used in pd.rolling_apply
    :param na: numpy array.
    :return: The product of the values in the array.
    """
    return np.prod(na)
def product(df, window=10):
    """
    Wrapper function to estimate rolling product.
    :param df: a pandas DataFrame.
    :param window: the rolling window.
    :return: a pandas DataFrame with the time-series product over the past 'window' days.
    """
    return df.rolling(window).apply(rolling_prod)
def ts_min(df, window=10):
    """
    Wrapper function to estimate rolling min.
    :param df: a pandas DataFrame.
    :param window: the rolling window.
    :return: a pandas DataFrame with the time-series min over the past 'window' days.
    """
    return df.rolling(window).min()
def ts_max(df, window=10):
    """
    Wrapper function to estimate rolling max.
    :param df: a pandas DataFrame.
    :param window: the rolling window.
    :return: a pandas DataFrame with the time-series max over the past 'window' days.
    """
    return df.rolling(window).max()
def delta(df, period=1):
    """
    Wrapper function to estimate difference.
    :param df: a pandas DataFrame.
    :param period: the difference grade.
    :return: a pandas DataFrame with today’s value minus the value 'period' days ago.
    """
    return df.diff(period)
def delay(df, period=1):
    """
    Wrapper function to estimate lag.
    :param df: a pandas DataFrame.
    :param period: the lag grade.
    :return: a pandas DataFrame with lagged time series
    """
    return df.shift(period)
def rank(df):
    """
    Cross sectional rank
    :param df: a pandas DataFrame.
    :return: a pandas DataFrame with rank along columns.
    """
    #return df.rank(axis=1, pct=True)
    return df.rank(pct=True)
def scale(df, k=1):
    """
    Scaling time series.
    :param df: a pandas DataFrame.
    :param k: scaling factor.
    :return: a pandas DataFrame rescaled df such that sum(abs(df)) = k
    """
    return df.mul(k).div(np.abs(df).sum())
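# Worked sketch: scale(pd.Series([1.0, -3.0]), k=2) multiplies by 2 and divides by
# sum(abs) = 4, giving [0.5, -1.5], whose absolute values sum to 2 (= k).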
def ts_argmax(df, window=10):
    """
    Wrapper function to estimate which day ts_max(df, window) occurred on
    :param df: a pandas DataFrame.
    :param window: the rolling window.
    :return: well.. that :)
    """
    return df.rolling(window).apply(np.argmax) + 1 
def ts_argmin(df, window=10):
    """
    Wrapper function to estimate which day ts_min(df, window) occurred on
    :param df: a pandas DataFrame.
    :param window: the rolling window.
    :return: well.. that :)
    """
    return df.rolling(window).apply(np.argmin) + 1
def decay_linear(df, period=10):
    """
    Linear weighted moving average implementation.
    :param df: a pandas DataFrame.
    :param period: the LWMA period
    :return: a pandas DataFrame with the LWMA.
    """
    try:
        df = df.to_frame()  #Series is not supported for the calculations below.
    except:
        pass
    # Clean data
    if df.isnull().values.any():
        df.fillna(method='ffill', inplace=True)
        df.fillna(method='bfill', inplace=True)
        df.fillna(value=0, inplace=True)
    na_lwma = np.zeros_like(df)
    na_lwma[:period, :] = df.iloc[:period, :] 
    na_series = df.values
    divisor = period * (period + 1) / 2
    y = (np.arange(period) + 1) * 1.0 / divisor
    # Estimate the actual lwma with the actual close.
    # The backtest engine should assure to be snooping bias free.
    for row in range(period - 1, df.shape[0]):
        x = na_series[row - period + 1: row + 1, :]
        na_lwma[row, :] = (np.dot(x.T, y))
    return pd.DataFrame(na_lwma, index=df.index, columns=['CLOSE'])  
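# Worked sketch: for period = 3 the divisor is 3*4/2 = 6 and the weights are [1/6, 2/6, 3/6],
# so each LWMA value is (oldest*1 + middle*2 + newest*3) / 6; e.g. the window [10, 11, 13]
# gives (10 + 22 + 39) / 6 = 71/6 ~= 11.83.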
def highday(df, n): # number of periods between the current point and the maximum of the previous n values of df
    result = pd.Series(np.nan, index=df.index)
    for i in range(n,len(df)):
        result.iloc[i]= i - df[i-n:i].idxmax()
    return result
def lowday(df, n): # number of periods between the current point and the minimum of the previous n values of df
    result = pd.Series(np.nan, index=df.index)
    for i in range(n,len(df)):
        result.iloc[i]= i - df[i-n:i].idxmin()
    return result    
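# Worked sketch (assuming a default integer index): with n = 5, if the maximum of the previous
# 5 observations sits 2 rows before the current one, highday yields 2 for that row; lowday
# does the same for the minimum.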
def daily_panel_csv_initializer(csv_name):  #not used now
    if os.path.exists(csv_name)==False:
        stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY')
        date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')
        dataset=0
        for date in date_list["TRADE_DATE"]:
            stock_list[date]=stock_list["INDUSTRY"]
        stock_list.drop("INDUSTRY",axis=1,inplace=True)
        stock_list.set_index("TS_CODE", inplace=True)
        dataset = pd.DataFrame(stock_list.stack())
        dataset.reset_index(inplace=True)
        dataset.columns=["TS_CODE","TRADE_DATE","INDUSTRY"]
        dataset.to_csv(csv_name,encoding='utf-8-sig',index=False)
    else:
        dataset=pd.read_csv(csv_name)
    return dataset
def IndustryAverage_vwap():          
    stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
    industry_list=stock_list["INDUSTRY"].drop_duplicates()
    date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
    
    #check for building/updating/reading dataset
    try:
        result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_vwap.csv")
        result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)        
        result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
        date_list_existed = pd.Series(result_industryaveraged_df.index)
        date_list_update = date_list[~date_list.isin(date_list_existed)]
        if len(date_list_update)==0:
            print("The corresponding industry average vwap data needs not to be updated.")
            return result_industryaveraged_df
        else:
            print("The corresponding industry average vwap data needs to be updated.")
            first_date_update = date_list_update[0]
    except:
        print("The corresponding industry average vwap data is missing.")
        result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
        date_list_update = date_list
        first_date_update=0
    
    #building/updating dataset
    result_unaveraged_industry=0 
    for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
        stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        # calculating per-stock (not yet industry-averaged) data
        for ts_code in stock_list_industry.index:
            quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True) 
            quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
            quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
            
            try:    #valid only in updating
                index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
                first_date_needed = date_list_existed.loc[index_first_date_needed]
                quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
            except:
                pass
 
            VWAP = (quotations_daily_chosen['AMOUNT']*1000)/(quotations_daily_chosen['VOL']*100+1) 
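            # Presumably AMOUNT is quoted in thousands of CNY and VOL in lots of 100 shares
            # (hence the *1000 and *100 rescaling), and the +1 in the denominator guards against
            # zero-volume days; this reading is an assumption, not documented in the source.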
            result_unaveraged_piece = VWAP
            
            result_unaveraged_piece.rename("VWAP_UNAVERAGED",inplace=True)
            result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
            result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
            result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
            result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
            
            result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
            
            if type(result_unaveraged_industry)==int:
                result_unaveraged_industry=result_unaveraged_piece
            else:
                result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)    
        
        # averaging the per-stock data within the industry
        for date in date_list_update:
            try:   #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
                result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]              
                value=result_piece["VWAP_UNAVERAGED"].mean()
                result_industryaveraged_df.loc[date,industry]=value
            except:
                pass
        result_unaveraged_industry=0
        
    result_industryaveraged_df.to_csv("IndustryAverage_Data_vwap.csv",encoding='utf-8-sig')           
    return result_industryaveraged_df
def IndustryAverage_close():          
    stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
    industry_list=stock_list["INDUSTRY"].drop_duplicates()
    date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
    
    #check for building/updating/reading dataset
    try:
        result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_close.csv")
        result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)        
        result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
        date_list_existed = pd.Series(result_industryaveraged_df.index)
        date_list_update = date_list[~date_list.isin(date_list_existed)]
        if len(date_list_update)==0:
            print("The corresponding industry average close data needs not to be updated.")
            return result_industryaveraged_df
        else:
            print("The corresponding industry average close data needs to be updated.")
            first_date_update = date_list_update[0]
    except:
        print("The corresponding industry average close data is missing.")
        result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
        date_list_update = date_list
        first_date_update=0
    
    #building/updating dataset
    result_unaveraged_industry=0 
    for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
        stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating unindentralized data
        for ts_code in stock_list_industry.index:
            quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True) 
            quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
            quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
            
            try:    #valid only in updating
                index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
                first_date_needed = date_list_existed.loc[index_first_date_needed]
                quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
            except:
                pass
   
            CLOSE = quotations_daily_chosen['CLOSE']  
            result_unaveraged_piece = CLOSE
            
            result_unaveraged_piece.rename("CLOSE_UNAVERAGED",inplace=True)
            result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
            result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
            result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
            result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
            
            result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
            
            if type(result_unaveraged_industry)==int:
                result_unaveraged_industry=result_unaveraged_piece
            else:
                result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)    
        
        #indentralizing data
        for date in date_list_update:
            try:   #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
                result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]              
                value=result_piece["CLOSE_UNAVERAGED"].mean()
                result_industryaveraged_df.loc[date,industry]=value
            except:
                pass
        result_unaveraged_industry=0
        
    result_industryaveraged_df.to_csv("IndustryAverage_Data_close.csv",encoding='utf-8-sig')           
    return result_industryaveraged_df
def IndustryAverage_low():          
    stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
    industry_list=stock_list["INDUSTRY"].drop_duplicates()
    date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
    
    #check for building/updating/reading dataset
    try:
        result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_low.csv")
        result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)        
        result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
        date_list_existed = pd.Series(result_industryaveraged_df.index)
        date_list_update = date_list[~date_list.isin(date_list_existed)]
        if len(date_list_update)==0:
            print("The corresponding industry average low data needs not to be updated.")
            return result_industryaveraged_df
        else:
            print("The corresponding industry average low data needs to be updated.")
            first_date_update = date_list_update[0]
    except:
        print("The corresponding industry average low data is missing.")
        result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
        date_list_update = date_list
        first_date_update=0
    
    #building/updating dataset
    result_unaveraged_industry=0 
    for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
        stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating unindentralized data
        for ts_code in stock_list_industry.index:
            quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True) 
            quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
            quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
            
            try:    #valid only in updating
                index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
                first_date_needed = date_list_existed.loc[index_first_date_needed]
                quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
            except:
                pass
 
            LOW = quotations_daily_chosen['LOW']  
            result_unaveraged_piece = LOW
            
            result_unaveraged_piece.rename("LOW_UNAVERAGED",inplace=True)
            result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
            result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
            result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
            result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
            
            result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
            
            if type(result_unaveraged_industry)==int:
                result_unaveraged_industry=result_unaveraged_piece
            else:
                result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)    
        
        #indentralizing data
        for date in date_list_update:
            try:   #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
                result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]              
                value=result_piece["LOW_UNAVERAGED"].mean()
                result_industryaveraged_df.loc[date,industry]=value
            except:
                pass
        result_unaveraged_industry=0
        
    result_industryaveraged_df.to_csv("IndustryAverage_Data_low.csv",encoding='utf-8-sig')           
    return result_industryaveraged_df
def IndustryAverage_volume():          
    stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
    industry_list=stock_list["INDUSTRY"].drop_duplicates()
    date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
    
    #check for building/updating/reading dataset
    try:
        result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_volume.csv")
        result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)        
        result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
        date_list_existed = pd.Series(result_industryaveraged_df.index)
        date_list_update = date_list[~date_list.isin(date_list_existed)]
        if len(date_list_update)==0:
            print("The corresponding industry average volume data needs not to be updated.")
            return result_industryaveraged_df
        else:
            print("The corresponding industry average volume data needs to be updated.")
            first_date_update = date_list_update[0]
    except:
        print("The corresponding industry average volume data is missing.")
        result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
        date_list_update = date_list
        first_date_update=0
    
    #building/updating dataset
    result_unaveraged_industry=0 
    for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
        stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating unindentralized data
        for ts_code in stock_list_industry.index:
            quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True) 
            quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
            quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
            
            try:    #valid only in updating
                index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
                first_date_needed = date_list_existed.loc[index_first_date_needed]
                quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
            except:
                pass
 
            VOLUME = quotations_daily_chosen['VOL']*100   
            result_unaveraged_piece = VOLUME
            
            result_unaveraged_piece.rename("VOLUME_UNAVERAGED",inplace=True)
            result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
            result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
            result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
            result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
            
            result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
            
            if type(result_unaveraged_industry)==int:
                result_unaveraged_industry=result_unaveraged_piece
            else:
                result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)    
        
        #indentralizing data
        for date in date_list_update:
            try:   #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
                result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]              
                value=result_piece["VOLUME_UNAVERAGED"].mean()
                result_industryaveraged_df.loc[date,industry]=value
            except:
                pass
        result_unaveraged_industry=0
        
    result_industryaveraged_df.to_csv("IndustryAverage_Data_volume.csv",encoding='utf-8-sig')           
    return result_industryaveraged_df
def IndustryAverage_adv(num):         
    stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
    industry_list=stock_list["INDUSTRY"].drop_duplicates()
    date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
    
    #check for building/updating/reading dataset
    try:
        result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_adv{num}.csv".format(num=num))
        result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)        
        result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
        date_list_existed = pd.Series(result_industryaveraged_df.index)
        date_list_update = date_list[~date_list.isin(date_list_existed)]
        if len(date_list_update)==0:
            print("The corresponding industry average adv{num} data needs not to be updated.".format(num=num))
            return result_industryaveraged_df
        else:
            print("The corresponding industry average adv{num} data needs to be updated.".format(num=num))
            first_date_update = date_list_update[0]
    except:
        print("The corresponding industry average adv{num} data is missing.".format(num=num))
        result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
        date_list_update = date_list
        first_date_update=0
    
    #building/updating dataset
    result_unaveraged_industry=0 
    for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
        stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating unindentralized data
        for ts_code in stock_list_industry.index:
            quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True) 
            quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
            quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
            
            try:    #valid only in updating
                index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
                first_date_needed = date_list_existed.loc[index_first_date_needed]
                quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
            except:
                pass
 
            VOLUME = quotations_daily_chosen['VOL']*100  
            result_unaveraged_piece = sma(VOLUME, num)
            
            result_unaveraged_piece.rename("ADV{num}_UNAVERAGED".format(num=num),inplace=True)
            result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
            result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
            result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
            result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
            
            result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
            
            if type(result_unaveraged_industry)==int:
                result_unaveraged_industry=result_unaveraged_piece
            else:
                result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)    
        
        #indentralizing data
        for date in date_list_update:
            try:   #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
                result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]              
                value=result_piece["ADV{num}_UNAVERAGED".format(num=num)].mean()
                result_industryaveraged_df.loc[date,industry]=value
            except:
                pass
        result_unaveraged_industry=0
        
    result_industryaveraged_df.to_csv("IndustryAverage_Data_adv{num}.csv".format(num=num),encoding='utf-8-sig')           
    return result_industryaveraged_df
#(correlation(delta(close, 1), delta(delay(close, 1), 1), 250) *delta(close, 1)) / close
def IndustryAverage_PreparationForAlpha048():          
    stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
    industry_list=stock_list["INDUSTRY"].drop_duplicates()
    date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
    
    #check for building/updating/reading dataset
    try:
        result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_PreparationForAlpha048.csv")
        result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)        
        result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
        date_list_existed = pd.Series(result_industryaveraged_df.index)
        date_list_update = date_list[~date_list.isin(date_list_existed)]
        if len(date_list_update)==0:
            print("The corresponding industry average data for alpha048 needs not to be updated.")
            return result_industryaveraged_df
        else:
            print("The corresponding industry average data for alpha048 needs to be updated.")
            first_date_update = date_list_update[0]
    except:
        print("The corresponding industry average dataset for alpha048 is missing.")
        result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
        date_list_update = date_list
        first_date_update=0
    
    #building/updating dataset
    result_unaveraged_industry=0 
    for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
        stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating unindentralized data
        for ts_code in stock_list_industry.index:
            quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True) 
            quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
            quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
            
            try:    #valid only in updating
                index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
                first_date_needed = date_list_existed.loc[index_first_date_needed]
                quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
            except:
                pass
 
            CLOSE = quotations_daily_chosen['CLOSE'] 
            result_unaveraged_piece = (correlation(delta(CLOSE, 1), delta(delay(CLOSE, 1), 1), 250) *delta(CLOSE, 1)) / CLOSE
            
            result_unaveraged_piece.rename("PREPARATION_FOR_ALPHA048_UNAVERAGED",inplace=True)
            result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
            result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
            result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
            result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
            
            result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
            
            if type(result_unaveraged_industry)==int:
                result_unaveraged_industry=result_unaveraged_piece
            else:
                result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)    
        
        #indentralizing data
        for date in date_list_update:
            try:   #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
                result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]              
                value=result_piece["PREPARATION_FOR_ALPHA048_UNAVERAGED"].mean()
                result_industryaveraged_df.loc[date,industry]=value
            except:
                pass
        result_unaveraged_industry=0
        
    result_industryaveraged_df.to_csv("IndustryAverage_Data_PreparationForAlpha048.csv",encoding='utf-8-sig')           
    return result_industryaveraged_df
#(vwap * 0.728317) + (vwap *(1 - 0.728317))
def IndustryAverage_PreparationForAlpha059():          
    stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
    industry_list=stock_list["INDUSTRY"].drop_duplicates()
    date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
    
    #check for building/updating/reading dataset
    try:
        result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_PreparationForAlpha059.csv")
        result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)        
        result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
        date_list_existed = pd.Series(result_industryaveraged_df.index)
        date_list_update = date_list[~date_list.isin(date_list_existed)]
        if len(date_list_update)==0:
            print("The corresponding industry average data for alpha059 needs not to be updated.")
            return result_industryaveraged_df
        else:
            print("The corresponding industry average data for alpha059 needs to be updated.")
            first_date_update = date_list_update[0]
    except:
        print("The corresponding industry average dataset for alpha059 is missing.")
        result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
        date_list_update = date_list
        first_date_update=0
    
    #building/updating dataset
    result_unaveraged_industry=0 
    for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
        stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating unindentralized data
        for ts_code in stock_list_industry.index:
            quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True) 
            quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
            quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
            
            try:    #valid only in updating
                index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
                first_date_needed = date_list_existed.loc[index_first_date_needed]
                quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
            except:
                pass
 
            VWAP = (quotations_daily_chosen['AMOUNT']*1000)/(quotations_daily_chosen['VOL']*100+1) 
            result_unaveraged_piece = (VWAP * 0.728317) + (VWAP *(1 - 0.728317))
            result_unaveraged_piece.rename("PREPARATION_FOR_ALPHA059_UNAVERAGED",inplace=True)
            result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
            result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
            result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
            result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
            
            result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
            
            if type(result_unaveraged_industry)==int:
                result_unaveraged_industry=result_unaveraged_piece
            else:
                result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)    
        
        #indentralizing data
        for date in date_list_update:
            try:   #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
                result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]              
                value=result_piece["PREPARATION_FOR_ALPHA059_UNAVERAGED"].mean()
                result_industryaveraged_df.loc[date,industry]=value
            except:
                pass
        result_unaveraged_industry=0
        
    result_industryaveraged_df.to_csv("IndustryAverage_Data_PreparationForAlpha059.csv",encoding='utf-8-sig')           
    return result_industryaveraged_df
#(close * 0.60733) + (open * (1 - 0.60733))
def IndustryAverage_PreparationForAlpha079():          
    stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
    industry_list=stock_list["INDUSTRY"].drop_duplicates()
    date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
    
    #check for building/updating/reading dataset
    try:
        result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_PreparationForAlpha079.csv")
        result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)        
        result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
        date_list_existed = pd.Series(result_industryaveraged_df.index)
        date_list_update = date_list[~date_list.isin(date_list_existed)]
        if len(date_list_update)==0:
            print("The corresponding industry average data for alpha079 needs not to be updated.")
            return result_industryaveraged_df
        else:
            print("The corresponding industry average data for alpha079 needs to be updated.")
            first_date_update = date_list_update[0]
    except:
        print("The corresponding industry average dataset for alpha079 is missing.")
        result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
        date_list_update = date_list
        first_date_update=0
    
    #building/updating dataset
    result_unaveraged_industry=0 
    for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
        stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating unindentralized data
        for ts_code in stock_list_industry.index:
            quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True) 
            quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
            quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
            
            try:    #valid only in updating
                index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
                first_date_needed = date_list_existed.loc[index_first_date_needed]
                quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
            except:
                pass
 
            OPEN = quotations_daily_chosen['OPEN']
            CLOSE = quotations_daily_chosen['CLOSE']
            result_unaveraged_piece = (CLOSE * 0.60733) + (OPEN * (1 - 0.60733))
            result_unaveraged_piece.rename("PREPARATION_FOR_ALPHA079_UNAVERAGED",inplace=True)
            result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
            result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
            result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
            result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
            
            result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #keep only the rows needed for this update, to reduce memory usage
            
            if type(result_unaveraged_industry)==int:
                result_unaveraged_industry=result_unaveraged_piece
            else:
                result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)    
        
        #averaging the per-stock data within the industry
        for date in date_list_update:
            try:   #skip dates with no per-stock data (e.g. every stock in the industry is suspended)
                result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]              
                value=result_piece["PREPARATION_FOR_ALPHA079_UNAVERAGED"].mean()
                result_industryaveraged_df.loc[date,industry]=value
            except:
                pass
        result_unaveraged_industry=0
        
    result_industryaveraged_df.to_csv("IndustryAverage_Data_PreparationForAlpha079.csv",encoding='utf-8-sig')           
    return result_industryaveraged_df
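# Usage sketch (hypothetical; assumes local_source and the pb progress wrapper are
# configured elsewhere in this module, and that quotation data is available locally):
#
#   df_alpha079 = IndustryAverage_PreparationForAlpha079()
#   # index: TRADE_DATE as int (e.g. 20200103), columns: industry names,
#   # values: industry mean of close*0.60733 + open*(1 - 0.60733)
#   first_industry = df_alpha079.columns[0]
#   latest_value = df_alpha079.iloc[-1][first_industry]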
#(open * 0.868128) + (high * (1 - 0.868128))
def IndustryAverage_PreparationForAlpha080():          
    stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
    industry_list=stock_list["INDUSTRY"].drop_duplicates()
    date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
    
    #check for building/updating/reading dataset
    try:
        result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_PreparationForAlpha080.csv")
        result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)        
        result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
        date_list_existed = pd.Series(result_industryaveraged_df.index)
        date_list_update = date_list[~date_list.isin(date_list_existed)]
        if len(date_list_update)==0:
            print("The corresponding industry average data for alpha080 needs not to be updated.")
            return result_industryaveraged_df
        else:
            print("The corresponding industry average data for alpha080 needs to be updated.")
            first_date_update = date_list_update[0]
    except:
        print("The corresponding industry average dataset for alpha080 is missing.")
        result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
        date_list_update = date_list
        first_date_update=0
    
    #building/updating dataset
    result_unaveraged_industry=0 
    for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
        stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating per-stock (not yet industry-averaged) data
        for ts_code in stock_list_industry.index:
            quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True) 
            quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
            quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
            
            try:    #valid only in updating
                index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
                first_date_needed = date_list_existed.loc[index_first_date_needed]
                quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
            except:
                pass
 
            OPEN = quotations_daily_chosen['OPEN']
            HIGH = quotations_daily_chosen['HIGH']
            result_unaveraged_piece = (OPEN * 0.868128) + (HIGH * (1 - 0.868128))
            result_unaveraged_piece.rename("PREPARATION_FOR_ALPHA080_UNAVERAGED",inplace=True)
            result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
            result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
            result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
            result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
            
            result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #keep only the rows needed for this update, to reduce memory usage
            
            if type(result_unaveraged_industry)==int:
                result_unaveraged_industry=result_unaveraged_piece
            else:
                result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)    
        
        #averaging the per-stock data within the industry
        for date in date_list_update:
            try:   #skip dates with no per-stock data (e.g. every stock in the industry is suspended)
                result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]              
                value=result_piece["PREPARATION_FOR_ALPHA080_UNAVERAGED"].mean()
                result_industryaveraged_df.loc[date,industry]=value
            except:
                pass
        result_unaveraged_industry=0
        
    result_industryaveraged_df.to_csv("IndustryAverage_Data_PreparationForAlpha080.csv",encoding='utf-8-sig')           
    return result_industryaveraged_df
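# The preparation functions above differ only in the two price columns and the fixed
# weight. A hypothetical helper (the names weighted_blend, col_a, col_b and out_name
# are illustrative and not part of the original code) could factor out the per-stock
# step:
def weighted_blend(quotes, col_a, col_b, weight, out_name):
    """Return weight*quotes[col_a] + (1 - weight)*quotes[col_b] as a named Series."""
    blend = quotes[col_a] * weight + quotes[col_b] * (1 - weight)
    return blend.rename(out_name)
# e.g. for alpha080:
#   weighted_blend(quotations_daily_chosen, "OPEN", "HIGH", 0.868128,
#                  "PREPARATION_FOR_ALPHA080_UNAVERAGED")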
#(low * 0.721001) + (vwap * (1 - 0.721001))
def IndustryAverage_PreparationForAlpha097():          
    stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
    industry_list=stock_list["INDUSTRY"].drop_duplicates()
    date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
    
    #check for building/updating/reading dataset
    try:
        result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_PreparationForAlpha097.csv")
        result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)        
        result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
        date_list_existed = pd.Series(result_industryaveraged_df.index)   # api: pandas.Series