File size: 14,000 Bytes
89c5d90
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
""" Utilities

"""
import numpy as np
import time
from termcolor import colored
import os,glob
import h5py

def get_model_trained_name(sess=None, data_dir=None, last_update=True):
    """Return the path of the most recently created '*.ckpt' file in data_dir.

    :param sess: unused, kept for backward compatibility with existing callers
    :param data_dir: directory containing checkpoint (.ckpt) files
    :param last_update: if True return the newest checkpoint; any other
        selection policy is not implemented yet
    :return: full path of the newest checkpoint, or None when data_dir is
        not given, holds no checkpoints, or last_update is False
    """
    # The old code crashed when only `sess` was given (data_dir=None fed to
    # os.path.join) and re-joined the glob pattern onto an already-complete
    # path, producing an invalid result for relative data_dir values.
    if data_dir is None:
        return None
    if not last_update:
        print('Not implemented yet')
        return None
    mfiles = glob.glob(os.path.join(data_dir, '*.ckpt'))
    if not mfiles:
        # max() over an empty sequence would raise ValueError.
        return None
    # glob results already include data_dir; return the newest as-is.
    return max(mfiles, key=os.path.getctime)

def gamma_correction(i, g, gamma=True):
    """Apply or invert gamma correction on image data.

    :param i: image data (converted to float32 internally)
    :param g: gamma exponent
    :param gamma: True -> raise to the power g (apply correction);
        False -> raise to the power 1/g (undo a previous correction)
    :return: gamma-adjusted image as float32
    """
    i = np.float32(i)
    exponent = g if gamma else 1 / g
    return i ** exponent

def image_normalization(img, img_min=0, img_max=255):
    """Linearly rescale an image into the [img_min, img_max] range.

    source: https://en.wikipedia.org/wiki/Normalization_(image_processing)
    :param img: an image, gray scale or color
    :param img_min: lower bound of the target range (default 0)
    :param img_max: upper bound of the target range (default 255)
    :return: the image rescaled to the requested range
    """
    img = np.float32(img)
    epsilon = 1e-12  # guards against division by zero on constant images
    low = np.min(img)
    high = np.max(img)
    scaled = (img - low) * (img_max - img_min) / ((high - low) + epsilon)
    return scaled + img_min


def normalization_data_0255(data):
    """Normalize image data to the [0, 255] range.

    Accepts a single 2-D image, a single H x W x 3 color image, or a batch
    of color images shaped (N, H, W, 3); batches are normalized per image.
    Other layouts are returned unchanged after printing an error.
    (The old docstring claimed a 0-1 range; the code has always scaled
    to 0-255.)
    :param data: image array
    :return: the normalized array (dtype preserved; integer batches are
        written back in place, so fractional parts are truncated)
    """
    epsilon = 1e-12  # guards against division by zero on constant images
    if len(data.shape) == 2:
        # Single-channel image: one global min/max rescale.
        return (data - np.min(data)) * 255 / ((np.max(data) - np.min(data)) + epsilon)
    if data.shape[-1] == 3 and len(data.shape) == 3:
        # Single RGB image: normalize across all channels jointly.
        data = ((data - np.min(data)) * 255 / ((np.max(data) - np.min(data)) + epsilon))
    elif data.shape[-1] == 3 and len(data.shape) == 4:
        # Batch of RGB images: normalize each image independently.
        for i in range(data.shape[0]):
            img = data[i, ...]
            data[i, :, :, :] = ((img - np.min(img)) * 255 / ((np.max(img) - np.min(img)) + epsilon))
    elif data.shape[-1] > 3 or len(data.shape) >= 4:
        print('error normalizin 0-255 line 30')
    else:
        print('error normalizin 0-255 line 32')
    # NOTE: the original trailing `else: print('error normalization 0-255')`
    # was unreachable (every rank was already covered) and has been removed.
    return data

def normalization_data_01(data):
    """Normalize image data to the [0, 1] range.

    Batches shaped (N, H, W, C) with C in {1, 2, 3, 4} are normalized per
    image (and, for 2/4 channels, the auxiliary channel separately).
    Lower-rank inputs are normalized globally in one pass.
    :param data: image array
    :return: normalized array, or the string 'variable has NaN values'
        when NaNs are detected before or after normalization
    """
    epsilon = 1e-12  # guards against division by zero on constant inputs
    if np.sum(np.isnan(data)) > 0:
        print('NaN detected before Normalization')
        return 'variable has NaN values'
    if len(data.shape) > 3:
        n_imgs = data.shape[0]
        data = np.float32(data)
        if data.shape[-1] == 3:
            for i in range(n_imgs):
                img = data[i, :, :, :]
                data[i, :, :, :] = ((img - np.min(img)) * 1 / ((np.max(img) - np.min(img)) + epsilon))
        elif data.shape[-1] == 4:
            print('it is a  little naive, check it in line 64 seg utils.py')
            for i in range(n_imgs):
                # NIR channel and RGB channels are normalized separately.
                nir = data[i, :, :, -1]
                nir = ((nir - np.min(nir)) * 1 / ((np.max(nir) - np.min(nir)) + epsilon))
                img = data[i, :, :, 0:3]
                img = ((img - np.min(img)) * 1 / ((np.max(img) - np.min(img)) + epsilon))
                data[i, :, :, 0:3] = img
                data[i, :, :, -1] = nir
        elif data.shape[-1] == 2:
            # normalization according to channels
            print('check line 70 utils_seg.py')
            for i in range(n_imgs):
                im = data[i, :, :, 0]
                N = data[i, :, :, -1]
                # Fixed: this branch divided by the raw range with no
                # epsilon, producing NaN/inf on constant channels.
                data[i, :, :, 0] = ((im - np.min(im)) * 1 / ((np.max(im) - np.min(im)) + epsilon))
                data[i, :, :, -1] = ((N - np.min(N)) * 1 / ((np.max(N) - np.min(N)) + epsilon))
        elif data.shape[-1] == 1:
            for i in range(n_imgs):
                img = data[i, :, :, 0]
                data[i, :, :, 0] = ((img - np.min(img)) * 1 / ((np.max(img) - np.min(img)) + epsilon))
        else:
            print("error normalizing line 83")
        if np.sum(np.isnan(data)) > 0:
            print('NaN detected after normalization')
            return 'variable has NaN values'
        return data
    else:
        # All-zero input is already in range; avoid pointless work.
        if np.max(data) == 0 and np.min(data) == 0:
            return data
        data = ((data - np.min(data)) * 1 / ((np.max(data) - np.min(data)) + epsilon))
        if np.sum(np.isnan(data)) > 0:
            print('NaN detected after normalization')
            return 'variable has NaN values'
        return data

# _________ text visualization ____________
def get_local_time():
    """Return the current local time formatted as 'DD Mon YYYY HHhMMmSSs'."""
    now = time.localtime()
    return time.strftime("%d %b %Y %Hh%Mm%Ss", now)

def print_info(info_string, quite=False):
    """Print a timestamped INFO message in green.

    :param info_string: message text to display
    :param quite: if True, suppress the output entirely (the original
        accepted this flag but ignored it; name kept for compatibility)
    """
    if quite:
        return
    info = '[{0}][INFO]{1}'.format(get_local_time(), info_string)
    print(colored(info, 'green'))

def print_error(error_string):
    """Print a timestamped ERROR message in red.

    :param error_string: message text to display
    """
    stamp = get_local_time()
    print(colored('[{0}][ERROR] {1}'.format(stamp, error_string), 'red'))

def print_warning(warning_string):
    """Print a timestamped WARNING message in blue.

    :param warning_string: message text to display
    """
    stamp = get_local_time()
    print(colored('[{0}][WARNING] {1}'.format(stamp, warning_string), 'blue'))
# ___________ End text visualization

# ___________ read list of files
def read_files_list(list_path, dataset_name=None):
    """Read a text file and return its lines with surrounding whitespace stripped.

    :param list_path: path of the list file to read
    :param dataset_name: unused, kept for interface compatibility
    :return: list of stripped lines
    """
    with open(list_path) as list_file:
        return [line.strip() for line in list_file]

def split_pair_names(opts, file_names, base_dir=None):
    """Split 'image label' pairs and prefix them with the dataset directories.

    If base_dir is None, each list entry is assumed to already contain
    complete image paths and is only split.
    :param opts: options object; reads model_state and, depending on it,
        train_dataset or test_dataset
    :param file_names: list of 'x_path y_path' strings
    :param base_dir: optional dataset root to prepend
    :return: list of split pairs; (x, y) tuples when base_dir is given,
        otherwise the raw split lists
    """
    # The original duplicated this whole body for train vs. test with the
    # dataset attribute as the only difference; deduplicated here.
    pairs = [c.split(' ') for c in file_names]
    if base_dir is None:
        return pairs
    if opts.model_state == 'train':
        dataset = opts.train_dataset
    else:
        dataset = opts.test_dataset
    if dataset.lower() == 'biped':
        # BIPED keeps images and edge maps in parallel subdirectories.
        x_base_dir = os.path.join(base_dir, 'imgs', opts.model_state)
        y_base_dir = os.path.join(base_dir, 'edge_maps', opts.model_state)
    else:
        x_base_dir = base_dir
        y_base_dir = base_dir
    return [(os.path.join(x_base_dir, c[0]),
             os.path.join(y_base_dir, c[1])) for c in pairs]
# ____________ End reading files list

# _____________ H5 file manager _________

def h5_reader(path):
    """Read up to three datasets from an HDF5 file.

    Reads the 'data', 'label' and 'test' datasets depending on how many
    variables the file holds (3, 2 or 1); missing ones come back as None.
    :param path: file path of the desired .h5 file
    :return: (data, label, test) tuple of numpy arrays or None
    """
    with h5py.File(path, 'r') as hf:
        n_variables = len(list(hf.keys()))
        data = None
        label = None
        test = None
        if n_variables == 3:
            data = np.array(hf.get('data'))
            label = np.array(hf.get('label'))
            test = np.array(hf.get('test'))
        elif n_variables == 2:
            data = np.array(hf.get('data'))
            label = np.array(hf.get('label'))
        elif n_variables == 1:
            data = np.array(hf.get('data'))
        else:
            print("Error reading path: ", path)
        print(n_variables, " vars opened from: ", path)
        return data, label, test

def save_h5_data(savepath, data, label, predi=None, data_name=None,
                 label_name=None, predi_name=None):
    """Save data/label (and optionally prediction) arrays into an HDF5 file.

    When data_name/label_name are not both given, the default dataset
    names 'data', 'label' and 'predi' are used.
    :param savepath: destination .h5 path (overwritten if it exists)
    :param data: input data array
    :param label: ground-truth array
    :param predi: optional prediction array; skipped when None
    :param data_name: custom dataset name for data
    :param label_name: custom dataset name for label
    :param predi_name: custom dataset name for predi
    """
    if data_name is None or label_name is None:
        with h5py.File(savepath, 'w') as hf:
            hf.create_dataset('data', data=data)
            hf.create_dataset('label', data=label)
            # `predi is None` replaces the deprecated elementwise
            # `np.any(predi == None)` identity test.
            if predi is None:
                print("Data [", data.shape, "and label ", label.shape, "] saved in: ", savepath)
            else:
                hf.create_dataset('predi', data=predi)
                print("Input data [", data.shape, ", label ", label.shape, "and predi ", predi.shape, "] saved in: ", savepath)
    else:
        with h5py.File(savepath, 'w') as hf:
            hf.create_dataset(data_name, data=data)
            hf.create_dataset(label_name, data=label)
            # Fixed: the original unconditionally wrote `predi` here and
            # crashed whenever it was None.
            if predi is not None and predi_name is not None:
                hf.create_dataset(predi_name, data=predi)
                print("[", data_name, data.shape, ", ",
                      label_name, label.shape, " and ", predi_name, predi.shape, "] saved in: ", savepath)
            else:
                print("[", data_name, data.shape, ", ", label_name, label.shape, "] saved in: ", savepath)

def save_variable_h5(savepath, data):
    """Write a single variable to an HDF5 file under the dataset name 'data'.

    :param savepath: destination .h5 path (overwritten if it exists)
    :param data: array-like variable to store
    """
    h5_file = h5py.File(savepath, 'w')
    try:
        h5_file.create_dataset('data', data=data)
        print("Data [", len(data), "] saved in: ", savepath)
    finally:
        h5_file.close()
# ___________ End h5 file manager ____________

# ____________ Restoring RGB former values _______

def restore_rgb(config, I):
    """Undo mean subtraction and channel swapping on network-ready images.

    Adds the mean pixel value back, reorders channels per config, and
    rescales each image to 0-255.
    :param config: [args.channel_swap, args.mean_pixel_value]
    :param I: an image (H x W x 3) or a set of images (list or 4-D array)
    :return: an image or a set of images restored
    """
    # Fixed: the original batch test required `not type(I)==np.ndarray`,
    # so a 4-D numpy batch fell through to the error branch untouched;
    # only Python lists of more than 3 images were processed.
    if (isinstance(I, np.ndarray) and len(I.shape) > 3) or \
            (not isinstance(I, np.ndarray) and len(I) > 3):
        I = np.array(I)
        I = I[:, :, :, 0:3]  # drop any extra channels beyond RGB
        n = I.shape[0]
        for i in range(n):
            x = I[i, ...]
            x = np.array(x, dtype=np.float32)
            x += config[1]           # restore the subtracted mean
            x = x[:, :, config[0]]   # undo the channel swap
            x = normalization_data_0255(x)
            I[i, :, :, :] = x
    elif len(I.shape) == 3 and I.shape[-1] == 3:
        I = np.array(I, dtype=np.float32)
        I += config[1]
        I = I[:, :, config[0]]
        I = normalization_data_0255(I)
    else:
        print_error("Sorry the input data size is out of our configuration")
    print_info("The enterely I data {} restored".format(I.shape))
    return I

def restore_edgemap(config,I):
    """ Not finished coding ***

    WARNING: explicitly unfinished. Every path prints an error banner and
    returns the input ``I`` effectively unprocessed — the intermediate
    ``bin_I`` / ``bin_y`` values computed below are never used.
    :param config: args.target_regression = True or False
    :param I:  input image data
    :return: restored image data
    """
    print_error("Sorry this function is not ready")
    if len(I.shape)>3 and I.shape[3]==1:
        n = I.shape[0]
        for i in range(n):
            y=I[i,...]  # NOTE(review): 'y' is overwritten each pass and never used
    elif len(I.shape)==3 and I.shape[-1]==1:

        # NOTE(review): '.convert' is a PIL.Image method — this would raise
        # on a plain numpy array; presumably I was once expected to be a
        # PIL image here. TODO confirm before finishing this function.
        I = np.array(I.convert('L'), dtype=np.float32)
        if config:
            bin_I = I / 255.0  # regression target: scale edges to [0, 1]
        else:
            bin_I = np.zeros_like(I)
            bin_I[np.where(I)] = 1  # binary target: any nonzero pixel -> 1
        # collapse to 2-D, then re-add a trailing channel axis
        bin_I = bin_I if bin_I.ndim == 2 else bin_I[:, :, 0]
        bin_y = np.expand_dims(bin_I, axis=2)  # NOTE(review): result discarded
    else:
        print_error("Sorry the input data size is out of our configuration")
    return I

def tensor_norm_01(data):
    """Normalize a 4-D tensor [batch, height, width, channels] to [0, 1].

    Each image (and, for 2- or 4-channel data, each auxiliary channel) is
    normalized independently via image_normalization().
    :param data: array-like tensor with rank > 3
    :return: normalized float32 tensor of the same shape, or the string
        'variable has NaN values' when NaNs appear before/after
    """
    data = np.array(data)
    if np.sum(np.isnan(data)) > 0:
        print('NaN detected before Normalization')
        return 'variable has NaN values'
    if len(data.shape) > 3:
        n_imgs = data.shape[0]
        data = np.float32(data)
        if data.shape[-1] == 3:
            for i in range(n_imgs):
                img = data[i, :, :, :]
                data[i, :, :, :] = image_normalization(img, img_min=0, img_max=1)
        elif data.shape[-1] == 4:
            print('it is a  little naive, check it in line 64 seg utils.py')
            for i in range(n_imgs):
                # NIR channel and RGB channels are normalized separately.
                nir = data[i, :, :, -1]
                nir = image_normalization(nir, img_min=0, img_max=1)
                img = data[i, :, :, 0:3]
                img = image_normalization(img, img_min=0, img_max=1)
                data[i, :, :, 0:3] = img
                data[i, :, :, -1] = nir
        elif data.shape[-1] == 2:
            # normalization according to channels
            print('check line 70 utils_seg.py')
            for i in range(n_imgs):
                im = data[i, :, :, 0]
                N = data[i, :, :, -1]
                data[i, :, :, 0] = image_normalization(im, img_min=0, img_max=1)
                data[i, :, :, -1] = image_normalization(N, img_min=0, img_max=1)
        elif data.shape[-1] == 1:
            # Fixed: the original collected the 2-D slices into a Python
            # list and returned that, silently dropping the batch/channel
            # structure; write back in place like every other branch.
            for i in range(n_imgs):
                img = data[i, :, :, 0]
                data[i, :, :, 0] = image_normalization(img, img_min=0, img_max=1)
        else:
            print("error normalizing line 83")
        if np.sum(np.isnan(data)) > 0:
            print('NaN detected after normalization')
            return 'variable has NaN values'
        return data
    else:
        print('Please use image_normalization() function')