import os
import math

import cv2
import numpy as np
import matplotlib.pyplot as plt
import skimage.measure

# utils.py: miscellaneous functions for reading, writing, and processing RGB and depth images.

def resizewithpool(img, size):
    # Downscale a square map by max-pooling so that its side length is roughly 'size'.
    i_size = img.shape[0]
    n = int(np.floor(i_size / size))

    out = skimage.measure.block_reduce(img, (n, n), np.max)
    return out

def showimage(img):
    # Display an image with a colorbar for quick inspection.
    plt.imshow(img)
    plt.colorbar()
    plt.show()

def read_image(path):
    # Read an image from disk and return it as an RGB float array scaled to [0, 1].
    img = cv2.imread(path)
    if img.ndim == 2:
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255.0
    return img

def generatemask(size):
    # Generates a Gaussian blending mask: 1 in the centre, smoothly falling off towards the borders.
    mask = np.zeros(size, dtype=np.float32)
    sigma = int(size[0] / 16)
    k_size = int(2 * np.ceil(2 * int(size[0] / 16)) + 1)
    mask[int(0.15 * size[0]):size[0] - int(0.15 * size[0]), int(0.15 * size[1]):size[1] - int(0.15 * size[1])] = 1
    mask = cv2.GaussianBlur(mask, (int(k_size), int(k_size)), sigma)
    mask = (mask - mask.min()) / (mask.max() - mask.min())
    mask = mask.astype(np.float32)
    return mask

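# Illustrative sketch (not part of the original file): generatemask() is typically used to
# feather a per-patch estimate back into a whole-image estimate. 'blend_patch' and its
# argument names are hypothetical, introduced here only to show the intended usage.
def blend_patch(base, patch_estimate, rect):
    # rect follows the [x, y, width, height] convention used by impatch() below.
    w1, h1 = rect[0], rect[1]
    w2, h2 = w1 + rect[2], h1 + rect[3]
    mask = generatemask((rect[3], rect[2]))
    base[h1:h2, w1:w2] = mask * patch_estimate + (1 - mask) * base[h1:h2, w1:w2]
    return base
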
def impatch(image, rect):
    # Extract the patch described by rect = [x, y, width, height] from the given image.
    w1 = rect[0]
    h1 = rect[1]
    w2 = w1 + rect[2]
    h2 = h1 + rect[3]
    image_patch = image[h1:h2, w1:w2]
    return image_patch

def getGF_fromintegral(integralimage, rect):
    # Computes the gradient density of a given patch from the gradient integral image.
    x1 = rect[1]
    x2 = rect[1] + rect[3]
    y1 = rect[0]
    y2 = rect[0] + rect[2]
    value = integralimage[x2, y2] - integralimage[x1, y2] - integralimage[x2, y1] + integralimage[x1, y1]
    return value

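# Usage sketch (an assumption, not taken from this file): the integral image is expected to
# carry the extra leading row and column that cv2.integral() produces, so a typical query is:
#
#   integral = cv2.integral(grad_map)              # grad_map: 2D gradient-magnitude image
#   density = getGF_fromintegral(integral, rect)   # rect = [x, y, width, height]
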
def rgb2gray(rgb):
    # Converts an RGB image to grayscale using the ITU-R BT.601 luma weights.
    return np.dot(rgb[..., :3], [0.2989, 0.5870, 0.1140])

def calculateprocessingres(img, basesize, confidence=0.1, scale_threshold=3, whole_size_threshold=3000):
    # Returns the R_x resolution described in section 5 of the main paper.
    # Parameters:
    #    img: input RGB image
    #    basesize: size of the dilation kernel, which is equal to the receptive field of the network.
    #    confidence: value of x in R_x; allowed percentage of pixels that do not get any contextual cue.
    #    scale_threshold: maximum allowed upscaling of the input image; it has been set to 3.
    #    whole_size_threshold: maximum allowed resolution (R_max from section 6 of the main paper).
    # Returns:
    #    outputsize_scale * speed_scale: the computed R_x resolution
    #    patch_scale: the K parameter from section 6 of the paper

    # The speed_scale parameter processes every image at a smaller size to accelerate the R_x resolution search.
    speed_scale = 32
    image_dim = int(min(img.shape[0:2]))

    gray = rgb2gray(img)
    grad = np.abs(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)) + np.abs(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3))
    grad = cv2.resize(grad, (image_dim, image_dim), interpolation=cv2.INTER_AREA)

    # Threshold the gradient map to generate the edge map as a proxy of the contextual cues.
    m = grad.min()
    M = grad.max()
    middle = m + (0.4 * (M - m))
    grad[grad < middle] = 0
    grad[grad >= middle] = 1

    # Dilation kernel with the size of the receptive field.
    kernel = np.ones((int(basesize / speed_scale), int(basesize / speed_scale)), dtype=float)
    # Dilation kernel with the size of a quarter of the receptive field, used to compute k
    # as described in section 6 of the main paper.
    kernel2 = np.ones((int(basesize / (4 * speed_scale)), int(basesize / (4 * speed_scale))), dtype=float)

    # Output resolution limit set by the whole_size_threshold and scale_threshold.
    threshold = min(whole_size_threshold, scale_threshold * max(img.shape[:2]))

    outputsize_scale = basesize / speed_scale
    for p_size in range(int(basesize / speed_scale), int(threshold / speed_scale), int(basesize / (2 * speed_scale))):
        grad_resized = resizewithpool(grad, p_size)
        grad_resized = cv2.resize(grad_resized, (p_size, p_size), interpolation=cv2.INTER_NEAREST)
        grad_resized[grad_resized >= 0.5] = 1
        grad_resized[grad_resized < 0.5] = 0

        dilated = cv2.dilate(grad_resized, kernel, iterations=1)
        meanvalue = (1 - dilated).mean()
        if meanvalue > confidence:
            break
        else:
            outputsize_scale = p_size

    grad_region = cv2.dilate(grad_resized, kernel2, iterations=1)
    patch_scale = grad_region.mean()

    return int(outputsize_scale * speed_scale), patch_scale

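# Usage sketch (an assumption about how this function is driven; the 384-pixel receptive field
# and the input path are illustrative placeholders, not values taken from this file):
#
#   rgb = read_image('example.png')
#   whole_image_optimal_size, patch_scale = calculateprocessingres(rgb, 384)
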
def applyGridpatch(blsize, stride, img, box):
    # Extract a simple grid of square patches; blsize is half the patch side and stride controls the overlap.
    counter1 = 0
    patch_bound_list = {}
    for k in range(blsize, img.shape[1] - blsize, stride):
        for j in range(blsize, img.shape[0] - blsize, stride):
            patch_bound_list[str(counter1)] = {}
            patchbounds = [j - blsize, k - blsize, j - blsize + 2 * blsize, k - blsize + 2 * blsize]
            patch_bound = [box[0] + patchbounds[1], box[1] + patchbounds[0], patchbounds[3] - patchbounds[1],
                           patchbounds[2] - patchbounds[0]]
            patch_bound_list[str(counter1)]['rect'] = patch_bound
            patch_bound_list[str(counter1)]['size'] = patch_bound[2]
            counter1 = counter1 + 1
    return patch_bound_list

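# Usage sketch (illustrative; 'rgb', the patch size, and the stride are placeholders, not
# values from this file): generate overlapping patches over the whole image with no
# bounding-box offset.
#
#   blsize = 192                                                   # half of a 384-pixel patch
#   patch_bound_list = applyGridpatch(blsize, int(blsize * 0.75), rgb, [0, 0, 0, 0])
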
class Images:
    def __init__(self, root_dir, files, index):
        self.root_dir = root_dir
        name = files[index]
        self.rgb_image = read_image(os.path.join(self.root_dir, name))
        name = name.replace(".jpg", "")
        name = name.replace(".png", "")
        name = name.replace(".jpeg", "")
        self.name = name

class ImageandPatchs:
    def __init__(self, root_dir, name, patchsinfo, rgb_image, scale=1):
        self.root_dir = root_dir
        self.patchsinfo = patchsinfo
        self.name = name
        self.patchs = patchsinfo
        self.scale = scale

        self.rgb_image = cv2.resize(rgb_image, (round(rgb_image.shape[1] * scale), round(rgb_image.shape[0] * scale)),
                                    interpolation=cv2.INTER_CUBIC)

        self.do_have_estimate = False
        self.estimation_updated_image = None
        self.estimation_base_image = None

    def __len__(self):
        return len(self.patchs)

    def set_base_estimate(self, est):
        self.estimation_base_image = est
        if self.estimation_updated_image is not None:
            self.do_have_estimate = True

    def set_updated_estimate(self, est):
        self.estimation_updated_image = est
        if self.estimation_base_image is not None:
            self.do_have_estimate = True

    def __getitem__(self, index):
        patch_id = int(self.patchs[index][0])
        rect = np.array(self.patchs[index][1]['rect'])
        msize = self.patchs[index][1]['size']

        # Apply the scale to the patch rectangle.
        rect = np.round(rect * self.scale)
        rect = rect.astype('int')
        msize = round(msize * self.scale)

        patch_rgb = impatch(self.rgb_image, rect)
        if self.do_have_estimate:
            patch_whole_estimate_base = impatch(self.estimation_base_image, rect)
            patch_whole_estimate_updated = impatch(self.estimation_updated_image, rect)
            return {'patch_rgb': patch_rgb, 'patch_whole_estimate_base': patch_whole_estimate_base,
                    'patch_whole_estimate_updated': patch_whole_estimate_updated, 'rect': rect,
                    'size': msize, 'id': patch_id}
        else:
            return {'patch_rgb': patch_rgb, 'rect': rect, 'size': msize, 'id': patch_id}

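# Usage sketch (illustrative; 'sorted_patch_list' and the whole-image estimates are
# hypothetical names, assuming the patches come from applyGridpatch and have been arranged
# into (id, {'rect': ..., 'size': ...}) pairs):
#
#   patchset = ImageandPatchs(root_dir, name, sorted_patch_list, rgb_image, scale)
#   patchset.set_base_estimate(whole_estimate.copy())
#   patchset.set_updated_estimate(whole_estimate.copy())
#   for i in range(len(patchset)):
#       patch = patchset[i]   # dict with 'patch_rgb', 'rect', 'size', 'id', and estimate crops
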
class ImageDataset:
    def __init__(self, root_dir, subsetname):
        self.dataset_dir = root_dir
        self.subsetname = subsetname

        self.rgb_image_dir = root_dir
        self.files = sorted(os.listdir(self.rgb_image_dir))

    def __len__(self):
        return len(self.files)

    def __getitem__(self, index):
        return Images(self.rgb_image_dir, self.files, index)

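# Minimal, illustrative driver (an assumption about how these utilities are combined; the
# input directory and the 384-pixel receptive field below are placeholders, not values
# taken from the original project).
if __name__ == "__main__":
    dataset = ImageDataset("./inputs", "test")
    for i in range(len(dataset)):
        sample = dataset[i]
        optimal_size, patch_scale = calculateprocessingres(sample.rgb_image, 384)
        print(sample.name, optimal_size, patch_scale)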