Skip to content

Commit 799a024

Browse files
author
Matt Sokoloff
committed
yapf
1 parent a333ed2 commit 799a024

File tree

2 files changed

+71
-49
lines changed

2 files changed

+71
-49
lines changed

examples/model_assisted_labeling/image_mal_utils.py

Lines changed: 19 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -4,33 +4,44 @@
44
from io import BytesIO
55
from PIL import Image
66

7+
78
def visualize_bbox_ndjsons(image, bbox_ndjsons, color):
    """Draw each ndjson bounding-box annotation onto `image`.

    Args:
        image: numpy image array accepted by cv2.rectangle.
        bbox_ndjsons: iterable of ndjson dicts, each carrying a 'bbox' dict
            with 'left', 'top', 'width', 'height' (pixel coordinates).
        color: cv2 color tuple for the rectangle outline.

    Returns:
        The image with all rectangles drawn (thickness 2).
    """
    for annotation in bbox_ndjsons:
        box = annotation['bbox']
        top_left = (int(box['left']), int(box['top']))
        bottom_right = (int(box['left'] + box['width']),
                        int(box['top'] + box['height']))
        image = cv2.rectangle(image,
                              top_left,
                              bottom_right,
                              thickness=2,
                              color=color)
    return image
1420

21+
1522
def visualize_poly_ndjsons(image, poly_ndjsons, color):
    """Draw each ndjson polygon annotation onto `image` as a closed polyline.

    Args:
        image: numpy image array accepted by cv2.polylines.
        poly_ndjsons: iterable of ndjson dicts, each carrying a 'polygon'
            list of {'x': ..., 'y': ...} vertices.
        color: cv2 color tuple for the polyline.

    Returns:
        The image with all polygons drawn (closed, thickness 2).
    """
    for annotation in poly_ndjsons:
        vertices = [[vertex['x'], vertex['y']]
                    for vertex in annotation['polygon']]
        # cv2.polylines requires integer point arrays.
        vertices = np.array(vertices).astype(np.int32)
        image = cv2.polylines(image, [vertices], True, color, thickness=2)
    return image
2128

29+
2230
def visualize_point_ndjsons(image, point_ndjsons, color):
    """Draw each ndjson point annotation onto `image` as a filled circle.

    Args:
        image: numpy image array accepted by cv2.circle.
        point_ndjsons: iterable of ndjson dicts, each carrying a 'point'
            dict with 'x' and 'y' pixel coordinates.
        color: cv2 color tuple for the circle fill.

    Returns:
        The image with all points drawn (radius 10, filled).
    """
    for point_ndjson in point_ndjsons:
        point = point_ndjson['point']
        # Cast to int: cv2.circle rejects float center coordinates, and
        # ndjson coordinates may arrive as floats. This also matches the
        # explicit int() casts used in visualize_bbox_ndjsons.
        center = (int(point['x']), int(point['y']))
        image = cv2.circle(image, center, radius=10, color=color, thickness=-1)
    return image
2738

39+
2840
def visualize_mask_ndjsons(image, mask_ndjsons):
    """Overlay all ndjson instance masks onto `image`.

    Each mask image is downloaded from its 'instanceURI', the masks are
    summed into one composite array, and the composite is alpha-blended
    over the image (70% image / 30% masks).

    NOTE(review): summing then casting to uint8 can wrap where masks
    overlap — presumably the instance masks are disjoint; confirm.

    Args:
        image: numpy image array, same shape as the downloaded masks.
        mask_ndjsons: iterable of ndjson dicts with ['mask']['instanceURI'].

    Returns:
        The blended image from cv2.addWeighted.
    """
    downloaded = []
    for ndjson in mask_ndjsons:
        uri = ndjson['mask']['instanceURI']
        payload = requests.get(uri).content
        downloaded.append(np.array(Image.open(BytesIO(payload))))
    composite = np.sum(downloaded, axis=0).astype(np.uint8)
    return cv2.addWeighted(image, 0.7, composite, 0.3, 0)
35-
36-

examples/model_assisted_labeling/image_model.py

Lines changed: 52 additions & 41 deletions
Original file line numberDiff line numberDiff line change
@@ -4,30 +4,45 @@
44

55
#https://colab.research.google.com/github/tensorflow/tpu/blob/master/models/official/mask_rcnn/mask_rcnn_demo.ipynb#scrollTo=2oZWLz4xXsyQ
66

7-
# Detection class ids kept by predict(), mapped to their labels
# (presumably COCO ids — 1=person, 3=car — TODO confirm against the model).
class_mappings = {1: 'person', 3: 'car', 28: 'umbrella', 31: 'handbag'}
# Module-level TF1 session shared by load_model() (which populates it with
# the SavedModel graph) and predict() (which runs tensors against it).
session = tf.compat.v1.Session()
99

10+
1011
def load_model():
    """Load the pretrained Mask R-CNN SavedModel into the module session.

    The checkpoint is read from a public GCS bucket with the 'serve' tag,
    so predict() can subsequently run the graph's output tensors.
    """
    checkpoint = 'gs://cloud-tpu-checkpoints/mask-rcnn/1555659850'
    tf.compat.v1.saved_model.loader.load(session, ['serve'], checkpoint)
1315

1416

1517
def predict(np_image_string, min_score, height, width):
    """Run Mask R-CNN inference and post-process the raw detections.

    Args:
        np_image_string: encoded image bytes fed to the graph's
            'Placeholder:0' input.
        min_score: minimum detection score; lower-scoring detections
            are dropped.
        height: output image height for the pasted segmentation masks.
        width: output image width for the pasted segmentation masks.

    Returns:
        Dict with 'boxes' (scaled to image coordinates), 'class_indices'
        (int32 class ids), and 'seg_masks' (full-image masks), each
        filtered to detections above `min_score` whose class id is in
        `class_mappings`.
    """
    output_tensors = [
        'NumDetections:0', 'DetectionBoxes:0', 'DetectionClasses:0',
        'DetectionScores:0', 'DetectionMasks:0', 'ImageInfo:0'
    ]
    (num_detections, detection_boxes, detection_classes, detection_scores,
     detection_masks, image_info) = session.run(
         output_tensors, feed_dict={'Placeholder:0': np_image_string})

    # Everything below strips the batch dimension and truncates to the
    # actual number of detections.
    n = np.squeeze(num_detections.astype(np.int32), axis=(0,))
    scores = np.squeeze(detection_scores, axis=(0,))[:n]
    # image_info[0, 2] is the scale factor back to original image pixels.
    boxes = np.squeeze(detection_boxes * image_info[0, 2], axis=(0,))[:n]
    classes = np.squeeze(detection_classes.astype(np.int32), axis=(0,))[:n]
    response = {'boxes': boxes, 'class_indices': classes}

    # Convert [ymin, xmin, ymax, xmax] boxes to [x, y, w, h] for mask pasting.
    ymin, xmin, ymax, xmax = np.split(boxes, 4, axis=-1)
    instance_masks = np.squeeze(detection_masks, axis=(0,))[:n]
    xywh_boxes = np.concatenate([xmin, ymin, xmax - xmin, ymax - ymin],
                                axis=-1)
    response['seg_masks'] = generate_segmentation_from_masks(
        instance_masks, xywh_boxes, height, width)

    # Keep only confident detections whose class we care about.
    keep = (scores > min_score) & np.isin(classes,
                                          list(class_mappings.keys()))
    return {key: value[keep] for key, value in response.items()}
@@ -54,6 +69,7 @@ def expand_boxes(boxes, scale):
5469

5570
return boxes_exp
5671

72+
5773
def generate_segmentation_from_masks(masks,
5874
detected_boxes,
5975
image_height,
@@ -74,7 +90,6 @@ def generate_segmentation_from_masks(masks,
7490
the instance masks *pasted* on the image canvas.
7591
"""
7692

77-
7893
_, mask_height, mask_width = masks.shape
7994
scale = max((mask_width + 2.0) / mask_width,
8095
(mask_height + 2.0) / mask_height)
@@ -84,37 +99,33 @@ def generate_segmentation_from_masks(masks,
8499
padded_mask = np.zeros((mask_height + 2, mask_width + 2), dtype=np.float32)
85100
segms = []
86101
for mask_ind, mask in enumerate(masks):
87-
im_mask = np.zeros((image_height, image_width), dtype=np.uint8)
88-
if is_image_mask:
89-
# Process whole-image masks.
90-
im_mask[:, :] = mask[:, :]
91-
else:
92-
# Process mask inside bounding boxes.
93-
padded_mask[1:-1, 1:-1] = mask[:, :]
94-
95-
ref_box = ref_boxes[mask_ind, :]
96-
w = ref_box[2] - ref_box[0] + 1
97-
h = ref_box[3] - ref_box[1] + 1
98-
w = np.maximum(w, 1)
99-
h = np.maximum(h, 1)
100-
101-
mask = cv2.resize(padded_mask, (w, h))
102-
mask = np.array(mask > 0.5, dtype=np.uint8)
103-
104-
x_0 = max(ref_box[0], 0)
105-
x_1 = min(ref_box[2] + 1, image_width)
106-
y_0 = max(ref_box[1], 0)
107-
y_1 = min(ref_box[3] + 1, image_height)
108-
109-
im_mask[y_0:y_1, x_0:x_1] = mask[(y_0 - ref_box[1]):(y_1 - ref_box[1]), (
110-
x_0 - ref_box[0]):(x_1 - ref_box[0])]
111-
segms.append(im_mask)
102+
im_mask = np.zeros((image_height, image_width), dtype=np.uint8)
103+
if is_image_mask:
104+
# Process whole-image masks.
105+
im_mask[:, :] = mask[:, :]
106+
else:
107+
# Process mask inside bounding boxes.
108+
padded_mask[1:-1, 1:-1] = mask[:, :]
109+
110+
ref_box = ref_boxes[mask_ind, :]
111+
w = ref_box[2] - ref_box[0] + 1
112+
h = ref_box[3] - ref_box[1] + 1
113+
w = np.maximum(w, 1)
114+
h = np.maximum(h, 1)
115+
116+
mask = cv2.resize(padded_mask, (w, h))
117+
mask = np.array(mask > 0.5, dtype=np.uint8)
118+
119+
x_0 = max(ref_box[0], 0)
120+
x_1 = min(ref_box[2] + 1, image_width)
121+
y_0 = max(ref_box[1], 0)
122+
y_1 = min(ref_box[3] + 1, image_height)
123+
124+
im_mask[y_0:y_1,
125+
x_0:x_1] = mask[(y_0 - ref_box[1]):(y_1 - ref_box[1]),
126+
(x_0 - ref_box[0]):(x_1 - ref_box[0])]
127+
segms.append(im_mask)
112128

113129
segms = np.array(segms)
114130
assert masks.shape[0] == segms.shape[0]
115131
return segms
116-
117-
118-
119-
120-

0 commit comments

Comments
 (0)