Commit 9bd68573 authored by Mark McDonald's avatar Mark McDonald Committed by A. Unique TensorFlower
Browse files

Remove `git clone` instruction.

There is no actual command here, and everything is downloaded, so the instruction is redundant.

PiperOrigin-RevId: 447913568
parent c6f0446d
...@@ -37,14 +37,14 @@ ...@@ -37,14 +37,14 @@
"id": "Lpb0yoNjiWhw" "id": "Lpb0yoNjiWhw"
}, },
"source": [ "source": [
"\u003ctable class=\"tfo-notebook-buttons\" align=\"left\"\u003e\n", "<table class=\"tfo-notebook-buttons\" align=\"left\">\n",
" \u003ctd\u003e\n", " <td>\n",
" \u003ca target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/models/blob/master/official/projects/cots_detector/COTS_detection_inference_and_tracking.ipynb\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" /\u003eRun in Google Colab\u003c/a\u003e\n", " <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/models/blob/master/official/projects/cots_detector/COTS_detection_inference_and_tracking.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n",
" \u003c/td\u003e\n", " </td>\n",
" \u003ctd\u003e\n", " <td>\n",
" \u003ca target=\"_blank\" href=\"https://github.com/tensorflow/models/blob/master/official/projects/cots_detector/COTS_detection_inference_and_tracking.ipynb\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" /\u003eView on GitHub\u003c/a\u003e\n", " <a target=\"_blank\" href=\"https://github.com/tensorflow/models/blob/master/official/projects/cots_detector/COTS_detection_inference_and_tracking.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View on GitHub</a>\n",
" \u003c/td\u003e\n", " </td>\n",
"\u003c/table\u003e" "</table>"
] ]
}, },
{ {
...@@ -68,7 +68,7 @@ ...@@ -68,7 +68,7 @@
"\n", "\n",
"This notebook tutorial shows how to detect COTS using a pre-trained COTS detector implemented in TensorFlow. On top of just running the model on each frame of the video, the tracking code in this notebook aligns detections from frame to frame creating a consistent track for each COTS. Each track is given an id and frame count. Here is an example image from a video of a reef showing labeled COTS starfish.\n", "This notebook tutorial shows how to detect COTS using a pre-trained COTS detector implemented in TensorFlow. On top of just running the model on each frame of the video, the tracking code in this notebook aligns detections from frame to frame creating a consistent track for each COTS. Each track is given an id and frame count. Here is an example image from a video of a reef showing labeled COTS starfish.\n",
"\n", "\n",
"\u003cimg src=\"https://storage.googleapis.com/download.tensorflow.org/data/cots_detection/COTS_detected_sample.png\"\u003e" "<img src=\"https://storage.googleapis.com/download.tensorflow.org/data/cots_detection/COTS_detected_sample.png\">"
] ]
}, },
{ {
...@@ -77,17 +77,7 @@ ...@@ -77,17 +77,7 @@
"id": "YxCF1t-Skag8" "id": "YxCF1t-Skag8"
}, },
"source": [ "source": [
"It is recommended to enable GPU to accelerate the inference. On CPU, this runs for about 40 minutes, but on GPU it takes only 10 minutes. (from colab menu: *Runtime \u003e Change runtime type \u003e Hardware accelerator \u003e select \"GPU\"*)." "It is recommended to enable GPU to accelerate the inference. On CPU, this runs for about 40 minutes, but on GPU it takes only 10 minutes. (from colab menu: *Runtime > Change runtime type > Hardware accelerator > select \"GPU\"*)."
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "6AL2vLo3q39y"
},
"source": [
"#Setting up the environment\n",
"Clone the inference pipeline respository from Github."
] ]
}, },
{ {
...@@ -296,7 +286,7 @@ ...@@ -296,7 +286,7 @@
" overlap_y0 = max(self.y0, other.y0)\n", " overlap_y0 = max(self.y0, other.y0)\n",
" overlap_x1 = min(self.x1, other.x1)\n", " overlap_x1 = min(self.x1, other.x1)\n",
" overlap_y1 = min(self.y1, other.y1)\n", " overlap_y1 = min(self.y1, other.y1)\n",
" if overlap_x0 \u003c overlap_x1 and overlap_y0 \u003c overlap_y1:\n", " if overlap_x0 < overlap_x1 and overlap_y0 < overlap_y1:\n",
" overlap_area = box_area(overlap_x0, overlap_y0, overlap_x1,\n", " overlap_area = box_area(overlap_x0, overlap_y0, overlap_x1,\n",
" overlap_y1)\n", " overlap_y1)\n",
" return overlap_area / (self.area() + other.area() - overlap_area)\n", " return overlap_area / (self.area() + other.area() - overlap_area)\n",
...@@ -387,8 +377,8 @@ ...@@ -387,8 +377,8 @@
" image_h = image.shape[0]\n", " image_h = image.shape[0]\n",
"\n", "\n",
" # Assume tracker is invalid if too much time has passed!\n", " # Assume tracker is invalid if too much time has passed!\n",
" if (self.prev_time \u003e 0 and\n", " if (self.prev_time > 0 and\n",
" timestamp - self.prev_time \u003e self.time_threshold):\n", " timestamp - self.prev_time > self.time_threshold):\n",
" logging.info(\n", " logging.info(\n",
" 'Too much time since last update, resetting tracker.')\n", " 'Too much time since last update, resetting tracker.')\n",
" self.tracks = []\n", " self.tracks = []\n",
...@@ -398,14 +388,14 @@ ...@@ -398,14 +388,14 @@
" # - Have existed for a long time without linking a real detection.\n", " # - Have existed for a long time without linking a real detection.\n",
" active_tracks = []\n", " active_tracks = []\n",
" for track in self.tracks:\n", " for track in self.tracks:\n",
" if (track.det.x0 \u003c self.border or track.det.y0 \u003c self.border or\n", " if (track.det.x0 < self.border or track.det.y0 < self.border or\n",
" track.det.x1 \u003e= (image_w - self.border) or\n", " track.det.x1 >= (image_w - self.border) or\n",
" track.det.y1 \u003e= (image_h - self.border)):\n", " track.det.y1 >= (image_h - self.border)):\n",
" logging.info(f'Removing track {track.id} because it\\'s near the border')\n", " logging.info(f'Removing track {track.id} because it\\'s near the border')\n",
" continue\n", " continue\n",
"\n", "\n",
" time_since_last_detection = timestamp - track.linked_dets[-1].timestamp\n", " time_since_last_detection = timestamp - track.linked_dets[-1].timestamp\n",
" if (time_since_last_detection \u003e self.track_flow_time):\n", " if (time_since_last_detection > self.track_flow_time):\n",
" logging.info(f'Removing track {track.id} because it\\'s too old '\n", " logging.info(f'Removing track {track.id} because it\\'s too old '\n",
" f'({time_since_last_detection:.02f}s)')\n", " f'({time_since_last_detection:.02f}s)')\n",
" continue\n", " continue\n",
...@@ -415,7 +405,7 @@ ...@@ -415,7 +405,7 @@
" self.tracks = active_tracks\n", " self.tracks = active_tracks\n",
"\n", "\n",
" # Run optical flow to update existing tracks.\n", " # Run optical flow to update existing tracks.\n",
" if self.prev_time \u003e 0:\n", " if self.prev_time > 0:\n",
" # print('Running optical flow propagation.')\n", " # print('Running optical flow propagation.')\n",
" of_params = {\n", " of_params = {\n",
" 'winSize': self.of_size,\n", " 'winSize': self.of_size,\n",
...@@ -442,9 +432,9 @@ ...@@ -442,9 +432,9 @@
" detected_obj_track_ids = set()\n", " detected_obj_track_ids = set()\n",
"\n", "\n",
" for detection in detections:\n", " for detection in detections:\n",
" if (detection.x0 \u003c self.border or detection.y0 \u003c self.border or\n", " if (detection.x0 < self.border or detection.y0 < self.border or\n",
" detection.x1 \u003e= image_w - self.border or\n", " detection.x1 >= image_w - self.border or\n",
" detection.y1 \u003e= image_h - self.border):\n", " detection.y1 >= image_h - self.border):\n",
" # print('Skipping detection because it\\'s close to the border.')\n", " # print('Skipping detection because it\\'s close to the border.')\n",
" continue\n", " continue\n",
"\n", "\n",
...@@ -457,12 +447,12 @@ ...@@ -457,12 +447,12 @@
" if track.det.class_id != detection.class_id:\n", " if track.det.class_id != detection.class_id:\n",
" continue\n", " continue\n",
" overlap = detection.iou(track.det)\n", " overlap = detection.iou(track.det)\n",
" if overlap \u003e overlap_max:\n", " if overlap > overlap_max:\n",
" overlap_index = track_index\n", " overlap_index = track_index\n",
" overlap_max = overlap\n", " overlap_max = overlap\n",
"\n", "\n",
" # Link to existing track with maximal IoU.\n", " # Link to existing track with maximal IoU.\n",
" if overlap_max \u003e self.overlap_threshold:\n", " if overlap_max > self.overlap_threshold:\n",
" track = self.tracks[overlap_index]\n", " track = self.tracks[overlap_index]\n",
" track.det = detection\n", " track.det = detection\n",
" track.linked_dets.append(Tracklet(timestamp, detection))\n", " track.linked_dets.append(Tracklet(timestamp, detection))\n",
...@@ -486,7 +476,7 @@ ...@@ -486,7 +476,7 @@
" self.prev_image = image\n", " self.prev_image = image\n",
" self.prev_time = timestamp\n", " self.prev_time = timestamp\n",
"\n", "\n",
" if num_optical_flow_calls \u003e 0:\n", " if num_optical_flow_calls > 0:\n",
" tracking_ms = int(1000 * (time.time() - start))\n", " tracking_ms = int(1000 * (time.time() - start))\n",
" logging.info(f'Tracking took {tracking_ms}ms, '\n", " logging.info(f'Tracking took {tracking_ms}ms, '\n",
" f'{num_optical_flow_calls} optical flow calls')\n", " f'{num_optical_flow_calls} optical flow calls')\n",
...@@ -511,7 +501,7 @@ ...@@ -511,7 +501,7 @@
}, },
"outputs": [], "outputs": [],
"source": [ "source": [
"def embed_video_file(path: os.PathLike) -\u003e display.HTML:\n", "def embed_video_file(path: os.PathLike) -> display.HTML:\n",
" \"\"\"Embeds a file in the notebook as an html tag with a data-url.\"\"\"\n", " \"\"\"Embeds a file in the notebook as an html tag with a data-url.\"\"\"\n",
" path = pathlib.Path(path)\n", " path = pathlib.Path(path)\n",
" mime, unused_encoding = mimetypes.guess_type(str(path))\n", " mime, unused_encoding = mimetypes.guess_type(str(path))\n",
...@@ -520,10 +510,10 @@ ...@@ -520,10 +510,10 @@
" b64 = base64.b64encode(data).decode()\n", " b64 = base64.b64encode(data).decode()\n",
" return display.HTML(\n", " return display.HTML(\n",
" textwrap.dedent(\"\"\"\n", " textwrap.dedent(\"\"\"\n",
" \u003cvideo width=\"640\" height=\"480\" controls\u003e\n", " <video width=\"640\" height=\"480\" controls>\n",
" \u003csource src=\"data:{mime};base64,{b64}\" type=\"{mime}\"\u003e\n", " <source src=\"data:{mime};base64,{b64}\" type=\"{mime}\">\n",
" Your browser does not support the video tag.\n", " Your browser does not support the video tag.\n",
" \u003c/video\u003e\n", " </video>\n",
" \"\"\").format(mime=mime, b64=b64))\n" " \"\"\").format(mime=mime, b64=b64))\n"
] ]
}, },
...@@ -668,7 +658,7 @@ ...@@ -668,7 +658,7 @@
" if str(track.id) not in seq_length_dict:\n", " if str(track.id) not in seq_length_dict:\n",
" seq_length_dict[str(track.id)] = len(track.linked_dets)\n", " seq_length_dict[str(track.id)] = len(track.linked_dets)\n",
" else:\n", " else:\n",
" if len(track.linked_dets) \u003e seq_length_dict[str(track.id)]:\n", " if len(track.linked_dets) > seq_length_dict[str(track.id)]:\n",
" seq_length_dict[str(track.id)] = len(track.linked_dets)\n", " seq_length_dict[str(track.id)] = len(track.linked_dets)\n",
" new_track_list.append({\"score\":str(round(track.det.score, 3)), \"seq_id\": str(track.id), \"seq_idx\": str(len(track.linked_dets)),\n", " new_track_list.append({\"score\":str(round(track.det.score, 3)), \"seq_id\": str(track.id), \"seq_idx\": str(len(track.linked_dets)),\n",
" \"x0\": round(track.det.x0), \"y0\": round(track.det.y0), \"x1\": round(track.det.x1), \"y1\": round(track.det.y1)})\n", " \"x0\": round(track.det.x0), \"y0\": round(track.det.y0), \"x1\": round(track.det.x1), \"y1\": round(track.det.y1)})\n",
...@@ -700,7 +690,7 @@ ...@@ -700,7 +690,7 @@
" batch_size, img_h, img_w = image.shape[0:3]\n", " batch_size, img_h, img_w = image.shape[0:3]\n",
"\n", "\n",
" for batch_index in range(batch_size):\n", " for batch_index in range(batch_size):\n",
" valid_indices = detection_scores[batch_index, :] \u003e= threshold\n", " valid_indices = detection_scores[batch_index, :] >= threshold\n",
" classes = detection_classes[batch_index, valid_indices]\n", " classes = detection_classes[batch_index, valid_indices]\n",
" scores = detection_scores[batch_index, valid_indices]\n", " scores = detection_scores[batch_index, valid_indices]\n",
" boxes = detection_boxes[batch_index, valid_indices, :]\n", " boxes = detection_boxes[batch_index, valid_indices, :]\n",
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment