Kinh nghiệm về re-train và test dữ liệu khi sử dụng YOLO
Test Manual
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
# -*- coding: utf-8 -*-
"""Test_manual.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1Q9g8NMMenCgCAMb67zBZUY5w2_ZI7YcJ
"""
# Mount Google Drive so the dataset folders below are accessible in Colab.
from google.colab import drive
drive.mount('/content/drive')
# /content/drive/MyDrive/[2023_Contest]/sample_test
!pip install opencv-python
import cv2
import os
# Layout on Drive: images/ and labels/ hold the YOLO-format inputs;
# output/images and output/labels receive the cropped tiles and their
# re-normalized label files produced by the tiling step below.
ROOT = '/content/drive/MyDrive/[2023_Contest]/sample_test/'
folder_images = ROOT + 'images/'
folder_labels = ROOT + 'labels/'
folder_output = ROOT + 'output/'
folder_output_images = folder_output + 'images/'
folder_output_labels = folder_output + 'labels/'
class Box:
    """A YOLO-format bounding box: class label plus normalized centre/size.

    Values are stored exactly as given; the label loader passes the raw
    string tokens from the .txt files.
    """

    def __init__(self, label, xc, yc, w, h):
        self.label, self.xc, self.yc, self.w, self.h = label, xc, yc, w, h

    def __str__(self):
        return (
            f"Box(label={self.label}, xc={self.xc}, "
            f"yc={self.yc}, w={self.w}, h={self.h})"
        )
# Step 1: read every YOLO label file into memory.
# my_dict maps image basename -> list of Box objects parsed from its .txt file.
my_dict = {}
for f in os.listdir(folder_labels):
    if f.endswith('txt'):
        file_path = os.path.join(folder_labels, f)
        with open(file_path, 'r') as file:
            list_person = []
            for line in file:
                # Each line: "<label> <xc> <yc> <w> <h>" (normalized values).
                values_list = line.strip().split()
                obj = Box(values_list[0], values_list[1], values_list[2],
                          values_list[3], values_list[4])
                list_person.append(obj)
        # FIX: removed the redundant file.close() — the with-statement
        # already closes the file.
        name = f.split('.')[0]
        my_dict[name] = list_person
def get_coeff(w, h):
    """Return the (columns, rows) tiling grid for an image of size w x h.

    Full-resolution 8192x5460 captures are tiled 16x12; all other sizes 8x6.
    """
    is_full_res = (w == 8192 and h == 5460)
    return (16, 12) if is_full_res else (8, 6)
# Step 2: for every labelled image, crop the grid tile containing each box
# and write the tile plus a re-normalized YOLO label file to the output dirs.
for f in os.listdir(folder_images):
    if not f.endswith('JPG'):
        continue
    # FIX: look up labels only for .JPG files — the old code indexed
    # my_dict for EVERY directory entry and could raise KeyError for
    # stray files with no matching label file.
    name = f.split('.')[0]
    list_obj = my_dict[name]
    file_path = os.path.join(folder_images, f)
    image = cv2.imread(file_path)
    h, w, _ = image.shape
    coeff_w, coeff_h = get_coeff(w, h)
    w_split_image = int(w / coeff_w)   # tile width in pixels
    h_split_image = int(h / coeff_h)   # tile height in pixels
    cnt = 0
    for o in list_obj:
        cnt += 1
        image_out_piece = folder_output_images + name + '_' + str(cnt) + '.jpg'
        txt_out_piece = folder_output_labels + name + '_' + str(cnt) + '.txt'
        print('image: ', name + '_' + str(cnt))
        print('labels: ', txt_out_piece)
        # De-normalize the YOLO box back to pixel coordinates.
        xc = float(o.xc) * w
        yc = float(o.yc) * h
        w_box = float(o.w) * w
        h_box = float(o.h) * h
        x = int(xc - w_box / 2)   # top-left corner
        y = int(yc - h_box / 2)
        w_size = int(w_box)
        h_size = int(h_box)
        # Grid cell (column c, row r) containing the box's top-left corner.
        c = int(x / w_split_image)
        r = int(y / h_split_image)
        x_new = c * w_split_image
        y_new = r * h_split_image
        try:
            roi = image[y_new : y_new + h_split_image, x_new : x_new + w_split_image]
            # Box coordinates relative to the tile origin.
            piece_x = int(x % w_split_image)
            piece_y = int(y % h_split_image)
            piece_w_size = int(w_size)
            piece_h_size = int(h_size)
            # Re-normalize to the tile size (YOLO format again).
            x_out = (piece_x + piece_w_size / 2) / w_split_image
            y_out = (piece_y + piece_h_size / 2) / h_split_image
            w_out = piece_w_size / w_split_image
            h_out = piece_h_size / h_split_image
            if x_out <= 1.0 and y_out <= 1.0:
                print(0, x_out, y_out, w_out, h_out)
                cv2.imwrite(image_out_piece, roi)
                # FIX: with-statement so the label file is closed even if
                # write() raises (old code used bare open()/close()).
                with open(txt_out_piece, "w") as out_file:
                    out_file.write('0' + ' ' + str(x_out) + ' ' + str(y_out)
                                   + ' ' + str(w_out) + ' ' + str(h_out))
            else:
                # Box centre fell outside the tile — skip this annotation.
                print('error detection')
        except Exception as e:
            print(f"An error occurred while writing the image: {str(e)}")
print('.....')
Load test model
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
# -*- coding: utf-8 -*-
"""Load_test_model.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1GdLGCpdcB5WkyPdoPLeKdi9sGnkltadj
"""
# Mount Drive and install the pinned Ultralytics YOLOv8 release.
from google.colab import drive
drive.mount('/content/drive')
!pip install ultralytics==8.0.20
from IPython import display
display.clear_output()
import ultralytics
ultralytics.checks()
# NOTE(review): the two imports below duplicate the ones above — harmless
# in a notebook, but they could be removed.
import ultralytics
from IPython import display
from ultralytics import YOLO
from IPython.display import display, Image
import cv2
import multiprocessing
import random
# Trained weights produced by the training notebook.
model = YOLO('/content/drive/MyDrive/[2023_Contest]/model/best_v8.pt')
class Box:
    """One detection record: image id, normalized box, class label, score."""

    def __init__(self, image_id, xc, yc, w, h, label, score):
        self.image_id = image_id
        self.xc = xc
        self.yc = yc
        self.w = w
        self.h = h
        self.label = label
        self.score = score

    def __str__(self):
        fields = (self.image_id, self.xc, self.yc, self.w,
                  self.h, self.label, self.score)
        return ", ".join(str(v) for v in fields)
def get_coeff(w, h):
    """Tiling grid (cols, rows): 16x12 for full-res 8192x5460, else 8x6."""
    if (w, h) == (8192, 5460):
        return 16, 12
    return 8, 6
def generate_random_solution(f):
xc = random.random() * 0.8
yc = random.random() * 0.8
w = random.random() * 0.2
h = random.random() * 0.2
conf = random.random()
image_id = f.name[:-len(f.suffix)]
result = {
'image_id': image_id,
'xc': round(xc, 4),
'yc': round(yc, 4),
'w': round(w, 4),
'h': round(h, 4),
'label': 0,
'score': round(conf, 4)
}
return result
def sol(f):
    """Run tiled YOLO inference on image file *f* (a pathlib.Path).

    The image is split into a coeff_w x coeff_h grid, the model is run on
    each tile, and every detection is mapped back to coordinates normalized
    to the full image.  Returns a list of result dicts with keys
    image_id/xc/yc/w/h/label/score; on any failure a single random fallback
    detection is returned instead so the submission still has a row.
    """
    res = []
    try:
        image_id = f.name[:-len(f.suffix)]
        image_original = cv2.imread(str(f))
        h, w, _ = image_original.shape
        coeff_w, coeff_h = get_coeff(w, h)
        w_split_image = int(w / coeff_w)   # tile width in pixels
        h_split_image = int(h / coeff_h)   # tile height in pixels
        for i in range(coeff_h):
            for j in range(coeff_w):
                x = j * w_split_image
                y = i * h_split_image
                roi = image_original[y : y + h_split_image, x : x + w_split_image]
                results = model.predict(source=roi, conf=0.25)
                # Iterating the (possibly empty) box list directly replaces
                # the old redundant size() > 0 guard.
                for k in range(len(results[0].boxes.xyxy)):
                    o = results[0].boxes.xyxy[k]
                    # Tile-local corners -> full-image pixel corners.
                    original_x0 = x + int(o[0])
                    original_y0 = y + int(o[1])
                    original_x1 = x + int(o[2])
                    original_y1 = y + int(o[3])
                    box_w = original_x1 - original_x0
                    box_h = original_y1 - original_y0
                    res.append({
                        'image_id': image_id,
                        'xc': round((original_x0 + box_w / 2) / w, 4),
                        'yc': round((original_y0 + box_h / 2) / h, 4),
                        'w': round(box_w / w, 4),
                        'h': round(box_h / h, 4),
                        'label': 0,
                        'score': round(float(results[0].boxes.conf[k]), 4),
                    })
        return res
    except Exception as e:
        # FIX: log the failure instead of swallowing it silently, then
        # emit the best-effort random fallback as before.
        print(f"sol() failed for {f}: {e}")
        res.append(generate_random_solution(f))
        return res
from pathlib import Path
import pandas as pd
# Destination CSV and the directory of full-resolution test images.
SAVE_PATH = '/content/drive/MyDrive/[2023_Contest]/sample_test/output.csv'
TEST_IMAGES_PATH ='/content/drive/MyDrive/[2023_Contest]/sample_test/images/'
def create_simple_solution():
    """Run sol() on every .JPG test image and write all rows to SAVE_PATH."""
    results = []
    for f in Path(TEST_IMAGES_PATH).glob('*.JPG'):
        t = sol(f)
        results.extend(t)
    test_df = pd.DataFrame(results, columns=['image_id', 'xc', 'yc', 'w', 'h', 'label', 'score'])
    test_df.to_csv(SAVE_PATH, index=False)
def main():
    """Entry point: build the detection CSV for the whole test set."""
    create_simple_solution()
import time
if __name__ == '__main__':
    # Time the full inference pass.
    start_time = time.time()
    main()
    end_time = time.time()
    print('time: ', round(end_time - start_time, 2))
# Threaded variant of the inference pipeline starts here.
!pip freeze | grep opencv
import cv2
import time
import threading
import concurrent.futures
class Box:
    """A single detection record used by the threaded pipeline."""

    def __init__(self, image_id, xc, yc, w, h, label, score):
        (self.image_id, self.xc, self.yc, self.w,
         self.h, self.label, self.score) = (image_id, xc, yc, w, h,
                                            label, score)

    def __str__(self):
        fields = (self.image_id, self.xc, self.yc, self.w,
                  self.h, self.label, self.score)
        return ", ".join(map(str, fields))
def get_coeff(w, h):
    """Return the fixed 8x6 tiling grid.

    The resolution-dependent 16x12 rule from the earlier version was
    disabled; *w* and *h* are kept so callers don't change.
    """
    return 8, 6
def generate_random_solution(f):
xc = random.random() * 0.8
yc = random.random() * 0.8
w = random.random() * 0.2
h = random.random() * 0.2
conf = random.random()
image_id = f.name[:-len(f.suffix)]
result = {
'image_id': image_id,
'xc': round(xc, 4),
'yc': round(yc, 4),
'w': round(w, 4),
'h': round(h, 4),
'label': 0,
'score': round(conf, 4)
}
return result
def process_chunk(chunk):
    """Run the shared YOLO model on one image tile and return its results."""
    return model.predict(source=chunk, conf=0.25)
def sol(f):
    """Run tiled YOLO inference on image file *f* using a thread pool.

    The image is cut into a coeff_w x coeff_h grid of tiles, each tile is
    predicted on a worker thread, and every detection is mapped back to
    coordinates normalized to the full image.  Returns a list of result
    dicts; on any failure a single random fallback detection is returned.
    """
    res = []
    try:
        image_id = f.name[:-len(f.suffix)]
        image_original = cv2.imread(str(f))
        h, w, _ = image_original.shape
        coeff_w, coeff_h = get_coeff(w, h)
        w_split_image = int(w / coeff_w)   # tile width in pixels
        h_split_image = int(h / coeff_h)   # tile height in pixels
        # Build the tiles in row-major order (row i, column j).
        image_chunks = []
        for i in range(coeff_h):
            for j in range(coeff_w):
                x = j * w_split_image
                y = i * h_split_image
                image_chunks.append(
                    image_original[y : y + h_split_image,
                                   x : x + w_split_image])
        # ThreadPoolExecutor.map preserves input order, so result index idx
        # corresponds to tile (row idx // coeff_w, col idx % coeff_w).
        with concurrent.futures.ThreadPoolExecutor() as ex:
            list_results = ex.map(process_chunk, image_chunks)
        for idx, r in enumerate(list_results):
            # BUG FIX: the original reused the loop variables i/j left over
            # from the tiling loop above, so EVERY detection was offset by
            # the last tile's origin.  Recompute the tile origin from idx.
            row, col = divmod(idx, coeff_w)
            x_off = col * w_split_image
            y_off = row * h_split_image
            for k in range(len(r[0].boxes.xyxy)):
                o = r[0].boxes.xyxy[k]
                # Tile-local corners -> full-image pixel corners.
                original_x0 = x_off + int(o[0])
                original_y0 = y_off + int(o[1])
                original_x1 = x_off + int(o[2])
                original_y1 = y_off + int(o[3])
                box_w = original_x1 - original_x0
                box_h = original_y1 - original_y0
                res.append({
                    "image_id": image_id,
                    "xc": round((original_x0 + box_w / 2) / w, 4),
                    "yc": round((original_y0 + box_h / 2) / h, 4),
                    "w": round(box_w / w, 4),
                    "h": round(box_h / h, 4),
                    "label": 0,
                    "score": round(float(r[0].boxes.conf[k]), 4),
                })
        return res
    except Exception as e:
        # Best-effort fallback: log and emit one random detection so the
        # image still contributes a row to the submission.
        print(f"sol() failed for {f}: {e}")
        res.append(generate_random_solution(f))
        return res
from pathlib import Path
import pandas as pd
# Destination CSV and the directory of full-resolution test images.
SAVE_PATH = '/content/drive/MyDrive/[2023_Contest]/sample_test/output.csv'
TEST_IMAGES_PATH ='/content/drive/MyDrive/[2023_Contest]/sample_test/images/'
def create_simple_solution():
    """Run sol() on every .JPG test image and write all rows to SAVE_PATH."""
    results = []
    for f in Path(TEST_IMAGES_PATH).glob('*.JPG'):
        t = sol(f)
        results.extend(t)
    test_df = pd.DataFrame(results, columns=['image_id', 'xc', 'yc', 'w', 'h', 'label', 'score'])
    test_df.to_csv(SAVE_PATH, index=False)
def main():
    """Entry point: build the detection CSV with the threaded pipeline."""
    create_simple_solution()
import time
if __name__ == '__main__':
    # Time the full inference pass.
    start_time = time.time()
    main()
    end_time = time.time()
    print('time: ', round(end_time - start_time, 2))
Editing YOLO
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
# -*- coding: utf-8 -*-
"""editing_yolo8.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1qYvX8gAdk0wNG6aiHA8gzg0s73GLcE3V
[![Roboflow Notebooks](https://ik.imagekit.io/roboflow/notebooks/template/bannertest2-2.png?ik-sdk-version=javascript-1.4.3&updatedAt=1672932710194)](https://github.com/roboflow/notebooks)
# How to Train YOLOv8 Object Detection on a Custom Dataset
---
[![Roboflow](https://raw.githubusercontent.com/roboflow-ai/notebooks/main/assets/badges/roboflow-blogpost.svg)](https://blog.roboflow.com/how-to-train-yolov8-on-a-custom-dataset)
[![YouTube](https://badges.aleen42.com/src/youtube.svg)](https://youtu.be/wuZtUMEiKWY)
[![GitHub](https://badges.aleen42.com/src/github.svg)](https://github.com/ultralytics/ultralytics)
Ultralytics YOLOv8 is the latest version of the YOLO (You Only Look Once) object detection and image segmentation model developed by Ultralytics. The YOLOv8 model is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of object detection and image segmentation tasks. It can be trained on large datasets and is capable of running on a variety of hardware platforms, from CPUs to GPUs.
## ⚠️ Disclaimer
YOLOv8 is still under heavy development. Breaking changes are being introduced almost weekly. We strive to make our YOLOv8 notebooks work with the latest version of the library. Last tests took place on **27.01.2023** with version **YOLOv8.0.20**.
If you notice that our notebook behaves incorrectly - especially if you experience errors that prevent you from going through the tutorial - don't hesitate! Let us know and open an [issue](https://github.com/roboflow/notebooks/issues) on the Roboflow Notebooks repository.
## Accompanying Blog Post
We recommend that you follow along in this notebook while reading the blog post on how to train YOLOv8 Object Detection, concurrently.
## Pro Tip: Use GPU Acceleration
If you are running this notebook in Google Colab, navigate to `Edit` -> `Notebook settings` -> `Hardware accelerator`, set it to `GPU`, and then click `Save`. This will ensure your notebook uses a GPU, which will significantly speed up model training times.
## Steps in this Tutorial
In this tutorial, we are going to cover:
- Before you start
- Install YOLOv8
- CLI Basics
- Inference with Pre-trained COCO Model
- Roboflow Universe
- Preparing a custom dataset
- Custom Training
- Validate Custom Model
- Inference with Custom Model
**Let's begin!**
## Before you start
Let's make sure that we have access to GPU. We can use `nvidia-smi` command to do that. In case of any problems navigate to `Edit` -> `Notebook settings` -> `Hardware accelerator`, set it to `GPU`, and then click `Save`.
"""
from google.colab import drive
drive.mount('/content/drive')
import zipfile
# Unpack the prepared dataset archive from Drive into the Colab filesystem.
with zipfile.ZipFile('/content/drive/MyDrive/[2023_Contest]/datasets.zip', 'r') as zip_ref:
    zip_ref.extractall('/content/')
# https://app.roboflow.com/project/drone-jphdu/1
print('hello')
# /content/drive/MyDrive/[2023 - yolov8]
# Tutorial link: https://colab.research.google.com/github/roboflow-ai/notebooks/blob/main/notebooks/train-yolov8-object-detection-on-custom-dataset.ipynb#scrollTo=BSd93ZJzZZKt
# labeling: https://app.roboflow.com/sut-ejqq7/drone-jphdu/annotate/job/QxyIublaJmd95I3m5Mig
# Verify a GPU is attached, then record the working directory.
!nvidia-smi
import os
HOME = os.getcwd()
print(HOME)
"""## Install YOLOv8
⚠️ YOLOv8 is still under heavy development. Breaking changes are being introduced almost weekly. We strive to make our YOLOv8 notebooks work with the latest version of the library. Last tests took place on **27.01.2023** with version **YOLOv8.0.20**.
If you notice that our notebook behaves incorrectly - especially if you experience errors that prevent you from going through the tutorial - don't hesitate! Let us know and open an [issue](https://github.com/roboflow/notebooks/issues) on the Roboflow Notebooks repository.
YOLOv8 can be installed in two ways - from the source and via pip. This is because it is the first iteration of YOLO to have an official package.
"""
# Pip install method (recommended) — pin the version the notebook was
# tested with so breaking YOLOv8 changes don't affect the tutorial.
!pip install ultralytics==8.0.20
from IPython import display
display.clear_output()
import ultralytics
ultralytics.checks()
# Git clone method (for development)
# %cd {HOME}
# !git clone github.com/ultralytics/ultralytics
# %cd {HOME}/ultralytics
# !pip install -e .
# from IPython import display
# display.clear_output()
# import ultralytics
# ultralytics.checks()
from ultralytics import YOLO
from IPython.display import display, Image
"""## CLI Basics
If you want to train, validate or run inference on models and don't need to make any modifications to the code, using YOLO command line interface is the easiest way to get started. Read more about CLI in [Ultralytics YOLO Docs](https://docs.ultralytics.com/usage/cli/).
yolo task=detect mode=train model=yolov8n.yaml args...
classify predict yolov8n-cls.yaml args...
segment val yolov8n-seg.yaml args...
export yolov8n.pt format=onnx args...
## Inference with Pre-trained COCO Model
### 💻 CLI
`yolo mode=predict` runs YOLOv8 inference on a variety of sources, downloading models automatically from the latest YOLOv8 release, and saving results to `runs/predict`.
"""
# Commented out IPython magic to ensure Python compatibility.
# %cd {HOME}
# Smoke-test inference with the pre-trained COCO model via the CLI.
!yolo task=detect mode=predict model=yolov8n.pt conf=0.25 source='https://media.roboflow.com/notebooks/examples/dog.jpeg' save=True
# Commented out IPython magic to ensure Python compatibility.
# %cd {HOME}
Image(filename='runs/detect/predict/dog.jpeg', height=600)
"""### 🐍 Python SDK
The simplest way of simply using YOLOv8 directly in a Python environment.
"""
# Same smoke test through the Python SDK; inspect the raw prediction
# tensors (corner boxes, confidences, class ids).
model = YOLO(f'{HOME}/yolov8n.pt')
results = model.predict(source='https://media.roboflow.com/notebooks/examples/dog.jpeg', conf=0.25)
results[0].boxes.xyxy
results[0].boxes.conf
results[0].boxes.cls
"""## Roboflow Universe
Need data for your project? Before spending time on annotating, check out Roboflow Universe, a repository of more than 110,000 open-source datasets that you can use in your projects. You'll find datasets containing everything from annotated cracks in concrete to plant images with disease annotations.
[![Roboflow Universe](https://ik.imagekit.io/roboflow/notebooks/template/uni-banner-frame.png?ik-sdk-version=javascript-1.4.3&updatedAt=1672878480290)](https://universe.roboflow.com/)
## Preparing a custom dataset
Building a custom dataset can be a painful process. It might take dozens or even hundreds of hours to collect images, label them, and export them in the proper format. Fortunately, Roboflow makes this process as straightforward and fast as possible. Let me show you how!
### Step 1: Creating project
Before you start, you need to create a Roboflow [account](https://app.roboflow.com/login). Once you do that, you can create a new project in the Roboflow [dashboard](https://app.roboflow.com/). Keep in mind to choose the right project type. In our case, Object Detection.
<div align="center">
<img
width="640"
src="https://ik.imagekit.io/roboflow/preparing-custom-dataset-example/creating-project.gif?ik-sdk-version=javascript-1.4.3&updatedAt=1672929799852"
>
</div>
### Step 2: Uploading images
Next, add the data to your newly created project. You can do it via API or through our [web interface](https://docs.roboflow.com/adding-data/object-detection).
If you drag and drop a directory with a dataset in a supported format, the Roboflow dashboard will automatically read the images and annotations together.
<div align="center">
<img
width="640"
src="https://ik.imagekit.io/roboflow/preparing-custom-dataset-example/uploading-images.gif?ik-sdk-version=javascript-1.4.3&updatedAt=1672929808290"
>
</div>
### Step 3: Labeling
If you only have images, you can label them in [Roboflow Annotate](https://docs.roboflow.com/annotate).
<div align="center">
<img
width="640"
src="https://user-images.githubusercontent.com/26109316/210901980-04861efd-dfc0-4a01-9373-13a36b5e1df4.gif"
>
</div>
### Step 4: Generate new dataset version
Now that we have our images and annotations added, we can Generate a Dataset Version. When Generating a Version, you may elect to add preprocessing and augmentations. This step is completely optional, however, it can allow you to significantly improve the robustness of your model.
<div align="center">
<img
width="640"
src="https://media.roboflow.com/preparing-custom-dataset-example/generate-new-version.gif?ik-sdk-version=javascript-1.4.3&updatedAt=1673003597834"
>
</div>
### Step 5: Exporting dataset
Once the dataset version is generated, we have a hosted dataset we can load directly into our notebook for easy training. Click `Export` and select the `YOLO v5 PyTorch` dataset format.
<div align="center">
<img
width="640"
src="https://ik.imagekit.io/roboflow/preparing-custom-dataset-example/export.gif?ik-sdk-version=javascript-1.4.3&updatedAt=1672943313709"
>
</div>
"""
# Commented out IPython magic to ensure Python compatibility.
!mkdir {HOME}/datasets
# %cd {HOME}/datasets
!pip install roboflow --quiet
'''
# old
from roboflow import Roboflow
rf = Roboflow(api_key="lCnfGZLdyt3hUOoOwlgv")
project = rf.workspace("sut-ejqq7").project("drone-jphdu")
dataset = project.version(1).download("yolov8")
'''
'''
from roboflow import Roboflow
rf = Roboflow(api_key="lCnfGZLdyt3hUOoOwlgv")
project = rf.workspace("sut-ejqq7").project("find-person-9oltm")
dataset = project.version(1).download("yolov8")
'''
# SECURITY NOTE(review): hard-coded Roboflow API key — it should be
# rotated and loaded from a Colab secret / environment variable instead.
from roboflow import Roboflow
rf = Roboflow(api_key="lCnfGZLdyt3hUOoOwlgv")
project = rf.workspace("sut-ejqq7").project("person-y0wvc")
dataset = project.version(1).download("yolov8")
"""## Custom Training"""
# Commented out IPython magic to ensure Python compatibility.
# %cd {HOME}
#dataset.location = '/content/datasets/person-1'
#!yolo task=detect mode=train model=yolov8s.pt data={dataset.location}/data.yaml epochs=25 imgsz=800 plots=True
#!yolo task=detect mode=train model=yolov8s.pt data={'/content/datasets/person-1'}/data.yaml epochs=25 plots=True
# Train YOLOv8-small for 100 epochs on the downloaded person dataset.
!yolo task=detect mode=train model=yolov8s.pt data={'/content/datasets/person-1'}/data.yaml epochs=100 imgsz=800 plots=True
# Inspect the training artifacts (weights, plots, metrics).
!ls {HOME}/runs/detect/train/
# Commented out IPython magic to ensure Python compatibility.
# %cd {HOME}
Image(filename=f'{HOME}/runs/detect/train/confusion_matrix.png', width=600)
# Commented out IPython magic to ensure Python compatibility.
# %cd {HOME}
Image(filename=f'{HOME}/runs/detect/train/results.png', width=600)
# Commented out IPython magic to ensure Python compatibility.
# %cd {HOME}
Image(filename=f'{HOME}/runs/detect/train/val_batch0_pred.jpg', width=600)
"""## Validate Custom Model"""
# Commented out IPython magic to ensure Python compatibility.
# %cd {HOME}
!yolo task=detect mode=val model={HOME}/runs/detect/train/weights/best.pt data={'/content/datasets/person-1'}/data.yaml
"""## Inference with Custom Model"""
# Commented out IPython magic to ensure Python compatibility.
# %cd {HOME}
!yolo task=detect mode=predict model={HOME}/runs/detect/train/weights/best.pt conf=0.25 source={'/content/datasets/person-1'}/test/images save=True
dataset.location = '/content/datasets/person-1'
"""**NOTE:** Let's take a look at few results."""
import glob
from IPython.display import Image, display
# Show the first three prediction images from the inference run.
for image_path in glob.glob(f'{HOME}/runs/detect/predict2/*.jpg')[:3]:
    #display(Image(filename=image_path, width=600))
    display(Image(filename=image_path, width=600))
    print("\n")
"""## Deploy model on Roboflow
Once you have finished training your YOLOv8 model, you’ll have a set of trained weights ready for use. These weights will be in the `/runs/detect/train/weights/best.pt` folder of your project. You can upload your model weights to Roboflow Deploy to use your trained weights on our infinitely scalable infrastructure.
The `.deploy()` function in the [Roboflow pip package](https://docs.roboflow.com/python) now supports uploading YOLOv8 weights.
To upload model weights, add the following code to the “Inference with Custom Model” section in the aforementioned notebook:
"""
# Upload the trained weights to Roboflow Deploy.
project.version(dataset.version).deploy(model_type="yolov8", model_path=f"{HOME}/runs/detect/train/")
#While your deployment is processing, checkout the deployment docs to take your model to most destinations https://docs.roboflow.com/inference
#Run inference on your model on a persistant, auto-scaling, cloud API
#load model
model = project.version(dataset.version).model
#choose random test set image
import os, random
test_set_loc = dataset.location + "/test/images/"
random_test_image = random.choice(os.listdir(test_set_loc))
print("running inference on " + random_test_image)
# Hosted-API inference; confidence/overlap are percentages (0-100).
pred = model.predict(test_set_loc + random_test_image, confidence=40, overlap=30).json()
pred
"""## 🏆 Congratulations
### Learning Resources
Roboflow has produced many resources that you may find interesting as you advance your knowledge of computer vision:
- [Roboflow Notebooks](https://github.com/roboflow/notebooks): A repository of over 20 notebooks that walk through how to train custom models with a range of model types, from YOLOv7 to SegFormer.
- [Roboflow YouTube](https://www.youtube.com/c/Roboflow): Our library of videos featuring deep dives into the latest in computer vision, detailed tutorials that accompany our notebooks, and more.
- [Roboflow Discuss](https://discuss.roboflow.com/): Have a question about how to do something on Roboflow? Ask your question on our discussion forum.
- [Roboflow Models](https://roboflow.com): Learn about state-of-the-art models and their performance. Find links and tutorials to guide your learning.
### Convert data formats
Roboflow provides free utilities to convert data between dozens of popular computer vision formats. Check out [Roboflow Formats](https://roboflow.com/formats) to find tutorials on how to convert data between formats in a few clicks.
### Connect computer vision to your project logic
[Roboflow Templates](https://roboflow.com/templates) is a public gallery of code snippets that you can use to connect computer vision to your project logic. Code snippets range from sending emails after inference to measuring object distance between detections.
"""
Tài liệu tham khảo
AI Việt Nam
Hết.