Commit 19eb7eb8 authored by Leif

Merge remote-tracking branch 'origin/dygraph' into dy1

parents 0afe6c32 03b7daa5
......@@ -24,4 +24,8 @@ output/
build/
dist/
paddleocr.egg-info/
\ No newline at end of file
paddleocr.egg-info/
/deploy/android_demo/app/OpenCV/
/deploy/android_demo/app/PaddleLite/
/deploy/android_demo/app/.cxx/
/deploy/android_demo/app/cache/
include LICENSE.txt
include LICENSE
include README.md
recursive-include ppocr/utils *.txt utility.py logging.py
recursive-include ppocr/data/ *.py
recursive-include ppocr/utils *.txt utility.py logging.py network.py
recursive-include ppocr/data *.py
recursive-include ppocr/postprocess *.py
recursive-include tools/infer *.py
\ No newline at end of file
recursive-include tools/infer *.py
recursive-include ppocr/utils/e2e_utils *.py
recursive-include ppstructure *.py
\ No newline at end of file
......@@ -27,7 +27,12 @@ import json
import cv2
__dir__ = os.path.dirname(os.path.abspath(__file__))
import numpy as np
sys.path.append(__dir__)
sys.path.append(os.path.abspath(os.path.join(__dir__, '../..')))
sys.path.append("..")
......@@ -78,7 +83,7 @@ class WindowMixin(object):
addActions(menu, actions)
return menu
def toolbar(self, title, actions=None):
toolbar = ToolBar(title)
toolbar.setObjectName(u'%sToolBar' % title)
# toolbar.setOrientation(Qt.Vertical)
......@@ -92,13 +97,13 @@ class WindowMixin(object):
class MainWindow(QMainWindow, WindowMixin):
FIT_WINDOW, FIT_WIDTH, MANUAL_ZOOM = list(range(3))
def __init__(self, lang="ch", defaultFilename=None, defaultPrefdefClassFile=None, defaultSaveDir=None):
def __init__(self, lang="ch", gpu=False, defaultFilename=None, defaultPrefdefClassFile=None, defaultSaveDir=None):
super(MainWindow, self).__init__()
self.setWindowTitle(__appname__)
# Load setting in the main thread
self.settings = Settings()
self.settings.load()
settings = self.settings
self.lang = lang
# Load string bundle for i18n
......@@ -108,7 +113,7 @@ class MainWindow(QMainWindow, WindowMixin):
getStr = lambda strId: self.stringBundle.getString(strId)
self.defaultSaveDir = defaultSaveDir
self.ocr = PaddleOCR(use_pdserving=False, use_angle_cls=True, det=True, cls=True, use_gpu=False, lang=lang)
self.ocr = PaddleOCR(use_pdserving=False, use_angle_cls=True, det=True, cls=True, use_gpu=gpu, lang=lang)
if os.path.exists('./data/paddle.png'):
result = self.ocr.ocr('./data/paddle.png', cls=True, det=True)
......@@ -147,6 +152,7 @@ class MainWindow(QMainWindow, WindowMixin):
self.itemsToShapesbox = {}
self.shapesToItemsbox = {}
self.prevLabelText = getStr('tempLabel')
self.noLabelText = getStr('nullLabel')
self.model = 'paddle'
self.PPreader = None
self.autoSaveNum = 5
......@@ -158,7 +164,7 @@ class MainWindow(QMainWindow, WindowMixin):
filelistLayout = QVBoxLayout()
filelistLayout.setContentsMargins(0, 0, 0, 0)
filelistLayout.addWidget(self.fileListWidget)
self.AutoRecognition = QToolButton()
self.AutoRecognition.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
self.AutoRecognition.setIcon(newIcon('Auto'))
......@@ -175,7 +181,7 @@ class MainWindow(QMainWindow, WindowMixin):
self.filedock.setObjectName(getStr('files'))
self.filedock.setWidget(fileListContainer)
self.addDockWidget(Qt.LeftDockWidgetArea, self.filedock)
######## Right area ##########
listLayout = QVBoxLayout()
listLayout.setContentsMargins(0, 0, 0, 0)
......@@ -249,7 +255,7 @@ class MainWindow(QMainWindow, WindowMixin):
self.imgsplider.setMaximum(150)
self.imgsplider.setSingleStep(1)
self.imgsplider.setTickPosition(QSlider.TicksBelow)
self.imgsplider.setTickInterval(1)
op = QGraphicsOpacityEffect()
op.setOpacity(0.2)
self.imgsplider.setGraphicsEffect(op)
......@@ -265,7 +271,9 @@ class MainWindow(QMainWindow, WindowMixin):
self.zoomWidget = ZoomWidget()
self.colorDialog = ColorDialog(parent=self)
self.zoomWidgetValue = self.zoomWidget.value()
self.msgBox = QMessageBox()
########## thumbnail #########
hlayout = QHBoxLayout()
m = (0, 0, 0, 0)
......@@ -293,7 +301,7 @@ class MainWindow(QMainWindow, WindowMixin):
self.nextButton.setStyleSheet('border: none;')
self.nextButton.clicked.connect(self.openNextImg)
self.nextButton.setShortcut('d')
hlayout.addWidget(self.preButton)
hlayout.addWidget(self.iconlist)
hlayout.addWidget(self.nextButton)
......@@ -302,7 +310,7 @@ class MainWindow(QMainWindow, WindowMixin):
iconListContainer = QWidget()
iconListContainer.setLayout(hlayout)
iconListContainer.setFixedHeight(100)
########### Canvas ###########
self.canvas = Canvas(parent=self)
self.canvas.zoomRequest.connect(self.zoomRequest)
......@@ -359,6 +367,9 @@ class MainWindow(QMainWindow, WindowMixin):
opendir = action(getStr('openDir'), self.openDirDialog,
'Ctrl+u', 'open', getStr('openDir'))
open_dataset_dir = action(getStr('openDatasetDir'), self.openDatasetDirDialog,
'Ctrl+p', 'open', getStr('openDatasetDir'), enabled=False)
save = action(getStr('save'), self.saveFile,
'Ctrl+V', 'verify', getStr('saveDetail'), enabled=False)
......@@ -397,6 +408,7 @@ class MainWindow(QMainWindow, WindowMixin):
help = action(getStr('tutorial'), self.showTutorialDialog, None, 'help', getStr('tutorialDetail'))
showInfo = action(getStr('info'), self.showInfoDialog, None, 'help', getStr('info'))
showSteps = action(getStr('steps'), self.showStepsDialog, None, 'help', getStr('steps'))
showKeys = action(getStr('keys'), self.showKeysDialog, None, 'help', getStr('keys'))
zoom = QWidgetAction(self)
zoom.setDefaultWidget(self.zoomWidget)
......@@ -437,7 +449,7 @@ class MainWindow(QMainWindow, WindowMixin):
AutoRec = action(getStr('autoRecognition'), self.autoRecognition,
'', 'Auto', getStr('autoRecognition'), enabled=False)
reRec = action(getStr('reRecognition'), self.reRecognition,
'Ctrl+Shift+R', 'reRec', getStr('reRecognition'), enabled=False)
singleRere = action(getStr('singleRe'), self.singleRerecognition,
......@@ -455,6 +467,12 @@ class MainWindow(QMainWindow, WindowMixin):
undoLastPoint = action(getStr("undoLastPoint"), self.canvas.undoLastPoint,
'Ctrl+Z', "undo", getStr("undoLastPoint"), enabled=False)
rotateLeft = action(getStr("rotateLeft"), partial(self.rotateImgAction,1),
'Ctrl+Alt+L', "rotateLeft", getStr("rotateLeft"), enabled=False)
rotateRight = action(getStr("rotateRight"), partial(self.rotateImgAction,-1),
'Ctrl+Alt+R', "rotateRight", getStr("rotateRight"), enabled=False)
undo = action(getStr("undo"), self.undoShapeEdit,
'Ctrl+Z', "undo", getStr("undo"), enabled=False)
......@@ -518,13 +536,14 @@ class MainWindow(QMainWindow, WindowMixin):
zoom=zoom, zoomIn=zoomIn, zoomOut=zoomOut, zoomOrg=zoomOrg,
fitWindow=fitWindow, fitWidth=fitWidth,
zoomActions=zoomActions, saveLabel=saveLabel,
undo=undo, undoLastPoint=undoLastPoint,
undo=undo, undoLastPoint=undoLastPoint,open_dataset_dir=open_dataset_dir,
rotateLeft=rotateLeft,rotateRight=rotateRight,
fileMenuActions=(
opendir, saveLabel, resetAll, quit),
opendir, open_dataset_dir, saveLabel, resetAll, quit),
beginner=(), advanced=(),
editMenu=(createpoly, edit, copy, delete,singleRere,None, undo, undoLastPoint,
None, color1, self.drawSquaresOption),
beginnerContext=(create, edit, copy, delete, singleRere),
None, rotateLeft, rotateRight, None, color1, self.drawSquaresOption),
beginnerContext=(create, edit, copy, delete, singleRere, rotateLeft, rotateRight,),
advancedContext=(createMode, editMode, edit, copy,
delete, shapeLineColor, shapeFillColor),
onLoadActive=(
......@@ -562,9 +581,9 @@ class MainWindow(QMainWindow, WindowMixin):
self.autoSaveOption.triggered.connect(self.autoSaveFunc)
addActions(self.menus.file,
(opendir, None, saveLabel, saveRec, self.autoSaveOption, None, resetAll, deleteImg, quit))
(opendir, open_dataset_dir, None, saveLabel, saveRec, self.autoSaveOption, None, resetAll, deleteImg, quit))
addActions(self.menus.help, (showSteps, showInfo))
addActions(self.menus.help, (showKeys,showSteps, showInfo))
addActions(self.menus.view, (
self.displayLabelOption, self.labelDialogOption,
None,
......@@ -759,6 +778,10 @@ class MainWindow(QMainWindow, WindowMixin):
msg = stepsInfo(self.lang)
QMessageBox.information(self, u'Information', msg)
def showKeysDialog(self):
msg = keysInfo(self.lang)
QMessageBox.information(self, u'Information', msg)
def createShape(self):
assert self.beginner()
self.canvas.setEditing(False)
......@@ -772,6 +795,38 @@ class MainWindow(QMainWindow, WindowMixin):
self.actions.create.setEnabled(False)
self.actions.undoLastPoint.setEnabled(True)
def rotateImg(self, filename, k, _value):
self.actions.rotateRight.setEnabled(_value)
pix = cv2.imread(filename)
pix = np.rot90(pix, k)
cv2.imwrite(filename, pix)
self.canvas.update()
self.loadFile(filename)
def rotateImgWarn(self):
if self.lang == 'ch':
self.msgBox.warning (self, "提示", "\n 该图片已经有标注框,旋转操作会打乱标注,建议清除标注框后旋转。")
else:
self.msgBox.warning(self, "Warn", "\n The image already has label boxes, and rotation will disrupt them.\
 It is recommended to clear the label boxes before rotating.")
def rotateImgAction(self, k=1, _value=False):
filename = self.mImgList[self.currIndex]
if os.path.exists(filename):
if self.itemsToShapesbox:
self.rotateImgWarn()
else:
self.saveFile()
self.dirty = False
self.rotateImg(filename=filename, k=k, _value=True)
else:
self.rotateImgWarn()
self.actions.rotateRight.setEnabled(False)
self.actions.rotateLeft.setEnabled(False)
def toggleDrawingSensitive(self, drawing=True):
"""In the middle of drawing, toggling between modes should be disabled."""
self.actions.editMode.setEnabled(not drawing)
......@@ -879,7 +934,12 @@ class MainWindow(QMainWindow, WindowMixin):
self.updateComboBox()
def updateBoxlist(self):
for shape in self.canvas.selectedShapes+[self.canvas.hShape]:
self.canvas.selectedShapes_hShape = []
if self.canvas.hShape is not None:
self.canvas.selectedShapes_hShape = self.canvas.selectedShapes + [self.canvas.hShape]
else:
self.canvas.selectedShapes_hShape = self.canvas.selectedShapes
for shape in self.canvas.selectedShapes_hShape:
item = self.shapesToItemsbox[shape] # listitem
text = [(int(p.x()), int(p.y())) for p in shape.points]
item.setText(str(text))
......@@ -1020,7 +1080,7 @@ class MainWindow(QMainWindow, WindowMixin):
item.setText(str([(int(p.x()), int(p.y())) for p in shape.points]))
self.updateComboBox()
def updateComboBox(self): # TODO:貌似没用
def updateComboBox(self):
# Get the unique labels and add them to the Combobox.
itemsTextList = [str(self.labelList.item(i).text()) for i in range(self.labelList.count())]
......@@ -1040,7 +1100,7 @@ class MainWindow(QMainWindow, WindowMixin):
return dict(label=s.label, # str
line_color=s.line_color.getRgb(),
fill_color=s.fill_color.getRgb(),
points=[(p.x(), p.y()) for p in s.points], # QPonitF
points=[(int(p.x()), int(p.y())) for p in s.points], # QPonitF
# add chris
difficult=s.difficult) # bool
......@@ -1069,7 +1129,7 @@ class MainWindow(QMainWindow, WindowMixin):
# print('Image:{0} -> Annotation:{1}'.format(self.filePath, annotationFilePath))
return True
except:
self.errorMessage(u'Error saving label data')
self.errorMessage(u'Error saving label data', u'Error saving label data')
return False
def copySelectedShape(self):
......@@ -1238,6 +1298,8 @@ class MainWindow(QMainWindow, WindowMixin):
def loadFile(self, filePath=None):
"""Load the specified file, or the last opened file if None."""
if self.dirty:
self.mayContinue()
self.resetState()
self.canvas.setEnabled(False)
if filePath is None:
......@@ -1266,7 +1328,7 @@ class MainWindow(QMainWindow, WindowMixin):
titem = self.iconlist.item(i)
titem.setSelected(True)
self.iconlist.scrollToItem(titem)
break
else:
self.fileListWidget.clear()
self.mImgList.clear()
......@@ -1274,7 +1336,7 @@ class MainWindow(QMainWindow, WindowMixin):
# if unicodeFilePath and self.iconList.count() > 0:
# if unicodeFilePath in self.mImgList:
if unicodeFilePath and os.path.exists(unicodeFilePath):
self.canvas.verified = False
......@@ -1305,7 +1367,7 @@ class MainWindow(QMainWindow, WindowMixin):
self.addRecentFile(self.filePath)
self.toggleActions(True)
self.showBoundingBoxFromPPlabel(filePath)
self.setWindowTitle(__appname__ + ' ' + filePath)
# Default : select last item if there is at least one item
......@@ -1317,7 +1379,7 @@ class MainWindow(QMainWindow, WindowMixin):
return True
return False
def showBoundingBoxFromPPlabel(self, filePath):
imgidx = self.getImglabelidx(filePath)
if imgidx not in self.PPlabel.keys():
......@@ -1410,6 +1472,7 @@ class MainWindow(QMainWindow, WindowMixin):
def loadRecent(self, filename):
if self.mayContinue():
print(filename,"======")
self.loadFile(filename)
def scanAllImages(self, folderPath):
......@@ -1445,6 +1508,23 @@ class MainWindow(QMainWindow, WindowMixin):
self.lastOpenDir = targetDirPath
self.importDirImages(targetDirPath)
def openDatasetDirDialog(self,):
if self.lastOpenDir and os.path.exists(self.lastOpenDir):
if platform.system() == 'Windows':
os.startfile(self.lastOpenDir)
else:
os.system('open ' + os.path.normpath(self.lastOpenDir))
defaultOpenDirPath = self.lastOpenDir
else:
if self.lang == 'ch':
self.msgBox.warning(self, "提示", "\n 原文件夹已不存在,请从新选择数据集路径!")
else:
self.msgBox.warning(self, "Warn", "\n The original folder no longer exists, please choose the data set path again!")
self.actions.open_dataset_dir.setEnabled(False)
defaultOpenDirPath = os.path.dirname(self.filePath) if self.filePath else '.'
def importDirImages(self, dirpath, isDelete = False):
if not self.mayContinue() or not dirpath:
return
......@@ -1492,6 +1572,10 @@ class MainWindow(QMainWindow, WindowMixin):
self.reRecogButton.setEnabled(True)
self.actions.AutoRec.setEnabled(True)
self.actions.reRec.setEnabled(True)
self.actions.open_dataset_dir.setEnabled(True)
self.actions.rotateLeft.setEnabled(True)
self.actions.rotateRight.setEnabled(True)
def openPrevImg(self, _value=False):
......@@ -1500,7 +1584,7 @@ class MainWindow(QMainWindow, WindowMixin):
if self.filePath is None:
return
currIndex = self.mImgList.index(self.filePath)
self.mImgList5 = self.mImgList[:5]
if currIndex - 1 >= 0:
......@@ -1530,7 +1614,7 @@ class MainWindow(QMainWindow, WindowMixin):
if filename:
print('file name in openNext is ',filename)
self.loadFile(filename)
def updateFileListIcon(self, filename):
pass
......@@ -1642,7 +1726,7 @@ class MainWindow(QMainWindow, WindowMixin):
proc.startDetached(os.path.abspath(__file__))
def mayContinue(self): #
if not self.dirty:
return True
else:
discardChanges = self.discardChangesDialog()
......@@ -1802,10 +1886,14 @@ class MainWindow(QMainWindow, WindowMixin):
result.insert(0, box)
print('result in reRec is ', result)
self.result_dic.append(result)
if result[1][0] == shape.label:
print('label no change')
else:
rec_flag += 1
else:
print('Can not recognise the box')
self.result_dic.append([box,(self.noLabelText,0)])
if self.noLabelText == shape.label or result[1][0] == shape.label:
print('label no change')
else:
rec_flag += 1
if len(self.result_dic) > 0 and rec_flag > 0:
self.saveFile(mode='Auto')
......@@ -1836,9 +1924,14 @@ class MainWindow(QMainWindow, WindowMixin):
print('label no change')
else:
shape.label = result[1][0]
self.singleLabel(shape)
self.setDirty()
print(box)
else:
print('Can not recognise the box')
if self.noLabelText == shape.label:
print('label no change')
else:
shape.label = self.noLabelText
self.singleLabel(shape)
self.setDirty()
def autolcm(self):
vbox = QVBoxLayout()
......@@ -2027,6 +2120,8 @@ def read(filename, default=None):
except:
return default
def str2bool(v):
return v.lower() in ("true", "t", "1")
def get_main_app(argv=[]):
"""
......@@ -2038,13 +2133,14 @@ def get_main_app(argv=[]):
app.setWindowIcon(newIcon("app"))
# Tzutalin 201705+: Accept extra arguments to change predefined class file
argparser = argparse.ArgumentParser()
argparser.add_argument("--lang", default='en', nargs="?")
argparser.add_argument("--lang", type=str, default='en', nargs="?")
argparser.add_argument("--gpu", type=str2bool, default=False, nargs="?")
argparser.add_argument("--predefined_classes_file",
default=os.path.join(os.path.dirname(__file__), "data", "predefined_classes.txt"),
nargs="?")
args = argparser.parse_args(argv[1:])
# Usage : labelImg.py image predefClassFile saveDir
win = MainWindow(lang=args.lang,
win = MainWindow(lang=args.lang, gpu=args.gpu,
defaultPrefdefClassFile=args.predefined_classes_file)
win.show()
return app, win
......@@ -2057,7 +2153,7 @@ def main():
if __name__ == '__main__':
resource_file = './libs/resources.py'
if not os.path.exists(resource_file):
output = os.system('pyrcc5 -o libs/resources.py resources.qrc')
......
......@@ -8,9 +8,12 @@ PPOCRLabel is a semi-automatic graphic annotation tool suitable for OCR field, w
### Recent Update
- 2021.8.11:
- New functions: open the dataset folder and rotate images (Note: please delete all label boxes before rotating an image) (by [Wei-JL](https://github.com/Wei-JL))
- Added a shortcut-key description (Help - Shortcut Keys) and fixed the arrow-key movement of boxes during batch processing (by [d2623587501](https://github.com/d2623587501))
- 2021.2.5: New batch processing and undo functions (by [Evezerest](https://github.com/Evezerest)):
- Batch processing function: Press and hold the Ctrl key to select the box, you can move, copy, and delete in batches.
- Undo function: In the process of drawing a four-point label box or after editing the box, press Ctrl+Z to undo the previous operation.
- **Batch processing function**: Press and hold the Ctrl key to select multiple boxes; you can then move, copy, and delete them in batches.
- **Undo function**: In the process of drawing a four-point label box or after editing the box, press Ctrl+Z to undo the previous operation.
- Fixed image rotation and size problems and optimized the process of editing label boxes (by [ninetailskim](https://github.com/ninetailskim), [edencfc](https://github.com/edencfc)).
- 2021.1.11: Optimize the labeling experience (by [edencfc](https://github.com/edencfc)),
- Users can choose whether to pop up the label input dialog after drawing the detection box in "View - Pop-up Label Input Dialog".
......@@ -23,17 +26,51 @@ PPOCRLabel is a semi-automatic graphic annotation tool suitable for OCR field, w
## Installation
### 1. Install PaddleOCR
### 1. Environment Preparation
PaddleOCR models have been built into PPOCRLabel; please refer to the [PaddleOCR installation document](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/doc/doc_ch/installation.md) to prepare PaddleOCR and make sure it works.
#### **Install PaddlePaddle 2.0**
### 2. Install PPOCRLabel
```bash
pip3 install --upgrade pip
# If you have cuda9 or cuda10 installed on your machine, please run the following command to install
python3 -m pip install paddlepaddle-gpu==2.0.0 -i https://mirror.baidu.com/pypi/simple
# If you only have cpu on your machine, please run the following command to install
python3 -m pip install paddlepaddle==2.0.0 -i https://mirror.baidu.com/pypi/simple
```
For more software version requirements, please refer to the [Installation Document](https://www.paddlepaddle.org.cn/install/quick).
#### **Install PaddleOCR**
```bash
# Recommend
git clone https://github.com/PaddlePaddle/PaddleOCR
# If you cannot pull successfully due to network problems, you can also choose to use the code hosting on the cloud:
git clone https://gitee.com/paddlepaddle/PaddleOCR
#### Windows + Anaconda
# Note: The cloud-hosted code may not be synchronized with this GitHub project in real time; there might be a delay of 3-5 days. Please prefer the recommended method.
```
Download and install [Anaconda](https://www.anaconda.com/download/#download) (Python 3+)
#### **Install Third-party Libraries**
```bash
cd PaddleOCR
pip3 install -r requirements.txt
```
If you get the error `OSError: [WinError 126] The specified module could not be found` when installing Shapely on Windows, try downloading a Shapely whl file from http://www.lfd.uci.edu/~gohlke/pythonlibs/#shapely and installing it manually.
Reference: [Solve shapely installation on windows](https://stackoverflow.com/questions/44398265/install-shapely-oserror-winerror-126-the-specified-module-could-not-be-found)
### 2. Install PPOCRLabel
#### Windows
```bash
pip install pyqt5
cd ./PPOCRLabel # Change the directory to the PPOCRLabel folder
python PPOCRLabel.py
......@@ -41,15 +78,15 @@ python PPOCRLabel.py
#### Ubuntu Linux
```
```bash
pip3 install pyqt5
pip3 install trash-cli
cd ./PPOCRLabel # Change the directory to the PPOCRLabel folder
python3 PPOCRLabel.py
```
#### macOS
```
#### MacOS
```bash
pip3 install pyqt5
pip3 uninstall opencv-python # Uninstall opencv manually as it conflicts with pyqt
pip3 install opencv-contrib-python-headless==4.2.0.32 # Install the headless version of opencv
......@@ -79,11 +116,11 @@ python3 PPOCRLabel.py
7. Double click the result in 'recognition result' list to manually change inaccurate recognition results.
8. Click "Check", the image status will switch to "√",then the program automatically jump to the next.
8. **Click "Check", the image status will switch to "√",then the program automatically jump to the next.**
9. Click "Delete Image" and the image will be deleted to the recycle bin.
10. Labeling result: the user can save manually through the menu "File - Save Label", while the program will also save automatically if "File - Auto Save Label Mode" is selected. The manually checked label will be stored in *Label.txt* under the opened picture folder. Click "PaddleOCR"-"Save Recognition Results" in the menu bar, the recognition training data of such pictures will be saved in the *crop_img* folder, and the recognition label will be saved in *rec_gt.txt*<sup>[4]</sup>.
10. Labeling result: the user can export the label result manually through the menu "File - Export Label", while the program will also export automatically if "File - Auto Export Label Mode" is selected. The manually checked labels will be stored in *Label.txt* under the opened picture folder. Click "File" - "Export Recognition Results" in the menu bar; the recognition training data of such pictures will be saved in the *crop_img* folder, and the recognition labels will be saved in *rec_gt.txt*<sup>[4]</sup>.
### Note
......@@ -97,10 +134,10 @@ python3 PPOCRLabel.py
| File name | Description |
| :-----------: | :----------------------------------------------------------: |
| Label.txt | The detection label file can be directly used for PPOCR detection model training. After the user saves 5 label results, the file will be automatically saved. It will also be written when the user closes the application or changes the file folder. |
| Label.txt | The detection label file can be directly used for PPOCR detection model training. After the user saves 5 label results, the file will be automatically exported. It will also be written when the user closes the application or changes the file folder. |
| fileState.txt | The picture status file saves the names of the images in the current folder that have been manually confirmed by the user. |
| Cache.cach | Cache files to save the results of model recognition. |
| rec_gt.txt | The recognition label file, which can be directly used for PPOCR identification model training, is generated after the user clicks on the menu bar "File"-"Save recognition result". |
| rec_gt.txt | The recognition label file, which can be directly used for PPOCR identification model training, is generated after the user clicks on the menu bar "File"-"Export recognition result". |
| crop_img | The recognition data, generated at the same time with *rec_gt.txt* |
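
For orientation, here is a minimal sketch of reading *Label.txt* downstream, assuming the PaddleOCR detection label format (one line per image: the image path, a tab, and a JSON list of boxes with `transcription` and `points` fields):

```python
import json

# Minimal sketch (assumed format): "<image path>\t<JSON list of boxes>",
# where each box carries a "transcription" string and a "points" quadrilateral.
with open('Label.txt', 'r', encoding='utf-8') as f:
    for line in f:
        img_path, annotation = line.rstrip('\n').split('\t', 1)
        for box in json.loads(annotation):
            print(img_path, box['transcription'], box['points'])
```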
## Explanation
......@@ -134,16 +171,16 @@ python3 PPOCRLabel.py
- Custom model: A model trained by the user can be plugged in by modifying the [PaddleOCR class instantiation](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/PPOCRLabel/PPOCRLabel.py#L110) in PPOCRLabel.py, referring to the [Custom Model Code](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/doc/doc_en/whl_en.md#use-custom-model); see the sketch below.
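
As a rough illustration (the directory paths are placeholders, not files shipped with this repo), replacing the model amounts to pointing the `PaddleOCR` constructor at your own inference directories:

```python
from paddleocr import PaddleOCR

# Sketch only: substitute the directories of your exported inference models.
ocr = PaddleOCR(det_model_dir='./my_det_inference/',  # custom detection model (placeholder path)
                rec_model_dir='./my_rec_inference/',  # custom recognition model (placeholder path)
                use_angle_cls=True,
                lang='ch')
```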
### Save
### Export Label Result
PPOCRLabel supports three ways to save Label.txt
PPOCRLabel supports three ways to export Label.txt
- Automatically save: After selecting "File - Auto Save Label Mode", the program will automatically write the annotations into Label.txt every time the user confirms an image. If this option is not turned on, it will be automatically saved after detecting that the user has manually checked 5 images.
- Manual save: Click "File-Save Marking Results" to manually save the label.
- Close application save
- Automatically export: After selecting "File - Auto Export Label Mode", the program will automatically write the annotations into Label.txt every time the user confirms an image. If this option is not turned on, it will be automatically exported after detecting that the user has manually checked 5 images.
- Manual export: Click "File-Export Marking Results" to manually export the label.
- Close application export
### Export partial recognition results
### Export Partial Recognition Results
For data that are difficult to recognize, **uncheck** the corresponding entries in the recognition-results checkbox and their recognition results will not be exported.
......
......@@ -8,9 +8,12 @@ PPOCRLabel是一款适用于OCR领域的半自动化图形标注工具,内置P
#### 近期更新
- 2021.8.11:
- 新增功能:打开数据所在文件夹、图像旋转(注意:旋转前的图片上不能存在标记框)(by [Wei-JL](https://github.com/Wei-JL))
- 新增快捷键说明(帮助-快捷键)、修复批处理下的方向快捷键移动功能(by [d2623587501](https://github.com/d2623587501))
- 2021.2.5:新增批处理与撤销功能(by [Evezerest](https://github.com/Evezerest))
- 批处理功能:按住Ctrl键选择标记框后可批量移动、复制、删除。
- 撤销功能:在绘制四点标注框过程中或对框进行编辑操作后,按下Ctrl+Z可撤销上一步操作。
- **批处理功能**:按住Ctrl键选择标记框后可批量移动、复制、删除、重新识别
- **撤销功能**:在绘制四点标注框过程中或对框进行编辑操作后,按下Ctrl+Z可撤销上一步操作。
- 修复图像旋转和尺寸问题、优化编辑标记框过程(by [ninetailskim](https://github.com/ninetailskim)、[edencfc](https://github.com/edencfc))
- 2021.1.11:优化标注体验(by [edencfc](https://github.com/edencfc)):
- 用户可在“视图 - 弹出标记输入框”选择在画完检测框后标记输入框是否弹出。
......@@ -27,13 +30,48 @@ PPOCRLabel是一款适用于OCR领域的半自动化图形标注工具,内置P
## 安装
### 1. 安装PaddleOCR
PPOCRLabel内置PaddleOCR模型,故请参考[PaddleOCR安装文档](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/doc/doc_ch/installation.md)准备好PaddleOCR,并确保PaddleOCR安装成功。
### 1. 环境搭建
#### 安装PaddlePaddle
### 2. 安装PPOCRLabel
#### Windows + Anaconda
```bash
pip3 install --upgrade pip
如果您的机器安装的是CUDA9或CUDA10,请运行以下命令安装
python3 -m pip install paddlepaddle-gpu==2.0.0 -i https://mirror.baidu.com/pypi/simple
如果您的机器是CPU,请运行以下命令安装
python3 -m pip install paddlepaddle==2.0.0 -i https://mirror.baidu.com/pypi/simple
```
更多的版本需求,请参照[安装文档](https://www.paddlepaddle.org.cn/install/quick)中的说明进行操作。
#### **安装PaddleOCR**
```bash
【推荐】git clone https://github.com/PaddlePaddle/PaddleOCR
如果因为网络问题无法pull成功,也可选择使用码云上的托管:
git clone https://gitee.com/paddlepaddle/PaddleOCR
注:码云托管代码可能无法实时同步本github项目更新,存在3~5天延时,请优先使用推荐方式。
```
#### 安装第三方库
```bash
cd PaddleOCR
pip3 install -r requirements.txt
```
注意:Windows环境下,建议从[这里](https://www.lfd.uci.edu/~gohlke/pythonlibs/#shapely)下载shapely安装包完成安装,直接通过pip安装的shapely库可能出现 `[WinError 126] 找不到指定模块` 的问题。
### 2. 安装PPOCRLabel
#### Windows
```bash
pip install pyqt5
cd ./PPOCRLabel # 将目录切换到PPOCRLabel文件夹下
python PPOCRLabel.py --lang ch
......@@ -41,15 +79,15 @@ python PPOCRLabel.py --lang ch
#### Ubuntu Linux
```
```bash
pip3 install pyqt5
pip3 install trash-cli
cd ./PPOCRLabel # 将目录切换到PPOCRLabel文件夹下
python3 PPOCRLabel.py --lang ch
```
#### macOS
```
#### MacOS
```bash
pip3 install pyqt5
pip3 uninstall opencv-python # 由于mac版本的opencv与pyqt有冲突,需先手动卸载opencv
pip3 install opencv-contrib-python-headless==4.2.0.32 # 安装headless版本的open-cv
......@@ -57,6 +95,8 @@ cd ./PPOCRLabel # 将目录切换到PPOCRLabel文件夹下
python3 PPOCRLabel.py --lang ch
```
## 使用
### 操作步骤
......@@ -68,9 +108,9 @@ python3 PPOCRLabel.py --lang ch
5. 标记框绘制完成后,用户点击 “确认”,检测框会先被预分配一个 “待识别” 标签。
6. 重新识别:将图片中的所有检测框绘制/调整完成后,点击 “重新识别”,PPOCR模型会对当前图片中的**所有检测框**重新识别<sup>[3]</sup>。
7. 内容更改:双击识别结果,对不准确的识别结果进行手动更改。
8. **确认标记**:点击 “确认”,图片状态切换为 “√”,跳转至下一张。
8. **确认标记:点击 “确认”,图片状态切换为 “√”,跳转至下一张。**
9. 删除:点击 “删除图像”,图片将会被删除至回收站。
10. 保存结果:用户可以通过菜单中“文件-保存标记结果”手动保存,同时也可以点击“文件 - 自动保存标记结果”开启自动保存。手动确认过的标记将会被存放在所打开图片文件夹下的*Label.txt*中。在菜单栏点击 “文件” - "保存识别结果"后,会将此类图片的识别训练数据保存在*crop_img*文件夹下,识别标签保存在*rec_gt.txt*<sup>[4]</sup>
10. 导出结果:用户可以通过菜单中“文件-导出标记结果”手动导出,同时也可以点击“文件 - 自动导出标记结果”开启自动导出。手动确认过的标记将会被存放在所打开图片文件夹下的*Label.txt*中。在菜单栏点击 “文件” - "导出识别结果"后,会将此类图片的识别训练数据保存在*crop_img*文件夹下,识别标签保存在*rec_gt.txt*<sup>[4]</sup>
### 注意
......@@ -84,10 +124,10 @@ python3 PPOCRLabel.py --lang ch
| 文件名 | 说明 |
| :-----------: | :----------------------------------------------------------: |
| Label.txt | 检测标签,可直接用于PPOCR检测模型训练。用户每保存5张检测结果后,程序会进行自动写入。当用户关闭应用程序或切换文件路径后同样会进行写入。 |
| Label.txt | 检测标签,可直接用于PPOCR检测模型训练。用户每确认5张检测结果后,程序会进行自动写入。当用户关闭应用程序或切换文件路径后同样会进行写入。 |
| fileState.txt | 图片状态标记文件,保存当前文件夹下已经被用户手动确认过的图片名称。 |
| Cache.cach | 缓存文件,保存模型自动识别的结果。 |
| rec_gt.txt | 识别标签。可直接用于PPOCR识别模型训练。需用户手动点击菜单栏“文件” - "保存识别结果"后产生。 |
| rec_gt.txt | 识别标签。可直接用于PPOCR识别模型训练。需用户手动点击菜单栏“文件” - "导出识别结果"后产生。 |
| crop_img | 识别数据。按照检测框切割后的图片。与rec_gt.txt同时产生。 |
## 说明
......@@ -120,19 +160,19 @@ python3 PPOCRLabel.py --lang ch
- 自定义模型:用户可根据[自定义模型代码使用](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/doc/doc_ch/whl.md#%E8%87%AA%E5%AE%9A%E4%B9%89%E6%A8%A1%E5%9E%8B),通过修改PPOCRLabel.py中针对[PaddleOCR类的实例化](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/PPOCRLabel/PPOCRLabel.py#L110)替换成自己训练的模型。
### 保存方式
### 导出标记结果
PPOCRLabel支持三种保存方式:
PPOCRLabel支持三种导出方式:
- 自动保存:点击“文件 - 自动保存标记结果”后,用户每确认过一张图片,程序自动将标记结果写入Label.txt中。若未开启此选项,则检测到用户手动确认过5张图片后进行自动保存
- 手动保存:点击“文件 - 保存标记结果”手动保存标记。
- 关闭应用程序保存
- 自动导出:点击“文件 - 自动导出标记结果”后,用户每确认过一张图片,程序自动将标记结果写入Label.txt中。若未开启此选项,则检测到用户手动确认过5张图片后进行自动导出
- 手动导出:点击“文件 - 导出标记结果”手动导出标记。
- 关闭应用程序导出
### 导出部分识别结果
针对部分难以识别的数据,通过在识别结果的复选框中**取消勾选**相应的标记,其识别结果不会被导出。
*注意:识别结果中的复选框状态仍需用户手动点击保存后才能保留*
*注意:识别结果中的复选框状态仍需用户手动点击确认后才能保留*
### 错误提示
- 如果同时使用whl包安装了paddleocr,其优先级大于通过paddleocr.py调用PaddleOCR类,whl包未更新时会导致程序异常。
......
......@@ -23,6 +23,7 @@ except ImportError:
from libs.shape import Shape
from libs.utils import distance
import copy
CURSOR_DEFAULT = Qt.ArrowCursor
CURSOR_POINT = Qt.PointingHandCursor
......@@ -45,7 +46,7 @@ class Canvas(QWidget):
CREATE, EDIT = list(range(2))
_fill_drawing = False # draw shadows
epsilon = 11.0
epsilon = 5.0
def __init__(self, *args, **kwargs):
super(Canvas, self).__init__(*args, **kwargs)
......@@ -81,6 +82,7 @@ class Canvas(QWidget):
self.fourpoint = True # ADD
self.pointnum = 0
self.movingShape = False
self.selectCountShape = False
#initialisation for panning
self.pan_initial_pos = QPoint()
......@@ -702,6 +704,10 @@ class Canvas(QWidget):
def keyPressEvent(self, ev):
key = ev.key()
shapesBackup = []
shapesBackup = copy.deepcopy(self.shapes)
self.shapesBackups.pop()
self.shapesBackups.append(shapesBackup)
if key == Qt.Key_Escape and self.current:
print('ESC press')
self.current = None
......@@ -709,41 +715,48 @@ class Canvas(QWidget):
self.update()
elif key == Qt.Key_Return and self.canCloseShape():
self.finalise()
elif key == Qt.Key_Left and self.selectedShape:
elif key == Qt.Key_Left and self.selectedShapes:
self.moveOnePixel('Left')
elif key == Qt.Key_Right and self.selectedShape:
elif key == Qt.Key_Right and self.selectedShapes:
self.moveOnePixel('Right')
elif key == Qt.Key_Up and self.selectedShape:
elif key == Qt.Key_Up and self.selectedShapes:
self.moveOnePixel('Up')
elif key == Qt.Key_Down and self.selectedShape:
elif key == Qt.Key_Down and self.selectedShapes:
self.moveOnePixel('Down')
def moveOnePixel(self, direction):
# print(self.selectedShape.points)
if direction == 'Left' and not self.moveOutOfBound(QPointF(-1.0, 0)):
# print("move Left one pixel")
self.selectedShape.points[0] += QPointF(-1.0, 0)
self.selectedShape.points[1] += QPointF(-1.0, 0)
self.selectedShape.points[2] += QPointF(-1.0, 0)
self.selectedShape.points[3] += QPointF(-1.0, 0)
elif direction == 'Right' and not self.moveOutOfBound(QPointF(1.0, 0)):
# print("move Right one pixel")
self.selectedShape.points[0] += QPointF(1.0, 0)
self.selectedShape.points[1] += QPointF(1.0, 0)
self.selectedShape.points[2] += QPointF(1.0, 0)
self.selectedShape.points[3] += QPointF(1.0, 0)
elif direction == 'Up' and not self.moveOutOfBound(QPointF(0, -1.0)):
# print("move Up one pixel")
self.selectedShape.points[0] += QPointF(0, -1.0)
self.selectedShape.points[1] += QPointF(0, -1.0)
self.selectedShape.points[2] += QPointF(0, -1.0)
self.selectedShape.points[3] += QPointF(0, -1.0)
elif direction == 'Down' and not self.moveOutOfBound(QPointF(0, 1.0)):
# print("move Down one pixel")
self.selectedShape.points[0] += QPointF(0, 1.0)
self.selectedShape.points[1] += QPointF(0, 1.0)
self.selectedShape.points[2] += QPointF(0, 1.0)
self.selectedShape.points[3] += QPointF(0, 1.0)
self.selectCount = len(self.selectedShapes)
self.selectCountShape = True
for i in range(len(self.selectedShapes)):
self.selectedShape = self.selectedShapes[i]
if direction == 'Left' and not self.moveOutOfBound(QPointF(-1.0, 0)):
# print("move Left one pixel")
self.selectedShape.points[0] += QPointF(-1.0, 0)
self.selectedShape.points[1] += QPointF(-1.0, 0)
self.selectedShape.points[2] += QPointF(-1.0, 0)
self.selectedShape.points[3] += QPointF(-1.0, 0)
elif direction == 'Right' and not self.moveOutOfBound(QPointF(1.0, 0)):
# print("move Right one pixel")
self.selectedShape.points[0] += QPointF(1.0, 0)
self.selectedShape.points[1] += QPointF(1.0, 0)
self.selectedShape.points[2] += QPointF(1.0, 0)
self.selectedShape.points[3] += QPointF(1.0, 0)
elif direction == 'Up' and not self.moveOutOfBound(QPointF(0, -1.0)):
# print("move Up one pixel")
self.selectedShape.points[0] += QPointF(0, -1.0)
self.selectedShape.points[1] += QPointF(0, -1.0)
self.selectedShape.points[2] += QPointF(0, -1.0)
self.selectedShape.points[3] += QPointF(0, -1.0)
elif direction == 'Down' and not self.moveOutOfBound(QPointF(0, 1.0)):
# print("move Down one pixel")
self.selectedShape.points[0] += QPointF(0, 1.0)
self.selectedShape.points[1] += QPointF(0, 1.0)
self.selectedShape.points[2] += QPointF(0, 1.0)
self.selectedShape.points[3] += QPointF(0, 1.0)
shapesBackup = []
shapesBackup = copy.deepcopy(self.shapes)
self.shapesBackups.append(shapesBackup)
self.shapeMoved.emit()
self.repaint()
......@@ -840,6 +853,7 @@ class Canvas(QWidget):
def restoreShape(self):
if not self.isShapeRestorable:
return
self.shapesBackups.pop() # latest
shapesBackup = self.shapesBackups.pop()
self.shapes = shapesBackup
......
......@@ -124,6 +124,15 @@ def natural_sort(list, key=lambda s:s):
def get_rotate_crop_image(img, points):
# Use Green's theory to judge clockwise or counterclockwise
# author: biyanhua
d = 0.0
for index in range(-1, 3):
d += -0.5 * (points[index + 1][1] + points[index][1]) * (
points[index + 1][0] - points[index][0])
if d < 0: # counterclockwise
tmp = np.array(points)
points[1], points[3] = tmp[3], tmp[1]
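# Note added for clarity (not part of the original commit): d is the signed
# area from the shoelace formula, so its sign encodes the winding direction;
# swapping points[1] and points[3] reverses a counterclockwise quadrilateral
# into clockwise order before the crop is computed below.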
try:
img_crop_width = int(
......@@ -165,6 +174,7 @@ def stepsInfo(lang='en'):
"10. 标注结果:关闭应用程序或切换文件路径后,手动保存过的标签将会被存放在所打开图片文件夹下的" \
"*Label.txt*中。在菜单栏点击 “PaddleOCR” - 保存识别结果后,会将此类图片的识别训练数据保存在*crop_img*文件夹下," \
"识别标签保存在*rec_gt.txt*中。\n"
else:
msg = "1. Build and launch using the instructions above.\n" \
"2. Click 'Open Dir' in Menu/File to select the folder of the picture.\n"\
......@@ -178,5 +188,57 @@ def stepsInfo(lang='en'):
"8. Click 'Save', the image status will switch to '√',then the program automatically jump to the next.\n"\
"9. Click 'Delete Image' and the image will be deleted to the recycle bin.\n"\
"10. Labeling result: After closing the application or switching the file path, the manually saved label will be stored in *Label.txt* under the opened picture folder.\n"\
" Click PaddleOCR-Save Recognition Results in the menu bar, the recognition training data of such pictures will be saved in the *crop_img* folder, and the recognition label will be saved in *rec_gt.txt*.\n"
" Click PaddleOCR-Save Recognition Results in the menu bar, the recognition training data of such pictures will be saved in the *crop_img* folder, and the recognition label will be saved in *rec_gt.txt*.\n"
return msg
def keysInfo(lang='en'):
if lang == 'ch':
msg = "快捷键\t\t\t说明\n" \
"———————————————————————\n"\
"Ctrl + shift + R\t\t对当前图片的所有标记重新识别\n" \
"W\t\t\t新建矩形框\n" \
"Q\t\t\t新建四点框\n" \
"Ctrl + E\t\t编辑所选框标签\n" \
"Ctrl + R\t\t重新识别所选标记\n" \
"Ctrl + C\t\t复制并粘贴选中的标记框\n" \
"Ctrl + 鼠标左键\t\t多选标记框\n" \
"Backspace\t\t删除所选框\n" \
"Ctrl + V\t\t确认本张图片标记\n" \
"Ctrl + Shift + d\t删除本张图片\n" \
"D\t\t\t下一张图片\n" \
"A\t\t\t上一张图片\n" \
"Ctrl++\t\t\t缩小\n" \
"Ctrl--\t\t\t放大\n" \
"↑→↓←\t\t\t移动标记框\n" \
"———————————————————————\n" \
"注:Mac用户Command键替换上述Ctrl键"
else:
msg = "Shortcut Keys\t\tDescription\n" \
"———————————————————————\n" \
"Ctrl + shift + R\t\tRe-recognize all the labels\n" \
"\t\t\tof the current image\n" \
"\n"\
"W\t\t\tCreate a rect box\n" \
"Q\t\t\tCreate a four-points box\n" \
"Ctrl + E\t\tEdit label of the selected box\n" \
"Ctrl + R\t\tRe-recognize the selected box\n" \
"Ctrl + C\t\tCopy and paste the selected\n" \
"\t\t\tbox\n" \
"\n"\
"Ctrl + Left Mouse\tMulti select the label\n" \
"Button\t\t\tbox\n" \
"\n"\
"Backspace\t\tDelete the selected box\n" \
"Ctrl + V\t\tCheck image\n" \
"Ctrl + Shift + d\tDelete image\n" \
"D\t\t\tNext image\n" \
"A\t\t\tPrevious image\n" \
"Ctrl++\t\t\tZoom in\n" \
"Ctrl--\t\t\tZoom out\n" \
"↑→↓←\t\t\tMove selected box" \
"———————————————————————\n" \
"Notice:For Mac users, use the 'Command' key instead of the 'Ctrl' key"
return msg
\ No newline at end of file
......@@ -18,6 +18,8 @@
<file alias="quit">resources/icons/quit.png</file>
<file alias="copy">resources/icons/copy.png</file>
<file alias="edit">resources/icons/edit.png</file>
<file alias="rotateLeft">resources/icons/rotateLeft.png</file>
<file alias="rotateRight">resources/icons/rotateRight.png</file>
<file alias="open">resources/icons/open.png</file>
<file alias="save">resources/icons/save.png</file>
<file alias="format_voc">resources/icons/format_voc.png</file>
......
......@@ -31,6 +31,7 @@ save=确认
saveAs=另存为
fitWinDetail=缩放到当前窗口大小
openDir=打开目录
openDatasetDir=打开数据集路径
copyPrevBounding=复制当前图像中的上一个边界框
showHide=显示/隐藏标签
changeSaveFormat=更改存储格式
......@@ -85,18 +86,22 @@ detectionBoxposition=检测框位置
recognitionResult=识别结果
creatPolygon=四点标注
drawSquares=正方形标注
saveRec=保存识别结果
rotateLeft=图片左旋转90度
rotateRight=图片右旋转90度
saveRec=导出识别结果
tempLabel=待识别
nullLabel=无法识别
steps=操作步骤
keys=快捷键
choseModelLg=选择模型语言
cancel=取消
ok=确认
autolabeling=自动标注中
hideBox=隐藏所有标注
showBox=显示所有标注
saveLabel=保存标记结果
saveLabel=导出标记结果
singleRe=重识别此区块
labelDialogOption=弹出标记输入框
undo=撤销
undoLastPoint=撤销上个点
autoSaveMode=自动保存标记结果
\ No newline at end of file
autoSaveMode=自动导出标记结果
\ No newline at end of file
......@@ -3,6 +3,7 @@ openFileDetail=Open image or label file
quit=Quit
quitApp=Quit application
openDir=Open Dir
openDatasetDir=Open DatasetDir
copyPrevBounding=Copy previous Bounding Boxes in the current image
changeSavedAnnotationDir=Change default saved Annotation dir
openAnnotation=Open Annotation
......@@ -77,26 +78,30 @@ IR=Image Resize
autoRecognition=Auto Recognition
reRecognition=Re-recognition
mfile=File
medit=Eidt
medit=Edit
mview=View
mhelp=Help
iconList=Icon List
detectionBoxposition=Detection box position
recognitionResult=Recognition result
creatPolygon=Create Quadrilateral
rotateLeft=Left turn 90 degrees
rotateRight=Right turn 90 degrees
drawSquares=Draw Squares
saveRec=Save Recognition Result
saveRec=Export Recognition Result
tempLabel=TEMPORARY
nullLabel=NULL
steps=Steps
keys=Shortcut Keys
choseModelLg=Choose Model Language
cancel=Cancel
ok=OK
autolabeling=Automatic Labeling
hideBox=Hide All Box
showBox=Show All Box
saveLabel=Save Label
saveLabel=Export Label
singleRe=Re-recognition RectBox
labelDialogOption=Pop-up Label Input Dialog
undo=Undo
undoLastPoint=Undo Last Point
autoSaveMode=Auto Save Label Mode
\ No newline at end of file
autoSaveMode=Auto Export Label Mode
\ No newline at end of file
......@@ -32,7 +32,8 @@ PaddleOCR supports both dynamic graph and static graph programming paradigm
<div align="center">
<img src="doc/imgs_results/ch_ppocr_mobile_v2.0/test_add_91.jpg" width="800">
<img src="doc/imgs_results/ch_ppocr_mobile_v2.0/00018069.jpg" width="800">
<img src="doc/imgs_results/multi_lang/img_01.jpg" width="800">
<img src="doc/imgs_results/multi_lang/img_02.jpg" width="800">
</div>
The above pictures are visualizations of the general ppocr_server model. For more visualization results, please see [More visualizations](./doc/doc_en/visualization_en.md).
......@@ -42,7 +43,7 @@ The above pictures are the visualizations of the general ppocr_server model. For
- Scan the QR code below with WeChat to access the official technical exchange group. We look forward to your participation.
<div align="center">
<img src="https://raw.githubusercontent.com/PaddlePaddle/PaddleOCR/release/2.0/doc/joinus.PNG" width = "200" height = "200" />
<img src="https://raw.githubusercontent.com/PaddlePaddle/PaddleOCR/dygraph/doc/joinus.PNG" width = "200" height = "200" />
</div>
......@@ -94,7 +95,7 @@ For a new language request, please refer to [Guideline for new language_requests
- [Python Inference](./doc/doc_en/inference_en.md)
- [C++ Inference](./deploy/cpp_infer/readme_en.md)
- [Serving](./deploy/pdserving/README.md)
- [Mobile](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/deploy/lite/readme_en.md)
- [Mobile](./deploy/lite/readme_en.md)
- [Benchmark](./doc/doc_en/benchmark_en.md)
- Data Annotation and Synthesis
- [Semi-automatic Annotation Tool: PPOCRLabel](./PPOCRLabel/README.md)
......
......@@ -8,9 +8,9 @@ PaddleOCR同时支持动态图与静态图两种编程范式
- 静态图版本:develop分支
**近期更新**
- 2021.4.8 release 2.1版本,新增AAAI 2021论文[端到端识别算法PGNet](./doc/doc_ch/pgnet.md)开源,[多语言模型](./doc/doc_ch/multi_languages.md)支持种类增加到80+。
- 2021.2.1 [FAQ](./doc/doc_ch/FAQ.md)新增5个高频问题,总数162个,每周一都会更新,欢迎大家持续关注。
- 2021.1.26,28,29 PaddleOCR官方研发团队带来技术深入解读三日直播课,1月26日、28日、29日晚上19:30,[直播地址](https://live.bilibili.com/21689802)
- 2021.1.21 更新多语言识别模型,目前支持语种超过27种,[多语言模型下载](./doc/doc_ch/models_list.md),包括中文简体、中文繁体、英文、法文、德文、韩文、日文、意大利文、西班牙文、葡萄牙文、俄罗斯文、阿拉伯文等,后续计划可以参考[多语言研发计划](https://github.com/PaddlePaddle/PaddleOCR/issues/1048)
- 2021.1.21 更新多语言识别模型,目前支持语种超过27种,包括中文简体、中文繁体、英文、法文、德文、韩文、日文、意大利文、西班牙文、葡萄牙文、俄罗斯文、阿拉伯文等,后续计划可以参考[多语言研发计划](https://github.com/PaddlePaddle/PaddleOCR/issues/1048)
- 2020.12.15 更新数据合成工具[Style-Text](./StyleText/README_ch.md),可以批量合成大量与目标场景类似的图像,在多个场景验证,效果明显提升。
- 2020.11.25 更新半自动标注工具[PPOCRLabel](./PPOCRLabel/README_ch.md),辅助开发者高效完成标注任务,输出格式与PP-OCR训练任务完美衔接。
- 2020.9.22 更新PP-OCR技术文章,https://arxiv.org/abs/2009.09941
......@@ -46,7 +46,7 @@ PaddleOCR同时支持动态图与静态图两种编程范式
- 微信扫描二维码加入官方交流群,获得更高效的问题答疑,与各行各业开发者充分交流,期待您的加入。
<div align="center">
<img src="https://raw.githubusercontent.com/PaddlePaddle/PaddleOCR/release/2.0/doc/joinus.PNG" width = "200" height = "200" />
<img src="https://raw.githubusercontent.com/PaddlePaddle/PaddleOCR/dygraph/doc/joinus.PNG" width = "200" height = "200" />
</div>
## 快速体验
......@@ -74,11 +74,13 @@ PaddleOCR同时支持动态图与静态图两种编程范式
## 文档教程
- [快速安装](./doc/doc_ch/installation.md)
- [中文OCR模型快速使用](./doc/doc_ch/quickstart.md)
- [多语言OCR模型快速使用](./doc/doc_ch/multi_languages.md)
- [代码组织结构](./doc/doc_ch/tree.md)
- 算法介绍
- [文本检测](./doc/doc_ch/algorithm_overview.md)
- [文本识别](./doc/doc_ch/algorithm_overview.md)
- [PP-OCR Pipline](#PP-OCR)
- [PP-OCR Pipeline](#PP-OCR)
- [端到端PGNet算法](./doc/doc_ch/pgnet.md)
- 模型训练/评估
- [文本检测](./doc/doc_ch/detection.md)
- [文本识别](./doc/doc_ch/recognition.md)
......@@ -89,7 +91,7 @@ PaddleOCR同时支持动态图与静态图两种编程范式
- [基于Python脚本预测引擎推理](./doc/doc_ch/inference.md)
- [基于C++预测引擎推理](./deploy/cpp_infer/readme.md)
- [服务化部署](./deploy/pdserving/README_CN.md)
- [端侧部署](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/deploy/lite/readme.md)
- [端侧部署](./deploy/lite/readme.md)
- [Benchmark](./doc/doc_ch/benchmark.md)
- 数据集
- [通用中英文OCR数据集](./doc/doc_ch/datasets.md)
......@@ -112,7 +114,7 @@ PaddleOCR同时支持动态图与静态图两种编程范式
<a name="PP-OCR"></a>
## PP-OCR Pipline
## PP-OCR Pipeline
<div align="center">
<img src="./doc/ppocr_framework.png" width="800">
</div>
......
......@@ -66,6 +66,7 @@ class StdTextDrawer(object):
corpus_list.append(corpus[0:i])
text_input_list.append(text_input)
corpus = corpus[i:]
i = 0
break
draw.text((char_x, 2), char_i, fill=(0, 0, 0), font=font)
char_x += char_size
......@@ -78,7 +79,6 @@ class StdTextDrawer(object):
corpus_list.append(corpus[0:i])
text_input_list.append(text_input)
corpus = corpus[i:]
break
return corpus_list, text_input_list
......@@ -11,7 +11,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddleocr
from .paddleocr import *
__all__ = ['PaddleOCR', 'draw_ocr']
from .paddleocr import PaddleOCR
from .tools.infer.utility import draw_ocr
__version__ = paddleocr.VERSION
__all__ = ['PaddleOCR', 'PPStructure', 'draw_ocr', 'draw_structure_result', 'save_structure_res','download_with_progressbar']
......@@ -7,11 +7,6 @@ Global:
save_epoch_step: 1200
# evaluation is run every 5000 iterations after the 4000th iteration
eval_batch_step: [3000, 2000]
# 1. If pretrained_model is saved in static mode, such as classification pretrained model
# from static branch, load_static_weights must be set as True.
# 2. If you want to finetune the pretrained models we provide in the docs,
# you should set load_static_weights as False.
load_static_weights: True
cal_metric_during_train: False
pretrained_model: ./pretrain_models/MobileNetV3_large_x0_5_pretrained
checkpoints:
......
......@@ -7,11 +7,6 @@ Global:
save_epoch_step: 1200
# evaluation is run every 5000 iterations after the 4000th iteration
eval_batch_step: [3000, 2000]
# 1. If pretrained_model is saved in static mode, such as classification pretrained model
# from static branch, load_static_weights must be set as True.
# 2. If you want to finetune the pretrained models we provide in the docs,
# you should set load_static_weights as False.
load_static_weights: True
cal_metric_during_train: False
pretrained_model: ./pretrain_models/ResNet18_vd_pretrained
checkpoints:
......
Global:
use_gpu: true
epoch_num: 1200
log_smooth_window: 20
print_batch_step: 2
save_model_dir: ./output/ch_db_mv3/
save_epoch_step: 1200
# evaluation is run every 5000 iterations after the 4000th iteration
eval_batch_step: [3000, 2000]
cal_metric_during_train: False
pretrained_model: ./pretrain_models/MobileNetV3_large_x0_5_pretrained
checkpoints:
save_inference_dir:
use_visualdl: False
infer_img: doc/imgs_en/img_10.jpg
save_res_path: ./output/det_db/predicts_db.txt
Architecture:
name: DistillationModel
algorithm: Distillation
Models:
Student:
pretrained: ./pretrain_models/MobileNetV3_large_x0_5_pretrained
freeze_params: false
return_all_feats: false
model_type: det
algorithm: DB
Backbone:
name: MobileNetV3
scale: 0.5
model_name: large
disable_se: True
Neck:
name: DBFPN
out_channels: 96
Head:
name: DBHead
k: 50
Student2:
pretrained: ./pretrain_models/MobileNetV3_large_x0_5_pretrained
freeze_params: false
return_all_feats: false
model_type: det
algorithm: DB
Transform:
Backbone:
name: MobileNetV3
scale: 0.5
model_name: large
disable_se: True
Neck:
name: DBFPN
out_channels: 96
Head:
name: DBHead
k: 50
Teacher:
pretrained: ./pretrain_models/ch_ppocr_server_v2.0_det_train/best_accuracy
freeze_params: true
return_all_feats: false
model_type: det
algorithm: DB
Transform:
Backbone:
name: ResNet
layers: 18
Neck:
name: DBFPN
out_channels: 256
Head:
name: DBHead
k: 50
Loss:
name: CombinedLoss
loss_config_list:
- DistillationDilaDBLoss:
weight: 1.0
model_name_pairs:
- ["Student", "Teacher"]
- ["Student2", "Teacher"]
key: maps
balance_loss: true
main_loss_type: DiceLoss
alpha: 5
beta: 10
ohem_ratio: 3
- DistillationDMLLoss:
model_name_pairs:
- ["Student", "Student2"]
maps_name: "thrink_maps"
weight: 1.0
# act: None
model_name_pairs: ["Student", "Student2"]
key: maps
- DistillationDBLoss:
weight: 1.0
model_name_list: ["Student", "Student2"]
# key: maps
# name: DBLoss
balance_loss: true
main_loss_type: DiceLoss
alpha: 5
beta: 10
ohem_ratio: 3
Optimizer:
name: Adam
beta1: 0.9
beta2: 0.999
lr:
name: Cosine
learning_rate: 0.001
warmup_epoch: 2
regularizer:
name: 'L2'
factor: 0
PostProcess:
name: DistillationDBPostProcess
model_name: ["Student", "Student2", "Teacher"]
# key: maps
thresh: 0.3
box_thresh: 0.6
max_candidates: 1000
unclip_ratio: 1.5
Metric:
name: DistillationMetric
base_metric_name: DetMetric
main_indicator: hmean
key: "Student"
Train:
dataset:
name: SimpleDataSet
data_dir: ./train_data/icdar2015/text_localization/
label_file_list:
- ./train_data/icdar2015/text_localization/train_icdar2015_label.txt
ratio_list: [1.0]
transforms:
- DecodeImage: # load image
img_mode: BGR
channel_first: False
- DetLabelEncode: # Class handling label
- IaaAugment:
augmenter_args:
- { 'type': Fliplr, 'args': { 'p': 0.5 } }
- { 'type': Affine, 'args': { 'rotate': [-10, 10] } }
- { 'type': Resize, 'args': { 'size': [0.5, 3] } }
- EastRandomCropData:
size: [960, 960]
max_tries: 50
keep_ratio: true
- MakeBorderMap:
shrink_ratio: 0.4
thresh_min: 0.3
thresh_max: 0.7
- MakeShrinkMap:
shrink_ratio: 0.4
min_text_size: 8
- NormalizeImage:
scale: 1./255.
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: 'hwc'
- ToCHWImage:
- KeepKeys:
keep_keys: ['image', 'threshold_map', 'threshold_mask', 'shrink_map', 'shrink_mask'] # the order of the dataloader list
loader:
shuffle: True
drop_last: False
batch_size_per_card: 8
num_workers: 4
Eval:
dataset:
name: SimpleDataSet
data_dir: ./train_data/icdar2015/text_localization/
label_file_list:
- ./train_data/icdar2015/text_localization/test_icdar2015_label.txt
transforms:
- DecodeImage: # load image
img_mode: BGR
channel_first: False
- DetLabelEncode: # Class handling label
- DetResizeForTest:
# image_shape: [736, 1280]
- NormalizeImage:
scale: 1./255.
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: 'hwc'
- ToCHWImage:
- KeepKeys:
keep_keys: ['image', 'shape', 'polys', 'ignore_tags']
loader:
shuffle: False
drop_last: False
batch_size_per_card: 1 # must be 1
num_workers: 2