#!/bin/sh
usage="Usage: $0 gtdir gtevaldir oldtraineddata trainingname"

test $# -ne 4 && echo "$usage" && exit 1
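
# Example invocation (arguments are illustrative):
#   ./traintessv4.sh gt-train/ gt-eval/ eng.traineddata myfinetune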

## Settings ##
# This retrains the top layers
#extra="--append_index 5"
#netspec="[Lfx512 O1c1]"
# This fine-tunes the existing layers (copying the existing best/eng netspec)
extra=""
netspec="[1,36,0,1 Ct3,3,16 Mp3,3 Lfys64 Lfx96 Lrx96 Lfx512 O1c1]"
iterations=10000

oldtraining="$3"
name="$4"

mkdir -p "$name"

printf 'gtdir: %s\ngtevaldir: %s\noldtraineddata: %s\ntrainingname: %s\nnetspec: %s\niterations: %s\nextra_args: %s\n' \
	"$1" "$2" "$3" "$4" "$netspec" $iterations "$extra" > "$name/settings"


echo "Copying training ground truth"
mkdir -p "$name/gt"
find "$1" -type f -name '*tif' -o -name '*png' -o -name '*txt' -o -name '*box' | while read i; do
	n=`basename "$i" | sed 's/\.bin\.png$/.png/g; s/\.gt\.txt$/.txt/g'`
	cp "$i" "$name/gt/$n"
done

echo "Copying eval ground truth"
mkdir -p "$name/eval"
find "$2" -type f -name '*tif' -o -name '*png' -o -name '*txt' -o -name '*box' | while read i; do
	n=`basename "$i" | sed 's/\.bin\.png$/.png/g; s/\.gt\.txt$/.txt/g'`
	cp "$i" "$name/eval/$n"
done

echo "Making box files"
find "$name/gt" "$name/eval" -type f -name '*txt' | while read i; do
	b=`basename "$i" .txt`
	d=`dirname "$i"`
	n=""
	test -f "$d/$b.tif" && n="$b.tif"
	test -f "$d/$b.png" && n="$b.png"
	test -z "$n" && echo "Skipping $i as no corresponding image found" && continue
	test -f "$d/$b.box" && echo "Skipping $i as box file already present" && continue
	python ~/training/generate_line_box.py -i "$d/$n" -t "$i" > "$d/$b.box" || exit 1
done

echo "Making unicharset"
unicharset_extractor --output_unicharset "$name/gt/unicharset" --norm_mode 2 "$name/gt/"*box || exit 1

echo "Making lstmf files"
find "$name/gt" "$name/eval" -type f -name '*box' | while read i; do
	b=`basename "$i" .box`
	d=`dirname "$i"`
	test -f "$d/$b.tif" && n="$b.tif"
	test -f "$d/$b.png" && n="$b.png"
	echo "making lstm for $d/$n"
	tesseract "$d/$n" "$d/$b" --psm 6 lstm.train || exit 1
done

echo "Listing lstmf files"
find "$name/gt" -type f -name '*lstmf' > "$name/gt/list"
find "$name/eval" -type f -name '*lstmf' > "$name/eval/list"

echo "Unpacking old training"
mkdir -p "$name/orig"
cp "$oldtraining" "$name/orig/orig.traineddata"
combine_tessdata -u "$name/orig/orig.traineddata" "$name/orig/orig" || exit 1

echo "Making complete unicharset"
merge_unicharsets "$name/gt/unicharset" "$name/orig/orig.lstm-unicharset" "$name/unicharset" || exit 1

echo "Making starter training"
mkdir -p "$name/starter"
curl -L -f 'https://github.com/tesseract-ocr/langdata_lstm/raw/master/radical-stroke.txt' > "$name/starter/radical-stroke.txt" || exit 1
combine_lang_model --input_unicharset "$name/unicharset" --script_dir "$name/starter" --output_dir "$name/starter" --lang "$name" || exit 1

mkdir -p "$name/checkpoint"

echo "Starting training"
lstmtraining \
	--traineddata "$name/starter/$name/$name.traineddata" \
	--old_traineddata "$name/orig/orig.traineddata" \
	--continue_from "$name/orig/orig.lstm" \
	--net_spec "$netspec" \
	--model_output "$name/checkpoint/$name" \
	--learning_rate 20e-4 \
	--train_listfile "$name/gt/list" \
	--eval_listfile "$name/eval/list" \
	--max_iterations $iterations \
	$extra || exit 1

echo "Saving training"
lstmtraining \
	--stop_training \
	--continue_from "$name/checkpoint/${name}_checkpoint" \
	--traineddata "$name/starter/$name/$name.traineddata" \
	--model_output "$name/$name.traineddata"