<!DOCTYPE html>
<html>
<body>
<style type="text/css" >
#T_bf535_ td {
text-align: left;
white-space: pre-wrap;
font-family: Arial;
font-size: 11pt;
}</style><table id="T_bf535_" border="1"><thead> <tr> <th class="col_heading level0 col0" >Version</th> <th class="col_heading level0 col1" >Chapter</th> <th class="col_heading level0 col2" >Page</th> <th class="col_heading level0 col3" >Location</th> <th class="col_heading level0 col4" >Description</th> </tr></thead><tbody>
<tr>
<td id="T_bf535_row0_col0" class="data row0 col0" >Safari Books Online</td>
<td id="T_bf535_row0_col1" class="data row0 col1" >-1</td>
<td id="T_bf535_row0_col2" class="data row0 col2" >-1</td>
<td id="T_bf535_row0_col3" class="data row0 col3" >?
Section: Computing Gradients Using Autodiff</td>
<td id="T_bf535_row0_col4" class="data row0 col4" >Super minor typo: just replace
you must call the tape’s jabobian() method
with
you must call the tape’s jacobian() method</td>
</tr>
<tr>
<td id="T_bf535_row1_col0" class="data row1 col0" >Safari Books Online</td>
<td id="T_bf535_row1_col1" class="data row1 col1" >-1</td>
<td id="T_bf535_row1_col2" class="data row1 col2" >-1</td>
<td id="T_bf535_row1_col3" class="data row1 col3" >"Changes in the Second Edition," Numbered List Point 1</td>
<td id="T_bf535_row1_col4" class="data row1 col4" >'covolutional' should be 'convolutional' (missing an 'n').
(I couldn't find page numbers in the Safari Books Online iPad app.)
Note from the Author or Editor:Good catch, thanks. Fixed.</td>
</tr>
<tr>
<td id="T_bf535_row2_col0" class="data row2 col0" >Safari Books Online</td>
<td id="T_bf535_row2_col1" class="data row2 col1" >-1</td>
<td id="T_bf535_row2_col2" class="data row2 col2" >-1</td>
<td id="T_bf535_row2_col3" class="data row2 col3" >??
Right under "Training and Evaluating the Model"</td>
<td id="T_bf535_row2_col4" class="data row2 col4" >When I fit the model (including on Google Colab), it shows progress out of 1719 rather than out of 55000 (as shown in the book), even though X_train has 55000 rows. What's going on?
Note from the Author or Editor:Thanks for your question!
Keras changed the way it displays progress during training since I wrote the book (after a bit of investigation, it looks like it happened in TensorFlow 2.2). Keras used to display the number of samples processed so far during the epoch (something like 38816/55000), but it now shows the number of *batches* processed so far. So if the batch size is 32 (which is the default) then there are math.ceil(55000/32)=1719 batches per epoch, so you would see 1213/1719 (instead of 38816/55000).
I'll update the book to show the new format.
Thanks a lot!
Cheers,
Aurelien</td>
</tr>
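<tr>
<td colspan="5">A minimal sketch of the batch arithmetic mentioned above (assuming the book's 55,000-sample training set and Keras's default batch size of 32):
<pre>
import math

samples_per_epoch = 55000   # size of the training set in this example
batch_size = 32             # Keras's default batch size in model.fit()
print(math.ceil(samples_per_epoch / batch_size))   # 1719, matching the "x/1719" progress bar
</pre></td>
</tr>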
<tr>
<td id="T_bf535_row3_col0" class="data row3 col0" >Safari Books Online</td>
<td id="T_bf535_row3_col1" class="data row3 col1" >1</td>
<td id="T_bf535_row3_col2" class="data row3 col2" >1</td>
<td id="T_bf535_row3_col3" class="data row3 col3" >1
First line.</td>
<td id="T_bf535_row3_col4" class="data row3 col4" >First sentence reads...
"When most people hear 'Machine Learning,' they picture a robot: a dependable butler or a deadly Terminator, depending on who you ask."
It's not "...who you ask," it's "... whom you ask."
Should use proper English, at least in the very first sentence of the book.
You would not say "You ask he," you'd say "You ask him."
Note from the Author or Editor:Thanks for your feedback. As you might know, I am French, so please forgive my English mistakes. The he/him rule is very helpful.
It's interesting that no one pointed out this error to me before, even though it's in the very first sentence! :) I think it goes to show that people are getting used to this mistake, to the point that many people on the Web seem to argue that "whom" now sounds too formal. Perhaps in a few decades it will no longer be considered a mistake.
That said, of course, I've fixed the book now, thanks again!</td>
</tr>
<tr>
<td id="T_bf535_row4_col0" class="data row4 col0" >PDF</td>
<td id="T_bf535_row4_col1" class="data row4 col1" >1</td>
<td id="T_bf535_row4_col2" class="data row4 col2" >14</td>
<td id="T_bf535_row4_col3" class="data row4 col3" >Page 14
First paragraph - First line</td>
<td id="T_bf535_row4_col4" class="data row4 col4" >an additional "ag" next to "is" : "Reinforcement Learning isag a very" -> "Reinforcement Learning is a very"
Note from the Author or Editor:Good catch, thanks. I fixed this typo, it should be fine now in the electronic versions, and it will be correct in the 2nd release of the book (printed in October).</td>
</tr>
<tr>
<td id="T_bf535_row5_col0" class="data row5 col0" >Printed</td>
<td id="T_bf535_row5_col1" class="data row5 col1" >1</td>
<td id="T_bf535_row5_col2" class="data row5 col2" >14</td>
<td id="T_bf535_row5_col3" class="data row5 col3" >Page 14
2nd line</td>
<td id="T_bf535_row5_col4" class="data row5 col4" >Reinforcement Learning isag a very different beast.
Note from the Author or Editor:Good catch, thanks!</td>
</tr>
<tr>
<td id="T_bf535_row6_col0" class="data row6 col0" >PDF</td>
<td id="T_bf535_row6_col1" class="data row6 col1" >1</td>
<td id="T_bf535_row6_col2" class="data row6 col2" >30</td>
<td id="T_bf535_row6_col3" class="data row6 col3" >Page 30
Bullet pt listing in "Underfitting the Training Data" section</td>
<td id="T_bf535_row6_col4" class="data row6 col4" >The list of methods to counter underfitting is in plain text, while the analogous list with regards to overfitting in the previous section was highlighted in a warning/caution frame; might want to adjust.
Note from the Author or Editor:Thanks, good point. I'll change the underfitting section to use a warning frame.</td>
</tr>
<tr>
<td id="T_bf535_row7_col0" class="data row7 col0" >Printed</td>
<td id="T_bf535_row7_col1" class="data row7 col1" >1</td>
<td id="T_bf535_row7_col2" class="data row7 col2" >143</td>
<td id="T_bf535_row7_col3" class="data row7 col3" >Page 143
Eq 4-13</td>
<td id="T_bf535_row7_col4" class="data row7 col4" >(3rd release)
In Eq 4-13, bottom line of p143 and Eq 4-19,
x^T \theta^{(k)} is used
But for matching the order of theta and x in other places,
I suggest (\theta^{(k)})^T x or \theta^T x
Thanks
Note from the Author or Editor:Thanks for your suggestion, I fixed the 3 instances you pointed out.
FYI, I hesitated between "x^T theta" and "theta^T x" because the first linear equation in chapter 1 is written y = theta0 x0 + theta1 x1 + ..., which naturally translates to y = theta^T x. It would be weird to write y = x0 theta0 + x1 theta1 + ...
However, when dealing with matrices, one typically writes y = X W: here, X has to appear first (and there's no transpose), because each row of X already corresponds to a transposed feature vector. I remember being confused the first time I saw this, so I wanted to quickly transition from theta-first to X-first. However, I was not careful enough, so I ended up having a confusing mixture of both! Oops... I think you're right that consistently using theta-first before we really tackle matrices is probably better.</td>
</tr>
<tr>
<td id="T_bf535_row8_col0" class="data row8 col0" >PDF</td>
<td id="T_bf535_row8_col1" class="data row8 col1" >2</td>
<td id="T_bf535_row8_col2" class="data row8 col2" >47</td>
<td id="T_bf535_row8_col3" class="data row8 col3" >Page 47
End of virtualenv box</td>
<td id="T_bf535_row8_col4" class="data row8 col4" >This is an error of omission.
If we are going to be using Jupyter in a virtual environment, then we must also set up Jupyter to use the libraries associated with said environment.
This requires the following two steps:
$ python3 -m pip install -U ipykernel
$ python3 -m ipykernel install --user --name=my_env
After that, when starting jupyter you can select "my_env" and start working in that environment.
Note from the Author or Editor:Thanks Mohammed, great catch! Since the ipykernel package is installed automatically along with jupyter, the first command is not required, but the second is important (at least if you plan to have more than one virtualenv, which is the whole point).
I updated the book like this:
--------------------------------------------
$ python3 -m pip install -U jupyter matplotlib numpy pandas scipy scikit-learn
Collecting jupyter
Downloading https://[...]/jupyter-1.0.0-py2.py3-none-any.whl
Collecting matplotlib
[...]
If you created a virtualenv, you need to register it to Jupyter and give it a name:
$ python3 -m ipykernel install --user --name=python3
Now you can fire up Jupyter by typing the following command:
$ jupyter notebook
[...] Serving notebooks from local directory: [...]/ml
[...] The Jupyter Notebook is running at:
[...] http://localhost:8888/?token=60995e108e44ac8d8865a[...]
[...] or http://127.0.0.1:8889/?token=60995e108e44ac8d8865a[...]
[...] Use Control-C to stop this server and shut down all kernels [...]
--------------------------------------------
Notice that I removed this section:
--------------------------------------------
To check your installation, try to import every module like this:
$ python3 -c "import jupyter, matplotlib, numpy, pandas, scipy, sklearn"
There should be no output and no error.
--------------------------------------------
This is because I didn't want the layout of the book to be affected too much, and this paragraph is not necessary since users will notice if there are errors in the previous steps.
Again, thanks a lot for your great feedback!</td>
</tr>
<tr>
<td id="T_bf535_row9_col0" class="data row9 col0" >Printed</td>
<td id="T_bf535_row9_col1" class="data row9 col1" >2</td>
<td id="T_bf535_row9_col2" class="data row9 col2" >67</td>
<td id="T_bf535_row9_col3" class="data row9 col3" >Page 67
Second paragraph</td>
<td id="T_bf535_row9_col4" class="data row9 col4" >"After one- hot encoding we get a matrix with thousands of columns, and the matrix is full of zeros except for one 1 per row."
The resulting matrix has thousands of ROWS, but only 5 columns. The code output directly after this text gives an example.
Note from the Author or Editor:Thanks for your feedback.
I see how this paragraph can be confusing. Please let me clarify. The paragraph starts with:
"""
Notice that the output is a SciPy _sparse matrix_, instead of a NumPy array. This is very useful when you have categorical attributes with thousands of categories. After one-hot encoding, we get a matrix with thousands of columns, and the matrix is full of 0s except for a single 1 per row. [...]
"""
My goal here was to explain that one-hot encoding categorical attributes with thousands of categories will result in a matrix with thousands of columns, in which case it's useful to have a sparse matrix, and that's the reason why the `OneHotEncoder` produces a sparse matrix.
The sentence "After one-hot encoding, we get..." is in the context of the previous sentence "This is very useful when you have categorical attributes with thousands of categories."
But I see how it's possible to interpret the sentence "This is very useful..." as a side comment, independent from the following sentence. In this case, "After one-hot encoding..." would seem to refer to the actual output of the previous code example.
I've rephrased the paragraph to make it clearer:
"""
Notice that the output is a SciPy _sparse matrix_, instead of a NumPy array. This is very useful when you have categorical attributes with thousands of categories, since in this case one-hot encoding will produce a matrix with thousands of columns, and this matrix would be full of 0s, except for a single 1 per row. Using tons of memory mostly to store zeros would be very wasteful, so instead a sparse matrix only stores the location of the nonzero elements. You can use it mostly like a normal 2D array, but if you really want to convert it to a (dense) NumPy array, just call the `toarray()` method:
"""
Thanks again for your feedback!
Cheers,
Aurelien</td>
</tr>
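<tr>
<td colspan="5">A minimal sketch of the sparse output described above; the toy color categories are illustrative, not the book's housing data:
<pre>
import numpy as np
from sklearn.preprocessing import OneHotEncoder

categories = np.array([["red"], ["green"], ["blue"], ["green"]])   # toy categorical column
encoder = OneHotEncoder()
encoded = encoder.fit_transform(categories)   # a SciPy sparse matrix, not a NumPy array
print(encoded.toarray())                      # dense array: a single 1 per row, 0s elsewhere
</pre></td>
</tr>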
<tr>
<td id="T_bf535_row10_col0" class="data row10 col0" >Safari Books Online</td>
<td id="T_bf535_row10_col1" class="data row10 col1" >3</td>
<td id="T_bf535_row10_col2" class="data row10 col2" >-1</td>
<td id="T_bf535_row10_col3" class="data row10 col3" >ch. 3
Tip under Figure 3-6</td>
<td id="T_bf535_row10_col4" class="data row10 col4" >The tip ends by noting that the PR curve "could be closer to the top-left corner". Assuming you're referring to Figure 3-5, does this mean the top-right corner? That curve, of course, hits the top-left corner.
In either case, it's still not entirely clear to me *why* the ROC is more affected by skewed data. Perhaps this tip could be expanded.</td>
</tr>
<tr>
<td id="T_bf535_row11_col0" class="data row11 col0" >Other Digital Version</td>
<td id="T_bf535_row11_col1" class="data row11 col1" >3</td>
<td id="T_bf535_row11_col2" class="data row11 col2" >-1</td>
<td id="T_bf535_row11_col3" class="data row11 col3" >Ch 3 (code)
Cell 24</td>
<td id="T_bf535_row11_col4" class="data row11 col4" >When I run all of the cells up through
from sklearn.metrics import precision_score, recall_score
precision_score(y_train_5, y_train_pred)
in the Jupyter notebook from GitHub (running on Colab), I get 0.837..., not the 0.729... shown in the (Safari) book.
I believe the problem occurs at least as early as cell 22 (the confusion matrix two cells earlier), which gives:
array([[53892, 687],
[ 1891, 3530]])
rather than the 53057, 1522, 1325, 4096 shown in the book.
This makes cell 25,
4096 / (4096 + 1522)
rather mysterious, as the numbers 4096 and 1522 now seem to come out of nowhere.
Note from the Author or Editor:Thanks for your feedback. Indeed, making the code perfectly reproducible for several years turns out to be quite a challenge! Every time a new version of Scikit-Learn (or NumPy, Keras, TensorFlow, Matplotlib, Pandas) is released, I have to check all the notebooks to ensure they still produce the same output. The most common source of changes is when the default value of some hyperparameter is modified. For example, if the default number of iterations changes, then all the results change. I managed to keep up with this up to now by explicitly setting some of the hyperparameter values to their old default value (or in some cases, to their new default value, when they were announced in advance). You'll see some comments about this in the notebooks.
Unfortunately, sometimes the algorithms themselves get tweaked slightly, and there's really nothing I can do about that. I was fortunate enough to be mostly spared by this problem for the 1st edition, but my luck ran out:
* Scikit-Learn 0.21 fixed some bug in SGDClassifier (and many other models), so models now produce slightly different results (see https://scikit-learn.org/0.21/whats_new.html#id6). This happened a couple months after I had finished writing the book, and it was off to press.
* As if this wasn't enough, TensorFlow 2.1 completely changed the way it generates random numbers, compared to TensorFlow 2.0. So pretty much all TensorFlow models give slightly different results now, and there's no going back.
The only way to reproduce the exact results from the book is to revert to previous versions of Scikit-Learn and TensorFlow. However, I don't recommend this solution. It's preferable to just accept the fact that there will be (hopefully small) differences between the text and the results you get.
In the short term, I'll add warnings to the Jupyter notebooks to explain that the results might differ slightly from the book (and explain why).
Then when I have time, I'll run all the notebooks using the latest version of all libraries, and I'll update all the code examples in the book that need to be changed. Oh wow... This book is so much work... sigh... ;-)
Thanks again for your help.</td>
</tr>
<tr>
<td id="T_bf535_row12_col0" class="data row12 col0" >Safari Books Online</td>
<td id="T_bf535_row12_col1" class="data row12 col1" >3</td>
<td id="T_bf535_row12_col2" class="data row12 col2" >1</td>
<td id="T_bf535_row12_col3" class="data row12 col3" >1
Chapter 3 - Threshold test</td>
<td id="T_bf535_row12_col4" class="data row12 col4" >The following code is used to describe the effect of threshold adjustments on the recall.
>>> threshold = 8000
>>> y_some_digit_pred = (y_scores > threshold)
>>> y_some_digit_pred
array([ True])
The result should be array([False]), as indicated on the GitHub project:
https://github.com/ageron/handson-ml2/blob/master/03_classification.ipynb
An output of 'array([ True])' would indicate that adjusting the threshold had no impact on the recall.
Note from the Author or Editor:Great catch!
Indeed, this was a copy/paste error, thanks for spotting it, I just fixed the book, the fix will be in the next release. I wrote a script that verifies that all the code examples in the book are present in the notebook, but right now it does not look at the outputs, I'll fix that.
Thanks again!
Aurélien</td>
</tr>
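<tr>
<td colspan="5">A minimal sketch of why the corrected output is array([False]); the score value is hypothetical, standing in for sgd_clf.decision_function([some_digit]) in the book's example:
<pre>
import numpy as np

y_scores = np.array([2164.22])   # hypothetical decision score for one instance

print(y_scores > 0)      # array([ True])  -- predicted positive with the default threshold
print(y_scores > 8000)   # array([False]) -- raising the threshold flips the prediction
</pre></td>
</tr>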
<tr>
<td id="T_bf535_row13_col0" class="data row13 col0" >Safari Books Online</td>
<td id="T_bf535_row13_col1" class="data row13 col1" >3</td>
<td id="T_bf535_row13_col2" class="data row13 col2" >3</td>
<td id="T_bf535_row13_col3" class="data row13 col3" >3
Chapter 3. Classification / Confusion Matrix / Equation 3-1. Precision</td>
<td id="T_bf535_row13_col4" class="data row13 col4" >Sorry about my language. In Chapter 3. Classification / Confusion Matrix / Equation 3-1. Precision and Equation 3-2. Recall and Equation 3-3. F1
I do not see the division sign. Can you check all equations?
Note from the Author or Editor:Thanks for your feedback.
I'm guessing you are reading the book on the Safari Platform using the Chrome browser. Unfortunately, Chrome stopped supporting MathML, so the equations don't display properly. O'Reilly is working on fixing this, and I asked them to add a message to warn users.
In the meantime you can work around this issue by using another browser: Firefox or Safari.
Thanks for your understanding.
10/18/2019: the issue is now fixed in Chrome.</td>
</tr>
<tr>
<td id="T_bf535_row14_col0" class="data row14 col0" >Printed</td>
<td id="T_bf535_row14_col1" class="data row14 col1" >3</td>
<td id="T_bf535_row14_col2" class="data row14 col2" >86</td>
<td id="T_bf535_row14_col3" class="data row14 col3" >Page 86
Last line</td>
<td id="T_bf535_row14_col4" class="data row14 col4" >Just a tiny detail here. There is an "import" command missing before the last instruction of the page. NumPy was not loaded yet.
Note from the Author or Editor:Good catch, thanks. In later chapters I did not repeat all the imports, because I thought it was redundant (after a while, I assume the reader understands what np stands for and how to import it), but in the earlier chapters, it's useful to spell everything out. Fixed. :)</td>
</tr>
<tr>
<td id="T_bf535_row15_col0" class="data row15 col0" >ePub</td>
<td id="T_bf535_row15_col1" class="data row15 col1" >3</td>
<td id="T_bf535_row15_col2" class="data row15 col2" >86</td>
<td id="T_bf535_row15_col3" class="data row15 col3" >Page 86
some_digit = X[0]</td>
<td id="T_bf535_row15_col4" class="data row15 col4" >some_digit = X[0]
...causes the exception at the bottom of this text. The code instead should be the following:
some_digit = X.values[0]
-----------------------------------------------------------------------------------
KeyError Traceback (most recent call last)
~/ml/env/lib/python3.8/site-packages/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance)
2897 try:
-> 2898 return self._engine.get_loc(casted_key)
2899 except KeyError as err:
pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc()
pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc()
pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.PyObjectHashTable.get_item()
pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.PyObjectHashTable.get_item()
KeyError: 0
The above exception was the direct cause of the following exception:
KeyError Traceback (most recent call last)
<ipython-input-18-b7a6042a4eea> in <module>
1 import matplotlib as mpl
2 import matplotlib.pyplot as plt
----> 3 some_digit = X[0]
4 some_digit_image = some_digit.reshape ( 28 , 28 )
5 plt.imshow ( some_digit_image , cmap = mpl.cm.binary , interpolation = "nearest" )
~/ml/env/lib/python3.8/site-packages/pandas/core/frame.py in __getitem__(self, key)
2904 if self.columns.nlevels > 1:
2905 return self._getitem_multilevel(key)
-> 2906 indexer = self.columns.get_loc(key)
2907 if is_integer(indexer):
2908 indexer = [indexer]
~/ml/env/lib/python3.8/site-packages/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance)
2898 return self._engine.get_loc(casted_key)
2899 except KeyError as err:
-> 2900 raise KeyError(key) from err
2901
2902 if tolerance is not None:
KeyError: 0
Note from the Author or Editor:Thanks for your feedback.
Since Scikit-Learn 0.24, `fetch_openml()` returns a Pandas `DataFrame` by default, instead of a NumPy array. To avoid this and keep the same code as in the book, just specify `as_frame=False` when calling `fetch_openml()`.
Unfortunately, that's not something that I could have foreseen when writing the book, as version 0.24 was released afterwards. Other little things like that may break over time, so when you run into an issue, please check the notebooks in the GitHub project (https://github.com/ageron/handson-ml2): I try to keep them as up to date as I can. For example, there's a warning about the fetch_openml() function in the notebooks, and they use as_frame=False.
That said, I also updated the book so that future releases will use as_frame=False as well.
Hope this helps,
Aurelien</td>
</tr>
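<tr>
<td colspan="5">A minimal sketch of the workaround described above (Scikit-Learn 0.24 or later; this downloads MNIST, so it can take a while):
<pre>
from sklearn.datasets import fetch_openml

# as_frame=False keeps the NumPy-array behaviour used in the book instead of a DataFrame
mnist = fetch_openml('mnist_784', version=1, as_frame=False)
X, y = mnist["data"], mnist["target"]
some_digit = X[0]            # plain integer indexing now works as in the book
print(some_digit.shape)      # (784,)
</pre></td>
</tr>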
<tr>
<td id="T_bf535_row16_col0" class="data row16 col0" >Other Digital Version</td>
<td id="T_bf535_row16_col1" class="data row16 col1" >3</td>
<td id="T_bf535_row16_col2" class="data row16 col2" >2294</td>
<td id="T_bf535_row16_col3" class="data row16 col3" >2294-2340
Chapter 3 Multiclass Classification paragraph 2 and section on SGDClassifier for multiclass classification</td>
<td id="T_bf535_row16_col4" class="data row16 col4" >There is some conflicting information in the Hands-On Machine Learning with Scikit-Learn, Keras, and Tensorflow book and the sci-kit learn documentation. In chapter 3 under Multiclass Classification the author states twice that the stochastic gradient descent classifier (SGDClassifier) can handle multi-class classification problems directly without training multiple binary classifiers using One vs Rest/All. This is listed in the second paragraph as well as one or two pages later. The documentation for the SGDClassifier in sci-kit learn directly contradicts this. It states, “SGDClassifier supports multi-class classification by combining multiple binary classifiers in a “one versus all” (OVA) scheme” (https://scikit-learn.org/stable/modules/sgd.html)
Also, the statement about Logistic Regression being only a binary classifier seems to contradict the sci-kit learn documentation as well. Using the multinomial option, the LR model can learn a true multinomial distribution for multi-class problems (https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression).
Either the book seems incorrect or the sci-kit learn documentation is.
Note from the Author or Editor:Great feedback, thanks a lot!
Regarding the LogisticRegression class, the default value for the multi_class argument changed after the 2nd edition was published (in version 0.22) from 'ovr' to 'auto': so indeed, the new default multi-class behavior is to learn a true multinomial distribution (the old behavior was to train multiple binary classifiers and to use the OvR strategy). I'll update the book for future releases.
Regarding the SGDClassifier class, however, it really seems to be a mistake on my part. :( I tried to search for the origin of my error, perhaps a previous version used a different approach, but it seems that the SGDClassifier behavior has been the same since at least Scikit-Learn 0.17. I'm really sorry about this, I'll update the book now for future releases.
Thanks again for your contribution.</td>
</tr>
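<tr>
<td colspan="5">A minimal sketch contrasting the two Logistic Regression strategies discussed above, using the small iris dataset purely for illustration:
<pre>
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression

X, y = load_iris(return_X_y=True)

# One-versus-rest: one binary classifier per class
ovr_clf = LogisticRegression(multi_class="ovr", max_iter=1000).fit(X, y)

# True multinomial (softmax) regression, the default behaviour since Scikit-Learn 0.22
softmax_clf = LogisticRegression(multi_class="multinomial", max_iter=1000).fit(X, y)

print(ovr_clf.predict(X[:3]), softmax_clf.predict(X[:3]))
</pre></td>
</tr>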
<tr>
<td id="T_bf535_row17_col0" class="data row17 col0" >Printed</td>
<td id="T_bf535_row17_col1" class="data row17 col1" >4</td>
<td id="T_bf535_row17_col2" class="data row17 col2" >138</td>
<td id="T_bf535_row17_col3" class="data row17 col3" >Page 138
2nd paragraph</td>
<td id="T_bf535_row17_col4" class="data row17 col4" >It says : "... the dashed line in the righthand plot in Figure 4-18 (with alpha = 10^-7) looks quadratic, almost linear."
Actually, it does not look quadratic (maybe cubic?). Also, it is quite disputable that it looks "almost linear".
Note from the Author or Editor:Indeed, good catch! You just made me realize that this figure changed slightly between the first edition and the second edition of the book, probably because of slight tweaks in Scikit-Learn's algorithms. Here is what the figure looks like in the first edition:
https://snipboard.io/fBgiRw.jpg
I've fixed the sentence to say "looks roughly cubic". Thanks again!</td>
</tr>
<tr>
<td id="T_bf535_row18_col0" class="data row18 col0" >Printed</td>
<td id="T_bf535_row18_col1" class="data row18 col1" >4</td>
<td id="T_bf535_row18_col2" class="data row18 col2" >142</td>
<td id="T_bf535_row18_col3" class="data row18 col3" >Page 142
last line of code snippet before "Logistic Regression" heading</td>
<td id="T_bf535_row18_col4" class="data row18 col4" >In the example implementation of early stopping, when a model with less error is encountered, the best_model variable is set to a clone of the current model. As I understand it, a clone is an duplicate of the model without data. Should best_model instead be set to a deepcopy of the current model, which includes the data (in particular the trained coefficients)?
Note from the Author or Editor:Thanks for your feedback.
Indeed, this was an error, I'm sorry about that. I fixed this error last year. So instead of:
from sklearn.base import clone
...
best_model = clone(sgd_reg)
The code is now:
from copy import deepcopy
...
best_model = deepcopy(sgd_reg)
If you run into an error in the code, please check the notebooks in my github repository at https://github.com/ageron/handson-ml2, as I try to keep them up to date, fixing bugs and updating to the latest libraries.
Thanks again!
Aurelien</td>
</tr>
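<tr>
<td colspan="5">A minimal sketch of the corrected early-stopping loop; the prepared training and validation arrays (X_train_poly_scaled, y_train, X_val_poly_scaled, y_val) are assumed to exist as in the book's Chapter 4 example:
<pre>
from copy import deepcopy
from sklearn.linear_model import SGDRegressor
from sklearn.metrics import mean_squared_error

# warm_start=True makes each fit() call continue from where the last one stopped
sgd_reg = SGDRegressor(max_iter=1, tol=None, warm_start=True,
                       learning_rate="constant", eta0=0.0005)

minimum_val_error = float("inf")
best_model = None
for epoch in range(1000):
    sgd_reg.fit(X_train_poly_scaled, y_train)
    val_error = mean_squared_error(y_val, sgd_reg.predict(X_val_poly_scaled))
    if val_error < minimum_val_error:
        minimum_val_error = val_error
        best_model = deepcopy(sgd_reg)   # deepcopy keeps the fitted coefficients, unlike clone
</pre></td>
</tr>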
<tr>
<td id="T_bf535_row19_col0" class="data row19 col0" >Printed</td>
<td id="T_bf535_row19_col1" class="data row19 col1" >5</td>
<td id="T_bf535_row19_col2" class="data row19 col2" >154</td>
<td id="T_bf535_row19_col3" class="data row19 col3" >Page 154
Right graph of Figure 5-2.</td>
<td id="T_bf535_row19_col4" class="data row19 col4" >The x-label "x0" must be replaced by "x'0" as both variables x0 and x1 are scaled in this graph.
Same in the corresponding notebook 05_support_vector_machines.ipynb.
Note from the Author or Editor:Good catch, thanks! I just fixed the book and the notebook.</td>
</tr>
<tr>
<td id="T_bf535_row20_col0" class="data row20 col0" >Printed</td>
<td id="T_bf535_row20_col1" class="data row20 col1" >5</td>
<td id="T_bf535_row20_col2" class="data row20 col2" >158</td>
<td id="T_bf535_row20_col3" class="data row20 col3" >Page 158
Last sentence</td>
<td id="T_bf535_row20_col4" class="data row20 col4" >The book says : "The hyperparameter coef0 controls how much the model is influenced by high-degree polynomials versus low-degree polynomials."
I think it should say high-degree and low-degree TERMS instead of polynomials.
Note from the Author or Editor:Good catch, thanks. I changed that sentence to:
"""
The hyperparameter `coef0` controls how much the model is influenced by high-degree terms versus low-degree terms.
"""</td>
</tr>
<tr>
<td id="T_bf535_row21_col0" class="data row21 col0" >Printed</td>
<td id="T_bf535_row21_col1" class="data row21 col1" >5</td>
<td id="T_bf535_row21_col2" class="data row21 col2" >161</td>
<td id="T_bf535_row21_col3" class="data row21 col3" >Page 161
1st paragraph, above the figure</td>
<td id="T_bf535_row21_col4" class="data row21 col4" >In chapter 5, pages 160 and 161, it says:
So γ acts like a regularization hyperparameter: if your model is overfitting, you should reduce it, and if it is under?fitting, you should increase it (similar to the C hyperparameter).
As far as I know, to avoid overfitting, we must apply limitations to the method (increasing regularization) and vice-versa. It is also stated in the solution of exercise 9 in chapter 4.
Note from the Author or Editor:Thanks for your feedback. By "regularization hyperparameter", I just meant that it is a hyperparameter that lets you control regularization. Perhaps for more clarity I should have said that it is a "reverse regularization hyperparameter", since reducing it increases regularization.
I'll update the book.</td>
</tr>
<tr>
<td id="T_bf535_row22_col0" class="data row22 col0" >PDF</td>
<td id="T_bf535_row22_col1" class="data row22 col1" >5</td>
<td id="T_bf535_row22_col2" class="data row22 col2" >165</td>
<td id="T_bf535_row22_col3" class="data row22 col3" >Page 165
Under Equation 5-2</td>
<td id="T_bf535_row22_col4" class="data row22 col4" >The following sentence:
Figure 5-12 shows the decision function that corresponds to the model in the LEFT in Figure 5-4
Should be:
Figure 5-12 shows the decision function that corresponds to the model in the RIGHT in Figure 5-4
This can be confirmed in the corresponding Jupyter notebook (https://github.com/ageron/handson-ml/blob/master/05_support_vector_machines.ipynb Input #10 and #31) which both of them are using the same variable name "svm_clf2".
Note from the Author or Editor:Good catch, thanks. Indeed, it should be "right" instead of "left".</td>
</tr>
<tr>
<td id="T_bf535_row23_col0" class="data row23 col0" >Printed, Safari Books Online</td>
<td id="T_bf535_row23_col1" class="data row23 col1" >5</td>
<td id="T_bf535_row23_col2" class="data row23 col2" >173</td>
<td id="T_bf535_row23_col3" class="data row23 col3" >Page 173
First sentence at the top of the page, right underneath Equation 5-13.</td>
<td id="T_bf535_row23_col4" class="data row23 col4" >After presenting Equation 5-13 (labelled: "Linear SVM classifier cost function"), the paragraph reads as follows:
"The first sum in the cost function will push the model to have a small weight vector w, leading to a larger margin. The second sum computes the total of all margin violations."
In the equation, there is only one summation. I believe what is meant to be said is that the first "term" of the cost function is responsible for the margin, and the second "term" of the cost function (which is the summation) is responsible for minimizing margin violations.
When you refer to them as "first sum" and "second sum" it makes one think there should be two summations in the equation.
Thank you!
Note from the Author or Editor:Thanks for your feedback. I think I wrote "first sum" and "second sum" because in my mind the first term (1/2 w^T w) is actually a summation, since it is equal to 1/2 * (w_1^2 + w_2^2 + w_3^2 + ... + w_n^2). It's half of the sum of squares of the elements of w. But I agree that it's really not clear right now, so I'll write "first term" and "second term" instead, thanks again!</td>
</tr>
<tr>
<td id="T_bf535_row24_col0" class="data row24 col0" >Printed, PDF</td>
<td id="T_bf535_row24_col1" class="data row24 col1" >5</td>
<td id="T_bf535_row24_col2" class="data row24 col2" >763</td>
<td id="T_bf535_row24_col3" class="data row24 col3" >Page 763
equation C-4</td>
<td id="T_bf535_row24_col4" class="data row24 col4" >The primal problem is to minimize Equation C-1, but a negative sign is missing on page 763 to derive equation C-4. Since our initial target is to minimize the Lagrange, now we should maximize C-4. At the same time, the second equation in C-3 is a constrained condition for the dual problem. What is more, the equation of the third bullet times a^(i) are also constrains for the dual problem. The equation in chapter 5 is also incorrect. I have a very small request, when you are using some symbols, please define it before use. For example, n_s is not defined on page 763. It should be the number of support vectors found in the problem.
Note from the Author or Editor:Thanks for your excellent feedback, I really appreciate it!
> The primal problem is to minimize Equation C-1, but a negative sign is missing on page 763 to derive equation C-4. Since our initial target is to minimize the Lagrange, now we should maximize C-4.
Unless I overlooked something, I think the sign is correct in equation C-4: in the sentence following this equation, I mentioned that the goal is to minimize the loss, not maximize it. We could reverse the sign and try to maximize the equation instead, but it's really equivalent.
> At the same time, the second equation in C-3 is a constrained condition for the dual problem. What is more, the equation of the third bullet times a^(i) are also constraints for the dual problem.
Good catch, thanks a lot. I need to add "and \sum_{i=1}^m \alpha^{(i)} t^{(i)} = 0" at the end of equation C-4.
> The equation in chapter 5 is also incorrect.
Yes, I'll add the missing constraint there as well.
> I have a very small request, when you are using some symbols, please define it before use. For example, n_s is not defined on page 763. It should be the number of support vectors found in the problem.
Indeed, I try to always define the symbols I use, but apparently I missed this one. Please tell me if you find any other missing definition.
Thanks again! :)</td>
</tr>
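<tr>
<td colspan="5">For reference, the standard dual form with the constraint mentioned above added at the end (notation follows the book: the alpha are the dual variables, t the class labels in {-1, 1}):
<pre>
\min_{\alpha} \;
  \frac{1}{2} \sum_{i=1}^{m} \sum_{j=1}^{m}
    \alpha^{(i)} \alpha^{(j)} t^{(i)} t^{(j)} \, \mathbf{x}^{(i)\top} \mathbf{x}^{(j)}
  \; - \; \sum_{i=1}^{m} \alpha^{(i)}
\quad \text{subject to} \quad
  \alpha^{(i)} \ge 0 \;\; (i = 1, \dots, m)
  \quad \text{and} \quad \sum_{i=1}^{m} \alpha^{(i)} t^{(i)} = 0
</pre></td>
</tr>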
<tr>
<td id="T_bf535_row25_col0" class="data row25 col0" >Printed</td>
<td id="T_bf535_row25_col1" class="data row25 col1" >6</td>
<td id="T_bf535_row25_col2" class="data row25 col2" >184</td>
<td id="T_bf535_row25_col3" class="data row25 col3" >Page 184
Equation 6-4. CART cost function for regression</td>
<td id="T_bf535_row25_col4" class="data row25 col4" >I think you need to divide the MSE equation by m. The current equation represents RSS. Some machine learning books use RSS in this case. However, scikit-learn uses MSE.
https://scikit-learn.org/stable/modules/tree.html
Note from the Author or Editor:Good catch, thanks. The definition of MSE_node should be divided by m_node. Fixed! You can see the new equation at https://github.com/ageron/handson-ml2/blob/master/book_equations.pdf</td>
</tr>
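<tr>
<td colspan="5">For reference, the corrected per-node MSE with the division by m_node described above (\hat{y}_node is the node's mean target value):
<pre>
\mathrm{MSE}_{\text{node}} =
  \frac{1}{m_{\text{node}}} \sum_{i \in \text{node}}
    \left( \hat{y}_{\text{node}} - y^{(i)} \right)^2
\qquad \text{with} \qquad
\hat{y}_{\text{node}} = \frac{1}{m_{\text{node}}} \sum_{i \in \text{node}} y^{(i)}
</pre></td>
</tr>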
<tr>
<td id="T_bf535_row26_col0" class="data row26 col0" >Other Digital Version</td>
<td id="T_bf535_row26_col1" class="data row26 col1" >7</td>
<td id="T_bf535_row26_col2" class="data row26 col2" >-1</td>
<td id="T_bf535_row26_col3" class="data row26 col3" >ch. 7
Code snippet before Extra-Trees section</td>
<td id="T_bf535_row26_col4" class="data row26 col4" >"The following BaggingClassifier is roughly equivalent to the previous RandomForestClassifier:
bag_clf = BaggingClassifier(
DecisionTreeClassifier(splitter="random", max_leaf_nodes=16),
n_estimators=500, max_samples=1.0, bootstrap=True, n_jobs=-1)"
splitter="random" makes this BaggingClassifier not equivalent to RandomForestClassifier since splits in RandomForestClassifier are not random, but best splits made on random subsets of features.
The following snippet fixes the issue:
bag_clf = BaggingClassifier(
DecisionTreeClassifier(splitter="best", max_features="auto",
max_leaf_nodes=16),
n_estimators=500, max_samples=1.0, bootstrap=True, n_jobs=-1)
With these parameters (and set random state) the predictions made by BaggingClassifier in 07_ensemble_learning_and_random_forests.ipynb will be identical to the predictions of RandomForestClassifier:
>>> np.sum(y_pred == y_pred_rf) / len(y_pred)
1.0
Note from the Author or Editor:Thanks for your feedback, great analysis. I updated the code example to be:
bag_clf = BaggingClassifier(
DecisionTreeClassifier(max_features="auto", max_leaf_nodes=16),
n_estimators=500, max_samples=1.0, bootstrap=True, n_jobs=-1)
I left out splitter="best" since it is the default value (and the line overflow would require changing the page layout, which I try to avoid when possible).</td>
</tr>
<tr>
<td id="T_bf535_row27_col0" class="data row27 col0" >Printed</td>
<td id="T_bf535_row27_col1" class="data row27 col1" >7</td>
<td id="T_bf535_row27_col2" class="data row27 col2" >197</td>
<td id="T_bf535_row27_col3" class="data row27 col3" >Page 197
1st paragraph in "Random Forests". 2nd sentence</td>
<td id="T_bf535_row27_col4" class="data row27 col4" >The sentence reads "Instead of building a BaggingClassifier and passing it a DecisionTreeClassifier, you can instead use the RandomForest classifier class, [..]"
The word instead is used twice in the same sentence.
It should probably read "Instead of building a BaggingClassifier and passing it a DecisionTreeClassifier, you can use the RandomForest classifier class, [..]"
Note from the Author or Editor:Good catch, thanks. Instead of two insteads, I prefer a single one instead. ;-)</td>
</tr>
<tr>
<td id="T_bf535_row28_col0" class="data row28 col0" >Printed</td>
<td id="T_bf535_row28_col1" class="data row28 col1" >7</td>
<td id="T_bf535_row28_col2" class="data row28 col2" >203</td>
<td id="T_bf535_row28_col3" class="data row28 col3" >Page 203
4th Paragraph</td>
<td id="T_bf535_row28_col4" class="data row28 col4" >In this paragraph, it says:
"Let's go through a simple regression example, using ..... (of course, Gradient Boosting also works great with regression tasks)."
Instead of "regression tasks" (in the parentheses), it should probably say "classification tasks".
Thanks!
Note from the Author or Editor:Good catch, thanks, that's what I meant. Fixed. :)</td>
</tr>
<tr>
<td id="T_bf535_row29_col0" class="data row29 col0" >PDF</td>
<td id="T_bf535_row29_col1" class="data row29 col1" >7</td>
<td id="T_bf535_row29_col2" class="data row29 col2" >211</td>
<td id="T_bf535_row29_col3" class="data row29 col3" >Page 211
First paragraph</td>
<td id="T_bf535_row29_col4" class="data row29 col4" >brew is deprecated and its github repo recommends DESlib as an alternative (https://github.com/scikit-learn-contrib/DESlib)
Note from the Author or Editor:Thanks for your feedback, indeed brew is deprecated and DESlib looks like a great replacement. I updated the book, hopefully the change will make it to the 2nd release (printed this week), or else it will be the 3rd release.</td>
</tr>
<tr>
<td id="T_bf535_row30_col0" class="data row30 col0" >Printed</td>
<td id="T_bf535_row30_col1" class="data row30 col1" >8</td>
<td id="T_bf535_row30_col2" class="data row30 col2" >233</td>
<td id="T_bf535_row30_col3" class="data row30 col3" >Page 233
Figure 8-13 Description</td>
<td id="T_bf535_row30_col4" class="data row30 col4" >First, thank you for this amazing piece of work! I found a typo on page 233 in the Print format in the figure's description. It reads "Using various techniques to reduce the Swill roll to 2D", but it should be "Swiss roll" of course.
Note from the Author or Editor:Good catch, thanks! :)</td>
</tr>
<tr>
<td id="T_bf535_row31_col0" class="data row31 col0" >Safari Books Online</td>
<td id="T_bf535_row31_col1" class="data row31 col1" >9</td>
<td id="T_bf535_row31_col2" class="data row31 col2" >-1</td>
<td id="T_bf535_row31_col3" class="data row31 col3" >Chapter 9
Paragraph before Figure 9.1</td>
<td id="T_bf535_row31_col4" class="data row31 col4" >" This is where clustering algorithms step in: many of them can easily detect the top-left cluster. It is also quite easy to see with our own eyes, but it is not so obvious that the lower-right cluster is composed of two distinct sub-clusters."
This description DOES NOT MATCH THE FIGURE. The TOP-RIGHT CLUSTER has two distinct sub-groups and the LOWER-LEFT CLUSTER easily stands out by itself. So as written, the text has a VERY confusing lack of correspondence with the figure.
Note from the Author or Editor:Great catch, thanks a lot! Indeed, it should say "lower-left cluster" and "upper-right cluster", respectively. Here's the full correct sentence:
This is where clustering algorithms step in: many of them can easily detect the lower-left cluster. It is also quite easy to see with our own eyes, but it is not so obvious that the upper-right cluster is composed of two distinct sub-clusters.
Thanks again!
Aurélien</td>
</tr>
<tr>
<td id="T_bf535_row32_col0" class="data row32 col0" >PDF</td>
<td id="T_bf535_row32_col1" class="data row32 col1" >9</td>
<td id="T_bf535_row32_col2" class="data row32 col2" >245</td>
<td id="T_bf535_row32_col3" class="data row32 col3" >Page 245
second black dot</td>
<td id="T_bf535_row32_col4" class="data row32 col4" >There should be a sign of devision "/" between D(x(i))2 and sum_{j=1}^{m} D(x(j))2 in K-Means++ initialization algorithm.
Note from the Author or Editor:Great catch, thanks.
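For reference, the intended formula is the standard k-means++ weighting: each instance x^(i) is chosen as the next centroid with probability D(x^(i))² / sum_{j=1}^{m} D(x^(j))², where D(x^(i)) is the distance from x^(i) to the closest centroid already chosen (plain-text rendering, not the book's exact typesetting).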
This was a latexmath rendering issue, I just fixed it.</td>
</tr>
<tr>
<td id="T_bf535_row33_col0" class="data row33 col0" >Other Digital Version</td>
<td id="T_bf535_row33_col1" class="data row33 col1" >9</td>
<td id="T_bf535_row33_col2" class="data row33 col2" >251</td>
<td id="T_bf535_row33_col3" class="data row33 col3" >251-252
first paragraph</td>
<td id="T_bf535_row33_col4" class="data row33 col4" >Chapter 9 "Using Clustering for Preprocessing" talks about clustering as an efficient approach to dimensionality reduction.
With the example chosen, without performing a preclustering on the training data, each instance has 64 features.
If we perform a preclustering (via a pipeline) with 50 clusters, this is effectively a dimensionality reduction, since 50 < 64.
But at the end of the section, if we eventually keep k = 99, can we still speak of a dimensionality reduction?
However, I recognize that the accuracy gets better.
Note from the Author or Editor:Thanks for your feedback. Indeed, you're absolutely right: it's not dimensionality reduction anymore if we keep k=99 while the original dimensionality was 64. :/
This section definitely deserved a bit of clarification, so I changed the introduction from:
"""
Clustering can be an efficient approach to dimensionality reduction, in particular as a preprocessing step before a supervised learning algorithm.
"""
to:
"""
Clustering can be an efficient preprocessing step before a supervised learning algorithm.
"""
Then later in the section, right after the sentence "How about that? We reduced the error rate by almost 30% (from about 3.1% to about 2.2%)!", I added the following sentence:
"""
The clustering step reduces the dataset's dimensionality (from 64 to 50 dimensions), but the performance boost comes mostly from the fact that the transformed dataset is closer to being linearly separable than the original dataset, and therefore it is much easier to tackle with Logistic Regression.
"""
And I removed "But" in "But we chose the number of clusters k arbitrarily".
Hopefully this will be much clearer.
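For readers following along, the kind of pipeline this section describes looks roughly like this (an illustrative sketch; the variable names X_train and y_train are assumed from the chapter's digits example, not quoted from the book):
from sklearn.pipeline import Pipeline
from sklearn.cluster import KMeans
from sklearn.linear_model import LogisticRegression
pipeline = Pipeline([
    ("kmeans", KMeans(n_clusters=50)),   # replaces the 64 pixel features with 50 cluster distances
    ("log_reg", LogisticRegression()),   # logistic regression trained on the transformed features
])
pipeline.fit(X_train, y_train)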
Thanks again for your helpful feedback.</td>
</tr>
<tr>
<td id="T_bf535_row34_col0" class="data row34 col0" >Safari Books Online</td>
<td id="T_bf535_row34_col1" class="data row34 col1" >10</td>
<td id="T_bf535_row34_col2" class="data row34 col2" >-1</td>
<td id="T_bf535_row34_col3" class="data row34 col3" >ch 10
Second bullet under "Creating the model using the sequential API"</td>
<td id="T_bf535_row34_col4" class="data row34 col4" >You say that if Flatten "receives input data X, it computes X.reshape(-1, 1)".
If applied to an individual data point (e.g., a Fashion MNIST image), wouldn't this turn the image into a column vector? Don't we want (1, -1) or, better yet, (-1,), to turn it into a row?
This situation gets even more complicated if X is an entire input set, which is of shape (60000, 28, 28) in the Fashion MNIST example. We'd like it to end up (60000, 784), right? I can't see how (-1, 1) would do that.
Note from the Author or Editor:Thanks a lot for your feedback. Indeed, this is an error. I should have written: "receives input data X, it computes X.reshape(-1, 28*28)".
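To illustrate the corrected behaviour, here is a minimal sketch (the array names and zero-filled data are purely illustrative):
import numpy as np
X = np.zeros((60000, 28, 28))      # e.g. the Fashion MNIST training images
X_flat = X.reshape(-1, 28 * 28)    # shape (60000, 784): one flattened row per image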
Fixed, thanks again!</td>
</tr>
<tr>
<td id="T_bf535_row35_col0" class="data row35 col0" >Safari Books Online</td>
<td id="T_bf535_row35_col1" class="data row35 col1" >10</td>
<td id="T_bf535_row35_col2" class="data row35 col2" >-1</td>
<td id="T_bf535_row35_col3" class="data row35 col3" >ch 10
In the paragraph just before Figure 10-9.</td>
<td id="T_bf535_row35_col4" class="data row35 col4" >"so" seems a typo in the first sentence :
If each instance can belong only so a single class, out of 3 or more possible classes...
Note from the Author or Editor:Nice catch, I just fixed this typo, thanks a lot.</td>
</tr>
<tr>
<td id="T_bf535_row36_col0" class="data row36 col0" >Safari Books Online</td>
<td id="T_bf535_row36_col1" class="data row36 col1" >10</td>
<td id="T_bf535_row36_col2" class="data row36 col2" >-1</td>
<td id="T_bf535_row36_col3" class="data row36 col3" >ch 10
under "COMPILING THE MODEL"</td>
<td id="T_bf535_row36_col4" class="data row36 col4" >It seem's "sigmoid_crossentropy" is mistakenly used instead of "binary_crossentropy" in this sentence:
If we were doing binary classification (with one or more binary labels), then we would use the "sigmoid" (i.e., logistic) activation function in the output layer instead of the "softmax" activation function, and we would use the "sigmoid_crossentropy" loss.
Note from the Author or Editor:Good catch, thanks a lot, I just fixed this.</td>
</tr>
<tr>
<td id="T_bf535_row37_col0" class="data row37 col0" >Safari Books Online</td>
<td id="T_bf535_row37_col1" class="data row37 col1" >10</td>
<td id="T_bf535_row37_col2" class="data row37 col2" >-1</td>
<td id="T_bf535_row37_col3" class="data row37 col3" >Ch10
Above Figure 10-15</td>
<td id="T_bf535_row37_col4" class="data row37 col4" >Inputs A and B, shape attributes are wrong (should be 6, 5 not 5, 6)
Note from the Author or Editor:Great catch, thanks!
The problem was in the previous sentence, it was:
"For example, suppose we want to send five features through the deep path (features 0 to 4), and six features through the wide path (features 2 to 7):"
but the words "deep" and "wide" should have been reversed:
"For example, suppose we want to send five features through the wide path (features 0 to 4), and six features through the deep path (features 2 to 7):"
Thanks again,
Aurélien</td>
</tr>
<tr>
<td id="T_bf535_row38_col0" class="data row38 col0" >Printed</td>
<td id="T_bf535_row38_col1" class="data row38 col1" >10</td>
<td id="T_bf535_row38_col2" class="data row38 col2" >285</td>
<td id="T_bf535_row38_col3" class="data row38 col3" >Page 285
Ch 10, page 285, last phrase</td>
<td id="T_bf535_row38_col4" class="data row38 col4" >In the book it is said that a Perceptron with two inputs and three outputs with a step function is a multioutput classifier. I think this Perceptron is a multilabel classifier, indeed each output is binary and not number.
Note from the Author or Editor:Good catch! Indeed, the sentence should be:
"""
This Perceptron can classify instances simultaneously into three different binary classes, which makes it a multilabel classifier.
"""
Thanks a lot!</td>
</tr>
<tr>
<td id="T_bf535_row39_col0" class="data row39 col0" >Printed, Safari Books Online</td>
<td id="T_bf535_row39_col1" class="data row39 col1" >10</td>
<td id="T_bf535_row39_col2" class="data row39 col2" >302</td>
<td id="T_bf535_row39_col3" class="data row39 col3" >Page 302
Last paragraph on page</td>
<td id="T_bf535_row39_col4" class="data row39 col4" >Instead of
"If we were doing binary classification (with one or more binary labels)"
This should be
"If we were doing multi-label classification (with one or more binary labels)"
Note from the Author or Editor:Thanks for your feedback. Indeed, this could have been clearer. I changed the sentence to:
"If we were doing binary classification or multilabel binary classification"</td>
</tr>
<tr>
<td id="T_bf535_row40_col0" class="data row40 col0" >Printed</td>
<td id="T_bf535_row40_col1" class="data row40 col1" >10</td>
<td id="T_bf535_row40_col2" class="data row40 col2" >304</td>
<td id="T_bf535_row40_col3" class="data row40 col3" >Page 304
3rd paragraph</td>
<td id="T_bf535_row40_col4" class="data row40 col4" >(2nd release)
"... set the `sample_weight` arguement (it supersedes `class_weight`)."
Acually tf.keras use `sample_weight` x `class_weight`.
Please check https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/keras/engine/training_utils.py#L1035
Thanks.
Note from the Author or Editor:Great catch!
I replaced "(it supersedes `class_weight`)" with "(if both `class_weight` and `sample_weight` are provided, Keras multiplies them)".
Thanks for your help!</td>
</tr>
<tr>
<td id="T_bf535_row41_col0" class="data row41 col0" >Printed</td>
<td id="T_bf535_row41_col1" class="data row41 col1" >10</td>
<td id="T_bf535_row41_col2" class="data row41 col2" >306</td>
<td id="T_bf535_row41_col3" class="data row41 col3" >Page 306
Using the model to make predictions.</td>
<td id="T_bf535_row41_col4" class="data row41 col4" >We scaled the training/validation set features by dividing by 255.0. To obtain accurate performance metrics on the test set, we should also apply the same pre-processing step.
current code:
model.evaluate(X_test, y_test)
...
X_new = X_test[:3]
y_proba = model.predict(X_new)
Note from the Author or Editor:Good catch, thanks! In the Jupyter notebook, the test set is properly scaled, but for some reason I did not include that line in the book. On page 298, just after scaling the training set and the validation set, I just added the following line in the book:
X_test = X_test / 255.0</td>
</tr>
<tr>
<td id="T_bf535_row42_col0" class="data row42 col0" >Printed</td>
<td id="T_bf535_row42_col1" class="data row42 col1" >10</td>
<td id="T_bf535_row42_col2" class="data row42 col2" >306</td>
<td id="T_bf535_row42_col3" class="data row42 col3" >Page 306
1st paragraph</td>
<td id="T_bf535_row42_col4" class="data row42 col4" >The book says "you should be able to reach close to 89% validation accuracy" if you continue training. However, on page 304, before the tip, the book says that the validation accuracy already reached 89.26% after 30 epochs.
Training for 30 more epochs, I got 89.42% accuracy.
Note from the Author or Editor:Good point, thanks. I replaced 89% with 89.4%.</td>
</tr>
<tr>
<td id="T_bf535_row43_col0" class="data row43 col0" >Printed</td>
<td id="T_bf535_row43_col1" class="data row43 col1" >10</td>
<td id="T_bf535_row43_col2" class="data row43 col2" >314</td>
<td id="T_bf535_row43_col3" class="data row43 col3" >Page 314
Next to the last paragraph</td>
<td id="T_bf535_row43_col4" class="data row43 col4" >In the code example for saving a model to HDF5 file, the first line should contain `keras.models.Sequential` instead of `keras.layers.Sequential`.
Note from the Author or Editor:Great catch! Indeed, I meant to write `keras.models.Sequential` instead of `keras.layers.Sequential`.
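The corrected snippet would look roughly like this (the layer sizes are illustrative, not necessarily the book's exact model):
model = keras.models.Sequential([
    keras.layers.Dense(30, activation="relu", input_shape=[8]),
    keras.layers.Dense(1)
])
model.save("my_keras_model.h5")   # saves the architecture and weights to an HDF5 file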
Thanks!</td>
</tr>
<tr>
<td id="T_bf535_row44_col0" class="data row44 col0" >Printed</td>
<td id="T_bf535_row44_col1" class="data row44 col1" >10</td>
<td id="T_bf535_row44_col2" class="data row44 col2" >325</td>
<td id="T_bf535_row44_col3" class="data row44 col3" >Page 325
Question 10</td>
<td id="T_bf535_row44_col4" class="data row44 col4" >I suggest replacing "98% precision" with "98% accuracy".
Note from the Author or Editor:Good catch, thanks. I meant accuracy, not precision.</td>
</tr>
<tr>
<td id="T_bf535_row45_col0" class="data row45 col0" >Printed</td>
<td id="T_bf535_row45_col1" class="data row45 col1" >10</td>
<td id="T_bf535_row45_col2" class="data row45 col2" >328</td>
<td id="T_bf535_row45_col3" class="data row45 col3" >Page 328
Exercise 2</td>
<td id="T_bf535_row45_col4" class="data row45 col4" >A closing parenthesis is messing before the OR operator on the last line.
Note from the Author or Editor:Good catch, thanks.
This should indeed have been:
A xor B = (A and not B) or (not A and B)
Replacing "xor", "and" and "not" with the appropriate symbols.
Fixed! :)</td>
</tr>
<tr>
<td id="T_bf535_row46_col0" class="data row46 col0" >Printed</td>
<td id="T_bf535_row46_col1" class="data row46 col1" >10</td>
<td id="T_bf535_row46_col2" class="data row46 col2" >329</td>
<td id="T_bf535_row46_col3" class="data row46 col3" >Page 329-330
Last sentence</td>
<td id="T_bf535_row46_col4" class="data row46 col4" >(2nd release)
"... plotting the error, and finding the point where the error shoots up)."
I think 'loss' would be better than 'error', because the Learning Rate section uses 'loss' to explain how to find the learning rate.
Thanks.
Note from the Author or Editor:Good point, I replaced "error" with "loss" in this sentence.</td>
</tr>
<tr>
<td id="T_bf535_row47_col0" class="data row47 col0" >Printed</td>
<td id="T_bf535_row47_col1" class="data row47 col1" >10</td>
<td id="T_bf535_row47_col2" class="data row47 col2" >329</td>
<td id="T_bf535_row47_col3" class="data row47 col3" >Page 329 and 731
Exercise 6 and solution to Exercise 6</td>
<td id="T_bf535_row47_col4" class="data row47 col4" >"Weight vector" should be replaced by "weight matrix" on both pages 329 and 731. On page 731, the first sentence following the colon should probably get its own item in the list (letter 'a') and on the last item in the list, Y should be boldfaced (now printed as Y*).
Note from the Author or Editor:Great catches! Yes, I should have written "weight matrix" instead of "weight vector" on pages 329 and 731. I fixed the first formatting issue in February, it should be fine in the latest releases of the book. I just fixed the second issue (the Y in the last bullet point should be a boldface Y, not Y*).
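For context, in a fully connected layer with n inputs and m neurons the weights indeed form a matrix rather than a vector: W has shape (n, m), the bias b has shape (m,), and the layer computes Y = activation(X W + b) for an input batch X of shape (batch size, n).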
Thanks a lot.</td>
</tr>
<tr>
<td id="T_bf535_row48_col0" class="data row48 col0" >Safari Books Online</td>
<td id="T_bf535_row48_col1" class="data row48 col1" >11</td>
<td id="T_bf535_row48_col2" class="data row48 col2" >-1</td>
<td id="T_bf535_row48_col3" class="data row48 col3" >ch 11
before Unsupervised Pretraining</td>
<td id="T_bf535_row48_col4" class="data row48 col4" >In a parenthesis: (which may be due to shear luck)
shear luck to sheer luck
Note from the Author or Editor:Indeed, it should be sheer instead of shear, thanks!</td>
</tr>
<tr>
<td id="T_bf535_row49_col0" class="data row49 col0" >Safari Books Online</td>
<td id="T_bf535_row49_col1" class="data row49 col1" >11</td>
<td id="T_bf535_row49_col2" class="data row49 col2" >-1</td>
<td id="T_bf535_row49_col3" class="data row49 col3" >ch 11
Avoiding Overfitting Through Regularization>Learning Rate Scheduling>Power scheduling</td>
<td id="T_bf535_row49_col4" class="data row49 col4" >Probably "k" in the formula should be replaced by "s".
Set the learning rate to a function of the iteration number t: η(t) = η₀ / (1 + t/k)^c. The initial learning rate η₀, the power c (typically set to 1) and the steps s are hyperparameters. The learning rate drops at each step, and after s steps it is down to η₀ / 2. After s more steps, it is down to η₀ / 3. Then down to η₀ / 4, then η₀ / 5, and so on. As you can see, this schedule first drops quickly, then more and more slowly. Of course, this requires tuning η₀, s (and possibly c).
Note from the Author or Editor:Great catch thanks! Indeed, it should be
η(t) = η₀ / (1 + t/s)^c</td>
</tr>
<tr>
<td id="T_bf535_row50_col0" class="data row50 col0" >Safari Books Online</td>
<td id="T_bf535_row50_col1" class="data row50 col1" >11</td>
<td id="T_bf535_row50_col2" class="data row50 col2" >-1</td>
<td id="T_bf535_row50_col3" class="data row50 col3" >ch 11
Dropout>Note</td>
<td id="T_bf535_row50_col4" class="data row50 col4" >However, it you double it, inference time will also be doubled.
to
However, if you double it, inference time will also be doubled.
Note from the Author or Editor:Thanks a lot, indeed, it's a typo. I just fixed it: should be "if you double" rather than "it you double".</td>
</tr>
<tr>
<td id="T_bf535_row51_col0" class="data row51 col0" >Printed</td>
<td id="T_bf535_row51_col1" class="data row51 col1" >11</td>
<td id="T_bf535_row51_col2" class="data row51 col2" >338</td>
<td id="T_bf535_row51_col3" class="data row51 col3" >Page 338
1st line under 1st code block</td>
<td id="T_bf535_row51_col4" class="data row51 col4" >(3rd release)
"LeakyRelu(alpha=0.2)" should be "LeakyReLU(alpha=0.2)".
Thanks.
Note from the Author or Editor:Good catch, thanks. It should indeed read LeakyReLU(alpha=0.2).</td>
</tr>
<tr>
<td id="T_bf535_row52_col0" class="data row52 col0" >Printed</td>
<td id="T_bf535_row52_col1" class="data row52 col1" >11</td>
<td id="T_bf535_row52_col2" class="data row52 col2" >344</td>
<td id="T_bf535_row52_col3" class="data row52 col3" >Page 344
2nd to last paragraph</td>
<td id="T_bf535_row52_col4" class="data row52 col4" >"... but the `fit()` method sets to it to 1" should be "... but the `fit()` method sets it to 1."
Note from the Author or Editor:Good catch, thanks. Indeed, it should have been "...but the `fit()` method sets it to 1".</td>
</tr>
<tr>
<td id="T_bf535_row53_col0" class="data row53 col0" >PDF</td>
<td id="T_bf535_row53_col1" class="data row53 col1" >11</td>
<td id="T_bf535_row53_col2" class="data row53 col2" >347</td>
<td id="T_bf535_row53_col3" class="data row53 col3" >Page 347
last paragraph</td>
<td id="T_bf535_row53_col4" class="data row53 col4" >"you clone model A’s architecture with clone.model()" => clone_model() instead of clone.model()
Note from the Author or Editor:Good catch, thanks! I fixed this typo, it should be good in the next reprint.</td>
</tr>
<tr>
<td id="T_bf535_row54_col0" class="data row54 col0" >Printed</td>
<td id="T_bf535_row54_col1" class="data row54 col1" >11</td>
<td id="T_bf535_row54_col2" class="data row54 col2" >347</td>
<td id="T_bf535_row54_col3" class="data row54 col3" >Page 347
last paragraph</td>
<td id="T_bf535_row54_col4" class="data row54 col4" >The following is the original last line of the last paragraph:
To do this, you clone model A’s architecture with clone.model(), then copy its weights (since clone_model() does not clone the weights):
But there is no such function clone.model(); it should be clone_model().
Note from the Author or Editor:Great catch, thanks. Indeed, it should be clone_model(), not clone.model().</td>
</tr>
<tr>
<td id="T_bf535_row55_col0" class="data row55 col0" >Printed</td>
<td id="T_bf535_row55_col1" class="data row55 col1" >11</td>
<td id="T_bf535_row55_col2" class="data row55 col2" >354</td>
<td id="T_bf535_row55_col3" class="data row55 col3" >Page 354
Figure 11-6</td>
<td id="T_bf535_row55_col4" class="data row55 col4" >I suggest adding a negative sign before η∇_1 and η∇_2.
Note from the Author or Editor:Oh yikes, you're absolutely right! Thanks, I'm updating the figure now.</td>
</tr>
<tr>
<td id="T_bf535_row56_col0" class="data row56 col0" >Printed</td>
<td id="T_bf535_row56_col1" class="data row56 col1" >11</td>
<td id="T_bf535_row56_col2" class="data row56 col2" >356</td>
<td id="T_bf535_row56_col3" class="data row56 col3" >Page 356
Eq. 11-8</td>
<td id="T_bf535_row56_col4" class="data row56 col4" >(2nd release)
T should be t in the 3rd and 4th equations, because the next sentence is "... t represents the iteration number (starting at 1).".
Thanks.
Note from the Author or Editor:Great catch! Indeed it should be a lowercase italic _t_.
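For reference, the two bias-correction steps in question read roughly m̂ ← m / (1 − β₁ᵗ) and ŝ ← s / (1 − β₂ᵗ), with t the iteration number (plain-text rendering, not the book's exact typesetting).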
Thanks!</td>
</tr>
<tr>
<td id="T_bf535_row57_col0" class="data row57 col0" >Printed</td>
<td id="T_bf535_row57_col1" class="data row57 col1" >11</td>
<td id="T_bf535_row57_col2" class="data row57 col2" >357</td>
<td id="T_bf535_row57_col3" class="data row57 col3" >Page 357
Below AdaMax</td>
<td id="T_bf535_row57_col4" class="data row57 col4" >(2nd release)
".. the gradients in s (with a greater weight for more recent weights)."