references.bib
@incollection{lecun_efficient_1998,
    title = {Efficient {BackProp}},
    isbn = {978-3-540-65311-0 978-3-540-49430-0},
    url = {https://link.springer.com/chapter/10.1007/3-540-49430-8_2},
    doi = {10.1007/3-540-49430-8_2},
    series = {Lecture Notes in Computer Science},
    abstract = {The convergence of back-propagation learning is analyzed so as to explain common phenomenon observed by practitioners. Many undesirable behaviors of backprop can be avoided with tricks that are rarely exposed in serious technical publications. This paper gives some of those tricks, and offers explanations of why they work. Many authors have suggested that second-order optimization methods are advantageous for neural net training. It is shown that most “classical” second-order methods are impractical for large neural networks. A few methods are proposed that do not have these limitations.},
    pages = {9--50},
    booktitle = {Neural Networks: Tricks of the Trade},
    publisher = {Springer, Berlin, Heidelberg},
    author = {{LeCun}, Yann and Bottou, Léon and Orr, Genevieve B. and Müller, Klaus-Robert},
    urldate = {2017-11-29},
    date = {1998},
    langid = {english}
}
@online{steeves_2015,
    title = {{MNIST Hand Written Digits Classification Benchmark}},
    url = {https://knowm.org/mnist-hand-written-digits-classification-benchmark/},
    organization = {Knowm},
    author = {Steeves, Jacob},
    date = {2015-09}
}
@online{tensorflow,
title = {{Deep MNIST for Experts}},
url = {https://www.tensorflow.org/get_started/mnist/pros},
titleaddon = {{TensorFlow}},
urldate = {2017-11-29}
}
@online{ochre,
title = {{OCHRE - Optical Character Recognition}},
url = {http://www.sund.de/netze/applets/BPN/bpn2/ochre.html},
urldate = {2017-11-29}
}