@inproceedings{Bajramovic04:ACO,
  author    = {Bajramovic, Ferid and Gruber, Christian and Sick, Bernhard},
  title     = {A Comparison of First- and Second-Order Training Algorithms for Dynamic Neural Networks},
  booktitle = {{IEEE} International Joint Conference on Neural Networks},
  year      = {2004},
  volume    = {2},
  pages     = {837--842},
  abstract  = {Neural networks are often used for time series processing. Temporal information can, for example, be modeled using the dynamic neural network (DYNN) paradigm which combines delay-elements in feedforward direction with recurrent connections. If networks were applied to process time series, learning typically becomes a very complex and time consuming task. Therefore, training algorithms are needed which are both accurate and fast. In this article six learning algorithms, namely temporal backpropagation through time, resilient propagation, Quasi-Newton, scaled conjugate gradient, backpropagation based on partial Quasi-Newton, and a combination of the latter two algorithms, are presented and applied to DYNN. The various learning algorithms are compared with respect to duration of training, approximation and generalization capability, and convergence speed. Each algorithm is evaluated by means of three real-world application examples, the prediction of the number of users in a computer pool, the prediction of the energy consumption in a building, and the verification of a person using her/his signature. With respect to the experiments conducted here, RProp turns out to be the best algorithm to train DYNN.},
  doi       = {10.1109/IJCNN.2004.1380038},
  url       = {http://ieeexplore.ieee.org/abstract/document/1380038/},
}