Training and Approximation of a Primal Multiclass Support Vector Machine
We revisit the multiclass support vector machine (SVM) and generalize the formulation to convex loss functions and joint feature maps. Motivated by recent work [Chapelle, 2006], we use the logistic loss and softmax to enable gradient-based primal optimization. Kernels are incorporated via kernel principal component analysis (KPCA), which naturally leads to approximation methods for large-scale problems. We investigate similarities and differences to previous multiclass SVM approaches. Experimental comparisons to previous approaches and to the popular one-vs-rest SVM are presented on several datasets.
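The smoothed primal objective plausibly takes the following form, reconstructed from the abstract's description with a joint feature map Φ(x, y) and regularization parameter λ (the paper's exact formulation may differ):

\min_{w}\; \frac{\lambda}{2}\,\|w\|^2 \;+\; \sum_{i=1}^{n} \log \sum_{u \in \mathcal{Y}} \exp\Big( \langle w, \Phi(x_i, u) \rangle - \langle w, \Phi(x_i, y_i) \rangle \Big)

Replacing the maximum in the multiclass hinge loss with this log-sum-exp (softmax) makes the objective differentiable, so standard gradient methods apply directly in the primal. A minimal end-to-end sketch of the pipeline this suggests, KPCA features followed by primal gradient descent on a softmax loss, is given below. The dataset, kernel choice (RBF with gamma=0.5), truncation dimension d, regularization weight C, learning rate, and iteration count are all illustrative assumptions, not values from the paper.

# Illustrative sketch of the described approach: KPCA feature extraction
# followed by gradient-based primal optimization of a softmax loss.
# Not the authors' implementation; all hyperparameters are assumptions.
import numpy as np
from scipy.special import logsumexp
from sklearn.datasets import load_iris
from sklearn.decomposition import KernelPCA

X, y = load_iris(return_X_y=True)
n, k = len(y), len(np.unique(y))

# Step 1: kernels enter via KPCA -- project onto the leading d components
# of the kernel feature space; truncating d gives the large-scale approximation.
d = 20
Z = KernelPCA(n_components=d, kernel="rbf", gamma=0.5).fit_transform(X)

# Step 2: primal gradient descent on a linear multiclass model in KPCA space,
# with softmax (multinomial logistic) loss and L2 regularization.
W = np.zeros((d, k))
C, lr = 1.0, 0.1
for _ in range(500):
    scores = Z @ W                                       # (n, k) class scores
    P = np.exp(scores - logsumexp(scores, axis=1, keepdims=True))  # softmax
    P[np.arange(n), y] -= 1.0                            # gradient of log-loss
    grad = W + (C / n) * Z.T @ P                         # regularizer + data term
    W -= lr * grad

accuracy = np.mean((Z @ W).argmax(axis=1) == y)
print(f"training accuracy: {accuracy:.3f}")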
@inproceedings{3983,
  title         = {Training and Approximation of a Primal Multiclass Support Vector Machine},
  booktitle     = {Proceedings of the 12th International Conference on Applied Stochastic Models and Data Analysis (ASMDA 2007)},
  abstract      = {We revisit the multiclass support vector machine (SVM) and generalize the formulation to convex loss functions and joint feature maps. Motivated by recent work [Chapelle, 2006], we use the logistic loss and softmax to enable gradient-based primal optimization. Kernels are incorporated via kernel principal component analysis (KPCA), which naturally leads to approximation methods for large-scale problems. We investigate similarities and differences to previous multiclass SVM approaches. Experimental comparisons to previous approaches and to the popular one-vs-rest SVM are presented on several datasets.},
  pages         = {1--8},
  editor        = {Skiadas, C. H.},
  organization  = {Max-Planck-Gesellschaft},
  institution   = {Max Planck Institute for Biological Cybernetics, Tuebingen, Germany},
  school        = {Biologische Kybernetik},
  month         = jun,
  year          = {2007},
  slug          = {3983},
  author        = {Zien, A. and Bona, FD. and Ong, CS.},
  month_numeric = {6}
}