Back
Breaking SVM Complexity with Cross-Training
We propose an algorithm for selectively removing examples from the training set using probabilistic estimates related to editing algorithms (Devijver and Kittler, 1982). The procedure creates a separable distribution of training examples with minimal impact on the decision boundary position. It breaks the linear dependency between the number of SVs and the number of training examples, and sharply reduces the complexity of SVMs during both the training and prediction stages.
@inproceedings{2846,
  title         = {Breaking {SVM} Complexity with Cross-Training},
  author        = {Bakir, G. H. and Bottou, L. and Weston, J.},
  booktitle     = {Advances in Neural Information Processing Systems 17},
  editor        = {Saul, L. K. and Weiss, Y. and Bottou, L.},
  pages         = {81--88},
  publisher     = {MIT Press},
  address       = {Cambridge, MA, USA},
  month         = jul,
  year          = {2005},
  organization  = {Max-Planck-Gesellschaft},
  school        = {Biologische Kybernetik},
  abstract      = {We propose an algorithm for selectively removing examples from the training set using probabilistic estimates related to editing algorithms (Devijver and Kittler, 1982). The procedure creates a separable distribution of training examples with minimal impact on the decision boundary position. It breaks the linear dependency between the number of SVs and the number of training examples, and sharply reduces the complexity of SVMs during both the training and prediction stages.},
  slug          = {2846},
  month_numeric = {7}
}