@inproceedings{cavnar1994ngrambased,
  author      = {Cavnar, William B. and Trenkle, John M.},
  title       = {{N-Gram-Based} Text Categorization},
  booktitle   = {Symposium On Document Analysis and Information Retrieval},
  year        = 1994,
  pages       = {161--175},
  address     = {Las Vegas},
  url         = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.53.9367},
  urldate     = {2008-10-20},
  abstract    = {Text categorization is a fundamental task in document processing, allowing the automated handling of enormous streams of documents in electronic form. One difficulty in handling some classes of documents is the presence of different kinds of textual errors, such as spelling and grammatical errors in email, and character recognition errors in documents that come through OCR. Text categorization must work reliably on all input, and thus must tolerate some level of these kinds of problems. We describe here an N-gram-based approach to text categorization that is tolerant of textual errors. The system is small, fast and robust. This system worked very well for language classification, achieving in one test a 99.8 % correct classification rate on Usenet newsgroup articles written in different languages. The system also worked reasonably well for classifying articles from a number of different computer-oriented newsgroups according to subject, achieving as high as an 80 % correct classification rate. There are also several obvious directions for improving the system's classification performance in those cases where it did not do as well. The system is based on calculating and comparing profiles of N-gram frequencies. First, we use the system to compute profiles on training set data that represent the various categories, e.g., language samples or newsgroup content samples. Then the system computes a profile for a particular document that is to be classified. Finally, the system computes a distance measure between the document's profile and each of the},
  added       = {2009-08-17 11:40:57 +0200},
  modified    = {2010-01-04 09:30:08 +0100},
  interhash   = {f0473fcb06a7b07f51bbfdc71b4b063c},
  intrahash   = {6922ef3ab653ff35cbe9117227816a24},
  bdsk-file-1 = {YnBsaXN0MDDUAQIDBAUIJidUJHRvcFgkb2JqZWN0c1gkdmVyc2lvblkkYXJjaGl2ZXLRBgdUcm9vdIABqAkKFRYXGyIjVSRudWxs0wsMDQ4RFFpOUy5vYmplY3RzV05TLmtleXNWJGNsYXNzog8QgASABqISE4ACgAOAB1lhbGlhc0RhdGFccmVsYXRpdmVQYXRo0hgNGRpXTlMuZGF0YU8RAZIAAAAAAZIAAgAADE1hY2ludG9zaCBIRAAAAAAAAAAAAAAAAAAAAMa9fPRIKwAAABHTpQgxOTk0LnBkZgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEdOmxTiV5AAAAAAAAAAAAAEABAAACSAAAAAAAAAAAAAAAAAAAAAGQ2F2bmFyABAACAAAxr1g1AAAABEACAAAxTiH1AAAAAEAFAAR06UAEZpVAAWrswAFq6YAAJGfAAIAQU1hY2ludG9zaCBIRDpVc2VyczptYXRoaWFzOkRvY3VtZW50czp6ZXR0ZWxrYXN0ZW46Q2F2bmFyOjE5OTQucGRmAAAOABIACAAxADkAOQA0AC4AcABkAGYADwAaAAwATQBhAGMAaQBuAHQAbwBzAGgAIABIAEQAEgA0VXNlcnMvbWF0aGlhcy9Eb2N1bWVudHMvemV0dGVsa2FzdGVuL0Nhdm5hci8xOTk0LnBkZgATAAEvAAAVAAIADv//AACABdIcHR4fWCRjbGFzc2VzWiRjbGFzc25hbWWjHyAhXU5TTXV0YWJsZURhdGFWTlNEYXRhWE5TT2JqZWN0XxApLi4vRG9jdW1lbnRzL3pldHRlbGthc3Rlbi9DYXZuYXIvMTk5NC5wZGbSHB0kJaIlIVxOU0RpY3Rpb25hcnkSAAGGoF8QD05TS2V5ZWRBcmNoaXZlcgAIABEAFgAfACgAMgA1ADoAPABFAEsAUgBdAGUAbABvAHEAcwB2AHgAegB8AIYAkwCYAKACNgI4Aj0CRgJRAlUCYwJqAnMCnwKkAqcCtAK5AAAAAAAAAgEAAAAAAAAAKAAAAAAAAAAAAAAAAAAAAss=},
}