@inproceedings{liu2011browsing,
  abstract  = {To optimize the performance of web crawlers, various page importance measures have been studied to select and order URLs in crawling. Most sophisticated measures (e.g. breadth-first and PageRank) are based on link structure. In this paper, we treat the problem from another perspective and propose to measure page importance through mining user interest and behaviors from web browse logs. Unlike most existing approaches, which work on a single URL, in this paper both the log mining and the crawl ordering are performed at the granularity of URL patterns. The proposed URL pattern-based crawl orderings are able to properly predict the importance of newly created (unseen) URLs. Promising experimental results prove the feasibility of our approach.},
  acmid     = {2063593},
  address   = {New York, NY, USA},
  author    = {Liu, Minghai and Cai, Rui and Zhang, Ming and Zhang, Lei},
  booktitle = {Proceedings of the 20th ACM international conference on Information and knowledge management},
  doi       = {10.1145/2063576.2063593},
  interhash = {7b45567cb6a492d8354dc32401549291},
  intrahash = {3ce89bd8a3d3eb6306b739fe1f4088df},
  isbn      = {978-1-4503-0717-8},
  location  = {Glasgow, Scotland, UK},
  numpages  = {6},
  pages     = {87--92},
  publisher = {ACM},
  title     = {User browsing behavior-driven web crawling},
  url       = {http://doi.acm.org/10.1145/2063576.2063593},
  year      = 2011
}

@inproceedings{cho2007rankmass,
  abstract  = {Crawling algorithms have been the subject of extensive research and optimizations, but some important questions remain open. In particular, given the unbounded number of pages available on the Web, search-engine operators constantly struggle with the following vexing questions: When can I stop downloading the Web? How many pages should I download to cover "most" of the Web? How can I know I am not missing an important part when I stop? In this paper we provide an answer to these questions by developing, in the context of a system that is given a set of trusted pages, a family of crawling algorithms that (1) provide a theoretical guarantee on how much of the "important" part of the Web they will download after crawling a certain number of pages and (2) give a high priority to important pages during a crawl, so that the search engine can index the most important part of the Web first. We prove the correctness of our algorithms by theoretical analysis and evaluate their performance experimentally based on 141 million URLs obtained from the Web. Our experiments demonstrate that even our simple algorithm is effective in downloading important pages early on and provides high "coverage" of the Web with a relatively small number of pages.},
  acmid     = {1325897},
  author    = {Cho, Junghoo and Schonfeld, Uri},
  booktitle = {Proceedings of the 33rd international conference on Very large data bases},
  interhash = {c5573f70e067624e3a559996172a45ef},
  intrahash = {3227ef077a463fbaa6ba1ac7aac82d06},
  isbn      = {978-1-59593-649-3},
  location  = {Vienna, Austria},
  numpages  = {12},
  pages     = {375--386},
  publisher = {VLDB Endowment},
  title     = {RankMass crawler: a crawler with high personalized PageRank coverage guarantee},
  url       = {http://dl.acm.org/citation.cfm?id=1325851.1325897},
  year      = 2007
}

@inproceedings{olston2008recrawl,
  abstract  = {It is crucial for a web crawler to distinguish between ephemeral and persistent content. Ephemeral content (e.g., quote of the day) is usually not worth crawling, because by the time it reaches the index it is no longer representative of the web page from which it was acquired. On the other hand, content that persists across multiple page updates (e.g., recent blog postings) may be worth acquiring, because it matches the page's true content for a sustained period of time.
               In this paper we characterize the longevity of information found on the web, via both empirical measurements and a generative model that coincides with these measurements. We then develop new recrawl scheduling policies that take longevity into account. As we show via experiments over real web data, our policies obtain better freshness at lower cost, compared with previous approaches.},
  acmid     = {1367557},
  address   = {New York, NY, USA},
  author    = {Olston, Christopher and Pandey, Sandeep},
  booktitle = {Proceedings of the 17th international conference on World Wide Web},
  doi       = {10.1145/1367497.1367557},
  interhash = {62dabc7c7aa03203804fde1b32b5fbe0},
  intrahash = {68ecda3b2d943f8625add57a3a2f3a7c},
  isbn      = {978-1-60558-085-2},
  location  = {Beijing, China},
  numpages  = {10},
  pages     = {437--446},
  publisher = {ACM},
  title     = {Recrawl scheduling based on information longevity},
  url       = {http://doi.acm.org/10.1145/1367497.1367557},
  year      = 2008
}

@inproceedings{pandey2005usercentric,
  abstract  = {Search engines are the primary gateways of information access on the Web today. Behind the scenes, search engines crawl the Web to populate a local indexed repository of Web pages, used to answer user search queries. In an aggregate sense, the Web is very dynamic, causing any repository of Web pages to become out of date over time, which in turn causes query answer quality to degrade. Given the considerable size, dynamicity, and degree of autonomy of the Web as a whole, it is not feasible for a search engine to maintain its repository exactly synchronized with the Web. In this paper we study how to schedule Web pages for selective (re)downloading into a search engine repository. The scheduling objective is to maximize the quality of the user experience for those who query the search engine. We begin with a quantitative characterization of the way in which the discrepancy between the content of the repository and the current content of the live Web impacts the quality of the user experience. This characterization leads to a user-centric metric of the quality of a search engine's local repository. We use this metric to derive a policy for scheduling Web page (re)downloading that is driven by search engine usage and free of exterior tuning parameters. We then focus on the important subproblem of scheduling refreshing of Web pages already present in the repository, and show how to compute the priorities efficiently. We provide extensive empirical comparisons of our user-centric method against prior Web page refresh strategies, using real Web data. Our results demonstrate that our method requires far fewer resources to maintain the same search engine quality level for users, leaving substantially more resources available for incorporating new Web pages into the search repository.},
  acmid     = {1060805},
  address   = {New York, NY, USA},
  author    = {Pandey, Sandeep and Olston, Christopher},
  booktitle = {Proceedings of the 14th international conference on World Wide Web},
  doi       = {10.1145/1060745.1060805},
  interhash = {4d0e8067c9240b05c42bf8e174ffb1d1},
  intrahash = {166a0a9f8d80beeab0c75961398d951f},
  isbn      = {1-59593-046-9},
  location  = {Chiba, Japan},
  numpages  = {11},
  pages     = {401--411},
  publisher = {ACM},
  title     = {User-centric Web crawling},
  url       = {http://doi.acm.org/10.1145/1060745.1060805},
  year      = 2005
}

@phdthesis{castillo2004effective,
  abstract  = {The key factors for the success of the World Wide Web are its large size and the lack of a centralized control over its contents.
               Both issues are also the most important source of problems for locating information. The Web is a context in which traditional Information Retrieval methods are challenged, and given the volume of the Web and its speed of change, the coverage of modern search engines is relatively small. Moreover, the distribution of quality is very skewed, and interesting pages are scarce in comparison with the rest of the content. Web crawling is the process used by search engines to collect pages from the Web. This thesis studies Web crawling at several different levels, ranging from the long-term goal of crawling important pages first, to the short-term goal of using the network connectivity efficiently, including implementation issues that are essential for crawling in practice. We start by designing a new model and architecture for a Web crawler that tightly integrates the crawler with the rest of the search engine, providing access to the metadata and links of the documents that can be used to guide the crawling process effectively. We implement this design in the WIRE project as an efficient Web crawler that provides an experimental framework for this research. In fact, we have used our crawler to characterize the Chilean Web, using the results as feedback to improve the crawler design. We argue that the number of pages on the Web can be considered infinite, and given that a Web crawler cannot download all the pages, it is important to capture the most important ones as early as possible during the crawling process. We propose, study, and implement algorithms for achieving this goal, showing that we can crawl 50% of a large Web collection and capture 80% of its total PageRank value in both simulated and real Web environments. We also model and study user browsing behavior in Web sites, concluding that it is not necessary to go deeper than five levels from the home page to capture most of the pages actually visited by people, and support this conclusion with log analysis of several Web sites. We also propose several mechanisms for server cooperation to reduce network traffic and improve the representation of a Web page in a search engine with the help of Web site managers.},
  address   = {Santiago, Chile},
  author    = {Castillo, Carlos},
  interhash = {36eac63e7cfae05bc7444171432a6f3f},
  intrahash = {38b52bf7ccc2e1221477f5d8937c3b7d},
  month     = {November},
  school    = {School of Engineering, University of Chile},
  title     = {Effective Web Crawling},
  url       = {http://www.chato.cl/crawling/},
  year      = 2004
}