import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.PrintWriter;
import java.util.ArrayDeque;
import java.util.Calendar;
import java.util.HashSet;
import java.util.Queue;
import java.util.Set;

import org.htmlparser.Node;
import org.htmlparser.Parser;
import org.htmlparser.tags.LinkTag;
import org.htmlparser.util.NodeIterator;
import org.htmlparser.util.NodeList;
import org.htmlparser.util.ParserException;

public class MiniCrawler {
    private Set<String> alreadyCrawledSet;   // URLs that have already been fetched
    private Queue<String> unCrawlQueue;      // URLs discovered but not yet fetched
    private Set<String> picSrcSet;           // image addresses collected so far
    private static final int MAX_COUNT = 10; // upper bound on pages to crawl
    private String newFolder;                // output directory for this run
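    /**
     * Creates the crawler and a timestamped output directory under C:\
     * so that each run writes its results to a fresh folder.
     */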
    public MiniCrawler()
    {
        this.alreadyCrawledSet = new HashSet<>();
        this.unCrawlQueue = new ArrayDeque<>();
        this.picSrcSet = new HashSet<>();
        Calendar cal = Calendar.getInstance();
        int month = cal.get(Calendar.MONTH) + 1; // Calendar months are 0-based
        int day = cal.get(Calendar.DAY_OF_MONTH);
        int hour = cal.get(Calendar.HOUR_OF_DAY);
        int minute = cal.get(Calendar.MINUTE);
        int second = cal.get(Calendar.SECOND);
        String folder = month + "_" + day + "_" + hour + "_" + minute + "_" + second;
        newFolder = "C:\\" + folder + "\\";
        File dir = new File(newFolder);
        if (!dir.exists()) // new File() never returns null, so no null check is needed
        {
            dir.mkdirs();
        }
    }
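    /**
     * Breadth-first crawl: parses the seed page, then repeatedly takes a
     * queued URL, parses it, and records it as crawled, until the queue is
     * empty or MAX_COUNT pages have been visited. Finally, all collected
     * image addresses are written to picAddrs.txt in the output folder.
     */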
    public void crawl(String beginUrl)
    {
        Parser parser;
        try {
            parser = new Parser(beginUrl);
            // Re-apply the detected encoding so the page is re-read consistently.
            parser.setEncoding(parser.getEncoding());
            NodeList nodeList = parser.parse(null);
            parseNodeList(nodeList);
        }
        catch (ParserException e)
        {
            e.printStackTrace();
            return; // without a working parser there is nothing to crawl
        }
        alreadyCrawledSet.add(beginUrl);
        // Keep going until the queue is empty or MAX_COUNT pages have been crawled.
        while (!unCrawlQueue.isEmpty() && alreadyCrawledSet.size() < MAX_COUNT)
        {
            String newUrl = unCrawlQueue.remove();
            try {
                parser.setResource(newUrl);
                parser.setEncoding(parser.getEncoding());
                NodeList nl = parser.parse(null);
                parseNodeList(nl);
            } catch (ParserException e) {
                e.printStackTrace();
            }
            // Mark the URL as crawled even on failure so it is not retried.
            alreadyCrawledSet.add(newUrl);
        }
System.out.println("Crawl Finish!");
FileOutputStream fos = null;
PrintWriter pw = null;
//把得到的全部图片地址写到文件中
try {
String picFile = newFolder+"picAddrs.txt";
fos = new FileOutputStream(new File(picFile));
pw = new PrintWriter(fos);
Iterator<String> iter = picSrcSet.iterator();
while(iter.hasNext())
{
String str = iter.next().toString();
pw.write(str);
pw.flush();
}
} catch (FileNotFoundException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}finally
{
if(null!=pw)
{
pw.close();
pw = null;
}
}
System.out.println("====== All the pic address have been writen to the file!");
}
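    // A minimal usage sketch (not part of the original listing); the seed URL
    // is a hypothetical placeholder and should be replaced with a real page.
    public static void main(String[] args)
    {
        MiniCrawler crawler = new MiniCrawler();
        crawler.crawl("http://www.example.com/index.htm"); // hypothetical seed
    }

    /**
     * Walks a parsed node list, collecting in-site links to crawl later and
     * image addresses to save.
     */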
    public void parseNodeList(NodeList nodeList)
    {
        NodeIterator iter = nodeList.elements();
        try {
            while (iter.hasMoreNodes())
            {
                Node node = iter.nextNode();
                if (node instanceof LinkTag)
                {
                    LinkTag tag = (LinkTag) node;
                    String href = tag.getLink();
                    //System.out.println("===find a link: " + href);
                    if (null != href)
                    {
                        // Follow only in-site .htm pages that have not been crawled yet.
                        if (!alreadyCrawledSet.contains(href) && (href.indexOf("www.169pp.com") != -1) && href.endsWith("htm"))