基於詞庫的分詞算法,是較好的中文分詞器
package busetoken;
import java.io.IOException;
import jeasy.analysis.MMAnalyzer;
public class UseJe {

    /**
     * Demonstrates dictionary-based Chinese word segmentation using the
     * JE analyzer ({@code MMAnalyzer}): segments a sample paragraph and
     * prints the tokens separated by {@code "|"}.
     *
     * @param args command-line arguments (unused)
     * @throws IOException if the segmenter fails while reading its dictionary
     *                     or processing the text
     */
    public static void main(String[] args) throws IOException {
        String sample = "編碼規範從根本上解決了程序維護員的難題;規範的編碼閱讀和理解起來更容易,也可以快速的不費力氣的借鑑別人的編碼。對將來維護你編碼的人來說,你的編碼越優化,他們就越喜歡你的編碼,理解起來也就越快。";
        MMAnalyzer analyzer = new MMAnalyzer();
        String segmented = analyzer.segment(sample, "|");
        System.out.print(segmented);
    }
}
效果如下
編碼|規範|從根本上|解決|程序|維護|員|難題|規範|編碼|閱讀|理解|起來|更|容易|也可以|快速|不費力氣|借鑑|別人|編碼|將來|維護|你|編碼|的人|來說|你的|編碼|越|優化|他們|就越|喜歡|你的|編碼|理解|起來|也就|越快|
建立索引
package bindex;
import java.io.File;
import tool.FileText;
import tool.FileList;
import java.io.*;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexWriter;
import jeasy.analysis.MMAnalyzer;
import org.apache.lucene.store.LockObtainFailedException;
public class FileIndexer {

    /**
     * Builds a Lucene index under {@code indexes/} from every file returned by
     * {@code FileList.getFiles("htmls")}. Each document carries three fields:
     * {@code name} and {@code content} (stored and tokenized by the JE
     * Chinese analyzer) and {@code path} (stored, not indexed).
     *
     * <p>Fix over the original: the IndexWriter is now closed in a
     * {@code finally} block. Previously {@code close()} was only reached on
     * the success path, so any exception during indexing left the writer open
     * and the Lucene write lock held, blocking subsequent indexing runs.
     *
     * @param args command-line arguments (unused)
     */
    public static void main(String[] args) {
        String indexPath = "indexes";
        IndexWriter indexWriter = null;
        try {
            // Lucene 2.x constructor: directory path + analyzer.
            indexWriter = new IndexWriter(indexPath, new MMAnalyzer());
            String[] files = FileList.getFiles("htmls");
            int num = files.length;
            for (int i = 0; i < num; i++) {
                Document doc = new Document();
                File f = new File(files[i]);
                String name = f.getName();
                String content = FileText.getText(f);
                String path = f.getPath();
                doc.add(new Field("name", name, Field.Store.YES, Field.Index.TOKENIZED));
                doc.add(new Field("content", content, Field.Store.YES, Field.Index.TOKENIZED));
                // Path is for display/retrieval only, so it is stored but not indexed.
                doc.add(new Field("path", path, Field.Store.YES, Field.Index.NO));
                indexWriter.addDocument(doc);
                System.out.println("File:" + path + name + " indexed!");
            }
            System.out.println("OK!");
        } catch (CorruptIndexException e) {
            e.printStackTrace();
        } catch (LockObtainFailedException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            // Always release the index (and its write lock), even when
            // indexing fails part-way through.
            if (indexWriter != null) {
                try {
                    indexWriter.close();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        }
    }
}
搜索
package bindex;
import java.io.IOException;
import java.lang.StringBuffer;
import org.apache.lucene.index.*;
import org.apache.lucene.search.*;
import org.apache.lucene.document.*;
public class BindexSearcher {

    /** Maximum number of characters of document content echoed per hit. */
    private static final int SNIPPET_LENGTH = 100;

    /**
     * Runs a single-term query ({@code content:安全}) against the index in
     * {@code indexes/} and prints, for each hit, the document name and the
     * first {@value #SNIPPET_LENGTH} characters of its content.
     *
     * <p>Fixes over the original:
     * <ul>
     *   <li>{@code substring(0, 100)} threw
     *       {@code StringIndexOutOfBoundsException} for any hit whose content
     *       was shorter than 100 characters; the snippet is now bounded by
     *       {@code Math.min}.</li>
     *   <li>{@code searcher.close()} is now in a {@code finally} block, so the
     *       searcher is released even when the search throws.</li>
     * </ul>
     *
     * @param args command-line arguments (unused)
     */
    public static void main(String[] args) {
        String indexPath = "indexes";
        String searchField = "content";
        String searchPhrase = "安全";
        StringBuffer sb = new StringBuffer("");
        IndexSearcher searcher = null;
        try {
            searcher = new IndexSearcher(indexPath);
            Term t = new Term(searchField, searchPhrase);
            Query q = new TermQuery(t);
            Hits hs = searcher.search(q);
            int num = hs.length();
            for (int i = 0; i < num; i++) {
                Document doc = hs.doc(i);
                Field fname = doc.getField("name");
                Field fcontent = doc.getField("content");
                sb.append("name: ");
                sb.append(fname.stringValue()).append(" ");
                sb.append("content: ");
                // Bound the snippet so short documents no longer throw
                // StringIndexOutOfBoundsException.
                String content = fcontent.stringValue();
                sb.append(content.substring(0, Math.min(content.length(), SNIPPET_LENGTH)))
                  .append(" ");
                sb.append("------------").append(" ");
            }
            System.out.println(sb);
        } catch (CorruptIndexException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            // Release the searcher even if the query or result iteration failed.
            if (searcher != null) {
                try {
                    searcher.close();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        }
    }
}
本篇文章來源於:網貝建站 http://www.netbei.com 原文鏈接:http://www.netbei.com/2009/1223/18997.html