Lucene 为数据库建全文索引,之前的子查询,现在怎么实现?
以前用数据库的like,但数据量越来越大,性能越来越差,最近,打算用lucene,但之前有这样的语句
select *
from company
where audit=1
and
(
(vip=0 and profile like '%关键字%')
or
(vip=1 and id in (select co_id
from tradeleads
where title like '%关键字%' or content like '%关键字%')
)
)
请问怎么用lucene实现这样的查询啊
请说明实现的步骤,如果有代码更好!
[解决办法]
没有子查询这种概念,执行2次lucene搜索吧
[解决办法]
Lucene中有AND 或 OR 的查询。
贴一个查询类给你,希望对你有帮助。
- Java code
public class SearchServiceImpl implements SearchService{ private String index_Path; private final int HITS_PER_PAGE=10; public List search(String q,int start) throws IOException { try { List<Content> searchList = new ArrayList<Content>(); IndexSearcher searcher = new IndexSearcher(index_Path); Analyzer analyzer = new MMAnalyzer(2); //布查找 BooleanQuery booleanQuery = new BooleanQuery(); QueryParser parser = new QueryParser("Content",analyzer); Query query = parser.parse(q); booleanQuery.add(query,Occur.SHOULD); QueryParser parser2 = new QueryParser("Title",analyzer); Query query2 = parser2.parse(q); booleanQuery.add(query2,Occur.SHOULD); //排序 //Sort sort = new Sort(); //SortField sf = new SortField("ContentId",SortField.INT, true);//降序,false升序 //sort.setSort(sf); //Hits hits = searcher.search(booleanQuery , sort);//按上面的置排序 long timeStart = System.currentTimeMillis(); Hits hits = searcher.search(booleanQuery , Sort.RELEVANCE);//按得分排序 SimpleHTMLFormatter sHtmlF = new SimpleHTMLFormatter("<font color=#c60a00>","</font>"); Highlighter highlighter = new Highlighter(sHtmlF,new QueryScorer(booleanQuery)); highlighter.setTextFragmenter(new SimpleFragmenter(40)); long timeUsed = System.currentTimeMillis() - timeStart; //logger.info("get resultset successfully! 
(" + timeUsed // + " milliseconds used)" // ); int end =0; if((start ==0 ) || (start==1)){ start = 0; end = Math.min(hits.length(), HITS_PER_PAGE); }else{ int page = start; start = (start-1)*HITS_PER_PAGE; end = Math.min(hits.length(), page*HITS_PER_PAGE); } //for (; start < hits.length(); start += HITS_PER_PAGE ) { //int end = Math.min(hits.length(), start + HITS_PER_PAGE); for (int i = start; i < end; i++) { Content bean = new Content(); //System.out.print("score=" + hits.score(i) + "\t"); bean.setDocsID(hits.id(i)); bean.setHits_per_page(HITS_PER_PAGE); bean.setTotal(hits.length()); bean.setScore(String.valueOf(hits.score(i))); bean.setTimeUsed(String.valueOf(timeUsed)); //System.out.println(bean.getTotal()); Enumeration e = hits.doc(i).fields(); while (e.hasMoreElements()) { Field f = (Field) e.nextElement(); if (f.isStored()) { if(f.name().equals("Title")){ TokenStream tokenStream = analyzer.tokenStream("Title", new StringReader(f.stringValue())); String result = highlighter.getBestFragment(tokenStream, f.stringValue()); if(result == null){ bean.setTitle(f.stringValue()); }else{ bean.setTitle(result); } }else if(f.name().equals("ContentId")){ bean.setContentId(Integer.parseInt(f.stringValue())); }else if(f.name().equals("DateTime")){ bean.setDateTime(TimeUtil.getDateTime(f.stringValue())); }else if(f.name().equals("Content")){ //HighLight 情查看Lucene文 TokenStream tokenStream = analyzer.tokenStream("Content", new StringReader(f.stringValue())); int maxNumFragmentsRequired = 3; String result = highlighter.getBestFragments(tokenStream, f.stringValue(), maxNumFragmentsRequired, "......"); bean.setContent(result); } } } searchList.add(bean); } //} searcher.close(); return searchList; } catch (ParseException e) { e.printStackTrace(); return null; } } /** * @return index_Path */ public String getIndex_Path() { return index_Path; } /** * @param index_Path The index_Path to set */ public void setIndex_Path(String index_Path) { this.index_Path = index_Path; }}