Date: 2014-05-20  Views: 20913

Urgent, looking for advice: using POI to parse .doc, .docx, .rtf and .txt
I want to implement a resume-parsing feature and use POI to parse .doc, .docx, .rtf and .txt files.
From the extracted text I then need to filter out fields such as name, tel, email and address (the resumes are all in English).
Does anyone have a good approach or idea?
I've been stuck on this for quite a while without coming up with anything good, so any advice would be appreciated. Thanks!
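To make the extraction step concrete, the sketch below is roughly what I have in mind for getting plain text out of each format: POI's built-in extractors for .doc and .docx (written fully qualified as org.apache.poi.hwpf.extractor.WordExtractor so it doesn't clash with the class posted below), Swing's RTFEditorKit for .rtf, and a plain read for .txt. The class name ResumeTextReader is just a placeholder, .docx needs poi-ooxml on the classpath, and this is only one possible setup. The part I can't work out is the filtering.
Java code

import java.io.FileInputStream;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;

import javax.swing.text.DefaultStyledDocument;
import javax.swing.text.rtf.RTFEditorKit;

import org.apache.poi.xwpf.extractor.XWPFWordExtractor;
import org.apache.poi.xwpf.usermodel.XWPFDocument;

public class ResumeTextReader {

    // .doc: POI's binary (HWPF) extractor, fully qualified to avoid the name clash
    public static String readDoc(String path) throws Exception {
        InputStream in = new FileInputStream(path);
        try {
            return new org.apache.poi.hwpf.extractor.WordExtractor(in).getText();
        } finally {
            in.close();
        }
    }

    // .docx: POI's OOXML extractor (needs poi-ooxml on the classpath)
    public static String readDocx(String path) throws Exception {
        InputStream in = new FileInputStream(path);
        try {
            return new XWPFWordExtractor(new XWPFDocument(in)).getText();
        } finally {
            in.close();
        }
    }

    // .rtf: Swing's RTFEditorKit can strip the markup and give back plain text
    public static String readRtf(String path) throws Exception {
        InputStream in = new FileInputStream(path);
        try {
            RTFEditorKit kit = new RTFEditorKit();
            DefaultStyledDocument doc = new DefaultStyledDocument();
            kit.read(in, doc, 0);
            return doc.getText(0, doc.getLength());
        } finally {
            in.close();
        }
    }

    // .txt: read the bytes directly (assuming UTF-8)
    public static String readTxt(String path) throws Exception {
        return new String(Files.readAllBytes(Paths.get(path)), StandardCharsets.UTF_8);
    }
}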

------Solution--------------------
Parsing Word itself is easy; here is a snippet for you. What I don't know is how to do the filtering.
Java code

package com.psp.util;

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;

import org.apache.poi.poifs.filesystem.DocumentEntry;
import org.apache.poi.poifs.filesystem.DocumentInputStream;
import org.apache.poi.poifs.filesystem.POIFSFileSystem;
import org.apache.poi.util.LittleEndian;

public class WordExtractor {
    public WordExtractor() {
    }

    public String extractText(InputStream in) throws IOException {
        ArrayList<WordTextPiece> text = new ArrayList<WordTextPiece>();
        POIFSFileSystem fsys = new POIFSFileSystem(in);

        DocumentEntry headerProps = (DocumentEntry) fsys.getRoot().getEntry("WordDocument");
        DocumentInputStream din = fsys.createDocumentInputStream("WordDocument");
        // "header" actually holds the entire WordDocument stream (FIB + raw text), not just the header
        byte[] header = new byte[headerProps.getSize()];

        din.readFully(header);
        din.close();
        // flags word at FIB offset 0x0A; bit 0x0200 (fWhichTblStm) says which table stream to use
        int info = LittleEndian.getShort(header, 0xa);

        boolean useTable1 = (info & 0x200) != 0;

        // FIB offset 0x1A2 is fcClx: where the piece table (CLX) starts inside the table stream
        int complexOffset = LittleEndian.getInt(header, 0x1a2);
        
        String tableName = null;
        if (useTable1) {
            tableName = "1Table";
        } else {
            tableName = "0Table";
        }

        DocumentEntry table = (DocumentEntry) fsys.getRoot().getEntry(tableName);
        byte[] tableStream = new byte[table.getSize()];

        din = fsys.createDocumentInputStream(tableName);

        din.readFully(tableStream);
        din.close();

        din = null;
        fsys = null;
        table = null;
        headerProps = null;

        // walk the piece table and collect the text pieces
        findText(tableStream, complexOffset, text);

        StringBuilder sb = new StringBuilder();
        int size = text.size();
        tableStream = null;

        for (int x = 0; x < size; x++) {
            
            WordTextPiece nextPiece = text.get(x);
            int start = nextPiece.getStart();
            int length = nextPiece.getLength();

            boolean unicode = nextPiece.usesUnicode();
            String toStr = null;
            if (unicode) {
                // a Unicode piece is always two bytes per character
                toStr = new String(header, start, length * 2, "UTF-16LE");
            } else {
                toStr = new String(header, start, length, "ISO-8859-1");
            }
            sb.append(toStr).append(" ");

        }
        return sb.toString();
    }

    private static int findText(byte[] tableStream, int complexOffset, ArrayList<WordTextPiece> text)
        throws IOException {
        //actual text
        int pos = complexOffset;
        int multiple = 2;
        //skip the prm blocks that precede the piece table; they only carry data for fast-saved files
        while (tableStream[pos] == 1) {
            pos++;
            int skip = LittleEndian.getShort(tableStream, pos);
            pos += 2 + skip;
        }
        if (tableStream[pos] != 2) {
            throw new IOException("corrupted Word file");
        } else {
            //parse out the text pieces
            int pieceTableSize = LittleEndian.getInt(tableStream, ++pos);
            pos += 4;
            int pieces = (pieceTableSize - 4) / 12;
            for (int x = 0; x < pieces; x++) {
                int filePos =
                    LittleEndian.getInt(tableStream, pos + ((pieces + 1) * 4) + (x * 8) + 2);
                boolean unicode = false;
                if ((filePos & 0x40000000) == 0) {
                    unicode = true;
                } else {
                    unicode = false;
                    multiple = 1;
                    filePos &= ~(0x40000000); //gives me FC in doc stream
                    filePos /= 2;
                }
                int totLength =
                    LittleEndian.getInt(tableStream, pos + (x + 1) * 4)
                        - LittleEndian.getInt(tableStream, pos + (x * 4));

                WordTextPiece piece = new WordTextPiece(filePos, totLength, unicode);
                text.add(piece);

            }

        }
        return multiple;
    }
    public static void main(String[] args){
        WordExtractor w  = new WordExtractor();
        try{
            
            File file = new File("E:\\资料\\相关问题和论文.doc");
            
            InputStream in = new FileInputStream(file);
            String s = w.extractText(in);
            System.out.println(s);
            in.close();
        }catch(Exception e){
            e.printStackTrace();
        }
                
    }

}
class WordTextPiece {
    private int _fcStart;
    private boolean _usesUnicode;
    private int _length;

    public WordTextPiece(int start, int length, boolean unicode) {
        _usesUnicode = unicode;
        _length = length;
        _fcStart = start;
    }
    public boolean usesUnicode() {
        return _usesUnicode;
    }

    public int getStart() {
        return _fcStart;
    }
    public int getLength() {
        return _length;
    }

}
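------Solution--------------------
As for filtering out name, tel, email and address from the extracted text: email and phone numbers can be caught with regular expressions, while name and address usually need heuristics (or a proper NLP/NER library). The sketch below is only meant to illustrate the idea; the patterns and the "first non-empty line is the name" / "line containing a street keyword is the address" rules are assumptions about typical English resumes and will not hold for every file.
Java code

import java.util.HashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class ResumeFieldFilter {

    private static final Pattern EMAIL =
        Pattern.compile("[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Za-z]{2,}");
    // very loose phone pattern: a digit, then at least 6 digits/spaces/dashes/parentheses, then a digit
    private static final Pattern PHONE =
        Pattern.compile("\\+?\\d[\\d\\s()-]{6,}\\d");
    // keywords that often show up in an address line (heuristic only)
    private static final Pattern ADDRESS_HINT =
        Pattern.compile("(?i)\\b(street|road|avenue|lane|suite|apartment|boulevard)\\b");

    public static Map<String, String> filter(String text) {
        Map<String, String> fields = new HashMap<String, String>();

        Matcher m = EMAIL.matcher(text);
        if (m.find()) {
            fields.put("email", m.group());
        }
        m = PHONE.matcher(text);
        if (m.find()) {
            fields.put("tel", m.group().trim());
        }

        // Word paragraphs come back with \r marks, plain text with \n, so split on either
        String[] lines = text.split("[\\r\\n]+");
        for (String line : lines) {
            String trimmed = line.trim();
            if (trimmed.length() == 0) {
                continue;
            }
            // assumption: the first non-empty line of a resume is the candidate's name
            if (!fields.containsKey("name")) {
                fields.put("name", trimmed);
            }
            // assumption: a line containing a street keyword is the address
            if (!fields.containsKey("address") && ADDRESS_HINT.matcher(trimmed).find()) {
                fields.put("address", trimmed);
            }
        }
        return fields;
    }

    public static void main(String[] args) {
        String sample = "John Smith\n12 Baker Street, London\nTel: +44 20 7946 0123\njohn.smith@example.com";
        System.out.println(filter(sample));
    }
}

For anything more reliable than this, especially names and addresses, a dedicated resume parser or a named-entity recognition library is usually the better route; pure regex runs out of steam quickly on those fields.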