Package java.io

Examples of java.io.StreamTokenizer


    /**
     * Sets up the stream tokenizer
     */
    private void setup() {
        st = new StreamTokenizer(this);
        st.resetSyntax();
        st.eolIsSignificant(false);
        st.lowerCaseMode(true);

        // Parse numbers as words
        // … (snippet truncated)
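The snippet is cut off right after the "Parse numbers as words" comment. A minimal sketch of what that configuration usually looks like (an assumption, not the original code): after resetSyntax() every character is ordinary, so letters and digits have to be re-registered as word characters if numeric text should come back inside TT_WORD tokens rather than as TT_NUMBER.

        // Assumed continuation: fold digits (and '.') into words so the tokenizer
        // never produces TT_NUMBER tokens.
        st.wordChars('a', 'z');
        st.wordChars('A', 'Z');
        st.wordChars('0', '9');
        st.wordChars('.', '.');
        st.whitespaceChars(0, ' ');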


  /**
   * …
   * @param f
   * @param source
   * @throws WinkParseException
   */
  public static void updateFunctionInfo(final FunctionObject f, final String source) throws WinkParseException {
    final StreamTokenizer st = ParserUtils.getStreamTokenizer(source);
    boolean end = false;
    boolean firstWord = true;
    while (!end) {
      int ttype;
      try {
        ttype = st.nextToken();
      } catch (final IOException e) {
        throw new WinkParseException(e);
      }

      switch (ttype) {
      // … (snippet truncated)

  /**
   * …
   * @param source
   * @param relativeShiftLine
   * @throws WinkParseException
   */
  public static void updateLiteralLines(final LiteralObject lt, final String source, final int relativeShiftLine) throws WinkParseException {
    final StreamTokenizer st = ParserUtils.getStreamTokenizer(source);
    boolean end = false;
    boolean identifyBegin = false;
    boolean inBlock = false;
    boolean afterBlock = false;
    final StringBuffer afterBlockBuffer = new StringBuffer();

    int lpCount = 0;
    int rpCount = 0;

    while (!end) {
      int ttype;
      try {
        ttype = st.nextToken();
      } catch (final IOException e) {
        throw new WinkParseException(e);
      }

      switch (ttype) {
      case StreamTokenizer.TT_EOF:
        end = true;
        break;
      case StreamTokenizer.TT_WORD:
        if (!identifyBegin) {
          final String identifier = st.sval;
          final String ltName = lt.getNamespace().getLastName();
          if (identifier.indexOf(ltName) != -1) {
            final int ln = relativeShiftLine + (st.lineno() - 1);
            lt.setLineStart(ln);
            identifyBegin = true;
          }
        }
        if (afterBlock) {
          end = true;
        }
        break;
      case StreamTokenizer.TT_NUMBER:
        if (afterBlock) {
          end = true;
        }
        break;
      default:
        if (afterBlock) {
          afterBlockBuffer.append((char) ttype);
          final String c = String.valueOf((char) ttype);

          if (c.equals(",") || c.equals(";")) {
            if (afterBlockBuffer.toString().trim().length() == 1) {
              final int ln = relativeShiftLine + (st.lineno() - 1);
              lt.setLineEnd(ln);
              end = true;
            }
          }
        } else if (identifyBegin) {
          final String c = String.valueOf((char) ttype);
          if (c.equals("{")) {
            if (lpCount == 0) {
              inBlock = true;
            }
            lpCount++;
          } else if (c.equals("}")) {
            if (inBlock) {
              rpCount++;
            }
          }
          if (inBlock) {
            if (lpCount == rpCount) {
              final int ln = relativeShiftLine + (st.lineno() - 1);
              lt.setLineEnd(ln);
              inBlock = false;
              afterBlock = true;
              // end = true;
            }
            // … (snippet truncated)

  /**
   * …
   * @param lt
   * @param source
   * @throws WinkParseException
   */
  public static void updateLiteralChars(final LiteralObject lt, final String source) throws WinkParseException {
    final StreamTokenizer st = ParserUtils.getStreamTokenizer(source);
    boolean end = false;
    boolean firstWord = true;
    while (!end) {
      int ttype;
      try {
        ttype = st.nextToken();
      } catch (final IOException e) {
        throw new WinkParseException(e);
      }

      switch (ttype) {
      // … (snippet truncated)

  /**
   * …
   * @param method
   * @return
   * @throws IOException
   */
  public static LinkedHashMap<Integer, Integer> getBlockLines(final String content, final String method) throws IOException {
    final StreamTokenizer st = ParserUtils.getStreamTokenizer(content);
    final LinkedHashMap<Integer, Integer> result = new LinkedHashMap<Integer, Integer>();

    boolean end = false;
    boolean identifyBegin = false;
    boolean justAfterBegin = false;
    boolean inBlock = false;
    int lpCount = 0;
    int rpCount = 0;
    int lineStart = 0;
    int lineEnd = 0;

    while (!end) {
      final int ttype = st.nextToken();

      switch (ttype) {
      case StreamTokenizer.TT_EOF:
        end = true;
        break;
      case StreamTokenizer.TT_WORD:
        if (!identifyBegin) {
          if (st.sval.indexOf(method) != -1) {
            identifyBegin = true;
            justAfterBegin = true;
          }
        }
        break;
      default:
        final String c = String.valueOf((char) ttype);
        if (justAfterBegin) {
          if (c.equals("(")) {
            identifyBegin = false;
            justAfterBegin = false;
          }
        }

        if (identifyBegin) {
          justAfterBegin = false;
          if (c.equals("{")) {
            if (lpCount == 0) {
              lineStart = st.lineno();
              inBlock = true;
            }
            lpCount++;
          } else if (c.equals("}")) {
            if (inBlock) {
              rpCount++;
            }
          }
          if (inBlock) {
            if (lpCount == rpCount) {
              lineEnd = st.lineno();
              inBlock = false;
              result.put(lineStart, lineEnd);

              identifyBegin = false;
              inBlock = false;
              // … (snippet truncated)
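A hypothetical call to getBlockLines (the input string and the expected mapping are assumptions based on the visible logic; this also assumes the truncated tail of the method simply returns result, and that the caller declares throws IOException):

      final String js = "foo: function() {\n"
                      + "  bar();\n"
                      + "}\n";
      final LinkedHashMap<Integer, Integer> blocks = ParserUtils.getBlockLines(js, "foo");
      // The brace opened on line 1 closes on line 3, so blocks should be {1=3}.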

  /**
   * @param source
   * @return
   */
  public static StreamTokenizer getStreamTokenizer(final String source) {
    final StreamTokenizer st = new StreamTokenizer(new StringReader(source));
    st.wordChars('_', '_');
    st.wordChars('/', '/');
    st.wordChars('[', '[');
    st.wordChars(']', ']');
    st.wordChars('"', '"');
    st.slashStarComments(true);
    st.slashSlashComments(true);
    return st;
  }
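A small, hypothetical usage sketch for the helper above: dump every token the configured tokenizer produces (the sample source string is made up, and the loop is assumed to live in a method that declares throws IOException).

    final StreamTokenizer st = ParserUtils.getStreamTokenizer("foo = function(a) { return a[0]; }");
    int ttype;
    while ((ttype = st.nextToken()) != StreamTokenizer.TT_EOF) {
      switch (ttype) {
      case StreamTokenizer.TT_WORD:
        System.out.println("word:   " + st.sval);
        break;
      case StreamTokenizer.TT_NUMBER:
        System.out.println("number: " + st.nval);
        break;
      default:
        System.out.println("char:   " + (char) ttype);   // single ordinary character
      }
    }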

    /**
     * @param string
     *        string to be parsed
     * @return map of name-value pairs
     */
    private Map<String, String> parseStringArguments(String string) {
        try {
            StreamTokenizer tokenizer = new StreamTokenizer(new StringReader(
                    string));
            Map<String, String> result = new HashMap<>();
            tokenizer.nextToken();
            while (tokenizer.ttype != StreamTokenizer.TT_EOF) {
                verify(tokenizer.ttype != StreamTokenizer.TT_WORD,
                        "Invalid processing instruction argument.");
                String name = tokenizer.sval;
                verify('=' != tokenizer.nextToken(), "Expecting token '='");
                tokenizer.nextToken();
                verify(tokenizer.ttype != '\"' && tokenizer.ttype != '\'',
                        "Invalid processing instruction argument.");
                String value = tokenizer.sval;
                result.put(name, value);
                tokenizer.nextToken();
            }
            return result;
        } catch (IOException e) {
            throw new RDFParserException(e, "I/O error", getDocumentLocator());
        }
        // … (snippet truncated)
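A self-contained sketch of the same name="value" parsing scheme, without the class's own verify()/getDocumentLocator() machinery (parseArgs is a hypothetical helper and assumes the usual java.io and java.util imports). The default syntax table already treats both " and ' as quote characters, so a quoted value comes back in sval with ttype set to the quote character.

    static Map<String, String> parseArgs(final String s) throws IOException {
        final StreamTokenizer t = new StreamTokenizer(new StringReader(s));
        final Map<String, String> result = new HashMap<>();
        while (t.nextToken() == StreamTokenizer.TT_WORD) {
            final String name = t.sval;
            if (t.nextToken() != '=') {
                throw new IOException("Expecting '=' after " + name);
            }
            final int quote = t.nextToken();
            if (quote != '"' && quote != '\'') {
                throw new IOException("Expecting a quoted value for " + name);
            }
            result.put(name, t.sval);   // sval holds the text between the quotes
        }
        return result;
    }

For example, parseArgs("a=\"1\" b='2'") would return {a=1, b=2}.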

      return Collections.emptyList();
    }
    try {
      remote.sessionBegin();
      InputStreamReader is = new InputStreamReader(remote.branches(nodes), "US-ASCII");
      StreamTokenizer st = new StreamTokenizer(is);
      st.ordinaryChars('0', '9');
      st.wordChars('0', '9');
      st.eolIsSignificant(false);
      ArrayList<Nodeid> parseResult = new ArrayList<Nodeid>(nodes.size() * 4);
      while (st.nextToken() != StreamTokenizer.TT_EOF) {
        parseResult.add(Nodeid.fromAscii(st.sval));
      }
      if (parseResult.size() != nodes.size() * 4) {
        throw new HgRemoteConnectionException(String.format("Bad number of nodeids in result (shall be factor 4), expected %d, got %d", nodes.size()*4, parseResult.size()));
      }
      // … (snippet truncated)
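The ordinaryChars/wordChars pair above is the standard trick for reading hash-like identifiers: it first strips the digits' built-in number-parsing role and then makes them word characters, so a 40-character hex nodeid arrives as a single TT_WORD. A standalone sketch of the same idea (the input literal is made up, not the hg wire format, and the usual java.io and java.util imports are assumed):

      final StreamTokenizer st = new StreamTokenizer(new StringReader("00aa11bb 22cc33dd"));
      st.ordinaryChars('0', '9');       // remove the digits' built-in numeric role...
      st.wordChars('0', '9');           // ...and fold them into words instead
      final List<String> hashes = new ArrayList<String>();
      while (st.nextToken() != StreamTokenizer.TT_EOF) {
        hashes.add(st.sval);            // each whitespace-separated hash is one TT_WORD
      }
      // hashes -> ["00aa11bb", "22cc33dd"]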

    /**
     * …
     * the last row is followed by a blank line.
     * @param input the input stream.
     */

   public static jMatrix read (BufferedReader input) throws java.io.IOException {
      StreamTokenizer tokenizer= new StreamTokenizer(input);

      // Although StreamTokenizer will parse numbers, it doesn't recognize
      // scientific notation (E or D); however, Double.valueOf does.
      // The strategy here is to disable StreamTokenizer's number parsing.
      // We'll only get whitespace delimited words, EOL's and EOF's.
      // These words should all be numbers, for Double.valueOf to parse.

      tokenizer.resetSyntax();
      tokenizer.wordChars(0,255);
      tokenizer.whitespaceChars(0, ' ');
      tokenizer.eolIsSignificant(true);
      java.util.Vector v = new java.util.Vector();

      // Ignore initial empty lines
      while (tokenizer.nextToken() == StreamTokenizer.TT_EOL);
      if (tokenizer.ttype == StreamTokenizer.TT_EOF)
         throw new java.io.IOException("Unexpected EOF on matrix read.");
      do {
         v.addElement(Double.valueOf(tokenizer.sval)); // Read & store 1st row.
      } while (tokenizer.nextToken() == StreamTokenizer.TT_WORD);

      int n = v.size();  // Now we've got the number of columns!
      double row[] = new double[n];
      for (int j=0; j<n; j++)  // extract the elements of the 1st row.
         row[j]=((Double)v.elementAt(j)).doubleValue();
      v.removeAllElements();
      v.addElement(row);  // Start storing rows instead of columns.
      while (tokenizer.nextToken() == StreamTokenizer.TT_WORD) {
         // While non-empty lines
         v.addElement(row = new double[n]);
         int j = 0;
         do {
            if (j >= n) throw new java.io.IOException
               ("Row " + v.size() + " is too long.");
            row[j++] = Double.valueOf(tokenizer.sval).doubleValue();
         } while (tokenizer.nextToken() == StreamTokenizer.TT_WORD);
         if (j < n) throw new java.io.IOException
            ("Row " + v.size() + " is too short.");
      }
      int m = v.size();  // Now we've got the number of rows.
      double[][] A = new double[m][];
      // … (snippet truncated)
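A hypothetical usage of the read method above (this assumes the truncated tail copies the collected rows into A and returns a jMatrix built from it, which the visible code suggests but does not show):

      String text = "1 2 3\n"
                  + "4 5 6\n";
      jMatrix m = jMatrix.read(new BufferedReader(new StringReader(text)));  // a 2 x 3 matrix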

    /**
     * …
     *
     * @throws RuntimeException if a parsing error occurs.
     */
    public List<BeanAST> parse()
    {
        tokenizer = new StreamTokenizer( new BufferedReader( new InputStreamReader(stream) ) );
        tokenizer.resetSyntax();
        tokenizer.wordChars('a', 'z');
        tokenizer.wordChars('A', 'Z');
        tokenizer.wordChars('0', '9');
        tokenizer.wordChars('_', '_');
        // … (snippet truncated)
