diff -r -u jruby-1.0.2/src/org/jruby/lexer/yacc/HeredocTerm.java jruby-1.0.2/src/org/jruby/lexer/yacc/HeredocTerm.java
--- jruby-1.0.2/src/org/jruby/lexer/yacc/HeredocTerm.java	2007-11-01 10:31:58.000000000 -0700
+++ jruby-1.0.2/src/org/jruby/lexer/yacc/HeredocTerm.java	2007-11-02 00:47:37.000000000 -0700
@@ -48,12 +48,25 @@
     public int parseString(RubyYaccLexer lexer, LexerSource src) throws java.io.IOException {
         boolean indent = (func & RubyYaccLexer.STR_FUNC_INDENT) != 0;
         ByteList str = new ByteList();
-
+        // BEGIN NETBEANS MODIFICATIONS
+        if (lexer.getPreserveSpaces()) {
+            boolean done = src.matchString(eos, indent);
+            if (done) {
+                lexer.yaccValue = new StrNode(lexer.getPosition(), str);
+                lexer.setStrTerm(new StringTerm(-1, '\0', '\0'));
+                src.setIsANewLine(true);
+                return Tokens.tSTRING_END;
+            }
+        }
+        // END NETBEANS MODIFICATIONS
         if (src.peek((char) RubyYaccLexer.EOF)) {
             throw new SyntaxException(src.getPosition(), "can't find string \"" + eos + "\" anywhere before EOF");
         }
         if (src.wasBeginOfLine() && src.matchString(eos, indent)) {
-            src.unreadMany(lastLine);
+            // BEGIN NETBEANS MODIFICATIONS
+            if (lastLine != null)
+            // END NETBEANS MODIFICATIONS
+               src.unreadMany(lastLine);
             lexer.yaccValue = new Token(eos, lexer.getPosition());
             return Tokens.tSTRING_END;
         }
@@ -88,10 +101,20 @@
                 switch (c = src.read()) {
                 case '$':
                 case '@':
+                    // BEGIN NETBEANS MODIFICATIONS
+                    if (processingEmbedded == LOOKING_FOR_EMBEDDED) {
+                        processingEmbedded = EMBEDDED_DVAR;
+                    }
+                    // END NETBEANS MODIFICATIONS
                     src.unread(c);
                     lexer.setValue(new Token("#" + c, lexer.getPosition()));
                     return Tokens.tSTRING_DVAR;
                 case '{':
+                    // BEGIN NETBEANS MODIFICATIONS
+                    if (processingEmbedded == LOOKING_FOR_EMBEDDED) {
+                        processingEmbedded = EMBEDDED_DEXPR;
+                    }
+                    // END NETBEANS MODIFICATIONS
                     lexer.setValue(new Token("#" + c, lexer.getPosition()));
                     return Tokens.tSTRING_DBEG;
                 }
@@ -103,9 +126,20 @@
             // MRI has extra pointer which makes our code look a little bit more strange in 
             // comparison
             do {
-                if ((c = new StringTerm(func, '\n', '\0').parseStringIntoBuffer(src, buffer)) == RubyYaccLexer.EOF) {
+                // BEGIN NETBEANS MODIFICATIONS
+                //if ((c = new StringTerm(func, '\n', '\0').parseStringIntoBuffer(src, buffer)) == RubyYaccLexer.EOF) {                
+                StringTerm stringTerm = new StringTerm(func, '\n', '\0');
+                stringTerm.processingEmbedded = processingEmbedded;
+                if ((c = stringTerm.parseStringIntoBuffer(src, buffer)) == RubyYaccLexer.EOF) {
+                // END NETBEANS MODIFICATIONS    
                     throw new SyntaxException(src.getPosition(), "can't find string \"" + eos + "\" anywhere before EOF");
                 }
+                // BEGIN NETBEANS MODIFICATIONS
+                // Completed expansion token
+                if (processingEmbedded == EMBEDDED_DVAR || processingEmbedded == EMBEDDED_DEXPR) {
+                    processingEmbedded = LOOKING_FOR_EMBEDDED;
+                }
+                // END NETBEANS MODIFICATIONS    
                 if (c != '\n') {
                     lexer.yaccValue = new StrNode(lexer.getPosition(), buffer);
                     return Tokens.tSTRING_CONTENT;
@@ -121,9 +155,139 @@
             str = buffer;
         }
 
-        src.unreadMany(lastLine);
+        // BEGIN NETBEANS MODIFICATIONS
+        // DVARs last only for a single string token so shut it off here.
+        if (processingEmbedded == EMBEDDED_DVAR) {
+            processingEmbedded = LOOKING_FOR_EMBEDDED;
+//        } else if ((processingEmbedded == EMBEDDED_DEXPR) && (str.length() == 0)) {
+//            // Unbalanced expression - see #96485
+//            processingEmbedded = LOOKING_FOR_EMBEDDED;
+        }
+        // END NETBEANS MODIFICATIONS
+
+        // BEGIN NETBEANS MODIFICATIONS
+        if (lastLine != null)
+        // END NETBEANS MODIFICATIONS
+            src.unreadMany(lastLine);
+        // BEGIN NETBEANS MODIFICATIONS
+        // When handling heredocs in syntax highlighting mode, process the end marker
+        // separately
+        if (lastLine == null) {
+            src.unreadMany(eos+"\n"); // \r?
+            //done = true;
+        } else {
+        // END NETBEANS MODIFICATIONS
         lexer.setStrTerm(new StringTerm(-1, '\0', '\0'));
+        // BEGIN NETBEANS MODIFICATIONS
+        }
+        // END NETBEANS MODIFICATIONS
         lexer.yaccValue = new StrNode(lexer.getPosition(), str);
         return Tokens.tSTRING_CONTENT;
     }
+
+    // BEGIN NETBEANS MODIFICATIONS
+    /** 
+     * Report whether this string should be substituting things like \n into newlines.
+     * E.g., are we dealing with a "" string or a '' string (or their alternate representations)?
+     */
+    public boolean isSubstituting() {
+        return (func & RubyYaccLexer.STR_FUNC_EXPAND) != 0;
+    }
+
+    /**
+     * Record any mutable state from this StrTerm such that it can
+     * be set back to this exact state through a call to {@link #setMutableState}
+     * later on. Necessary for incremental lexing where we may restart
+     * lexing parts of a string (since they can be split up due to
+     * Ruby embedding like "Evaluated by Ruby: #{foo}").
+     */
+    public Object getMutableState() {
+        return new MutableTermState(processingEmbedded);
+    }
+
+    /**
+     * Apply the given state object (earlier returned by {@link #getMutableState})
+     * to this HeredocTerm to revert its state to the earlier snapshot.
+     */
+    public void setMutableState(Object o) {
+        MutableTermState state = (MutableTermState)o;
+        if (state != null) {
+            this.processingEmbedded = state.processingEmbedded;
+        }
+    }
+    
+    public void splitEmbeddedTokens() {
+        if (processingEmbedded == IGNORE_EMBEDDED) {
+            processingEmbedded = LOOKING_FOR_EMBEDDED;
+        }
+    }
+
+    private class MutableTermState {
+        private MutableTermState(int embeddedCode) {
+            this.processingEmbedded = embeddedCode;
+        }
+        
+        public boolean equals(Object obj) {
+            if (obj == null)
+                return false;
+            if (getClass() != obj.getClass())
+                return false;
+            final MutableTermState other = (MutableTermState) obj;
+
+            if (this.processingEmbedded != other.processingEmbedded)
+                return false;
+            return true;
+        }
+
+        public int hashCode() {
+            int hash = 7;
+
+            hash = 83 * hash + this.processingEmbedded;
+            return hash;
+        }
+        
+        public String toString() {
+            return "HeredocTermState[" + processingEmbedded + "]";
+        }
+        
+        private int processingEmbedded;
+    }
+    
+    // Equals - primarily for unit testing (incremental lexing tests
+    // where we do full-file-lexing and compare state to incremental lexing)
+    public boolean equals(Object obj) {
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        final HeredocTerm other = (HeredocTerm) obj;
+
+        if (this.eos != other.eos &&
+            (this.eos == null || !this.eos.equals(other.eos)))
+            return false;
+        if (this.func != other.func)
+            return false;
+        if (this.lastLine != other.lastLine &&
+            (this.lastLine == null || !this.lastLine.equals(other.lastLine)))
+            return false;
+        return true;
+    }
+
+    public int hashCode() {
+        int hash = 7;
+
+        hash = 83 * hash + (this.eos != null ? this.eos.hashCode()
+                                             : 0);
+        hash = 83 * hash + this.func;
+        hash = 83 * hash + (this.lastLine != null ? this.lastLine.hashCode()
+                                                  : 0);
+        return hash;
+    }
+
+    
+    public String toString() {
+        return "HeredocTerm[" + func + "," + eos + "," + lastLine + "," + processingEmbedded + "]";
+    }
+
+    // END NETBEANS MODIFICATIONS
 }
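
The getMutableState()/setMutableState() pair added above (and declared on StrTerm later in this patch) exists so an incremental lexer can snapshot a string term at a token boundary and restore it before re-lexing a segment. A minimal sketch of that pattern, not part of the patch (the snapshot holder is hypothetical and relies only on the methods introduced here):

    import org.jruby.lexer.yacc.StrTerm;

    final class StrTermSnapshot {
        private final StrTerm term;
        private final Object state;   // opaque value from getMutableState()

        StrTermSnapshot(StrTerm term) {
            this.term = term;
            this.state = (term != null) ? term.getMutableState() : null;
        }

        /** Put the term back into the exact state captured at construction time. */
        void restore() {
            if (term != null) {
                term.setMutableState(state);
            }
        }
    }

The equals()/hashCode() methods above support the unit tests mentioned in the comment: lex a whole file in one pass, lex it incrementally, and compare the saved states.
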
diff -r -u jruby-1.0.2/src/org/jruby/lexer/yacc/LexState.java jruby-1.0.2/src/org/jruby/lexer/yacc/LexState.java
--- jruby-1.0.2/src/org/jruby/lexer/yacc/LexState.java	2007-11-01 10:31:58.000000000 -0700
+++ jruby-1.0.2/src/org/jruby/lexer/yacc/LexState.java	2007-11-02 00:47:37.000000000 -0700
@@ -47,6 +47,40 @@
 
     private final String debug;
 
+    // BEGIN NETBEANS MODIFICATIONS
+    private int ordinal;
+    static {
+        EXPR_BEG.ordinal = 0;
+        EXPR_END.ordinal = 1;
+        EXPR_ARG.ordinal = 2;
+        EXPR_CMDARG.ordinal = 3;
+        EXPR_ENDARG.ordinal = 4;
+        EXPR_MID.ordinal = 5;
+        EXPR_FNAME.ordinal = 6;
+        EXPR_DOT.ordinal = 7;
+        EXPR_CLASS.ordinal = 8;
+    }
+    
+    public int getOrdinal() {
+        return ordinal;
+    }
+    
+    public static LexState fromOrdinal(int ordinal) {
+        switch (ordinal) {
+            case 0: return EXPR_BEG;
+            case 1: return EXPR_END;
+            case 2: return EXPR_ARG;
+            case 3: return EXPR_CMDARG;
+            case 4: return EXPR_ENDARG;
+            case 5: return EXPR_MID;
+            case 6: return EXPR_FNAME;
+            case 7: return EXPR_DOT;
+            case 8: return EXPR_CLASS;
+        }
+        return null;
+    }
+    // END NETBEANS MODIFICATIONS
+    
     private LexState(String debug) {
         this.debug = debug;
     }
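
The ordinal mapping above lets a client flatten the LexState singletons to small integers, for example to persist lexer state in an incremental-lexing snapshot. A hedged sketch of that assumed use (the codec class is hypothetical):

    import org.jruby.lexer.yacc.LexState;

    final class LexStateCodec {
        static int encode(LexState state) {
            return state.getOrdinal();
        }

        static LexState decode(int ordinal) {
            LexState state = LexState.fromOrdinal(ordinal);
            // fromOrdinal returns null for unknown values; fall back to the initial state
            return (state != null) ? state : LexState.EXPR_BEG;
        }
    }
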
diff -r -u jruby-1.0.2/src/org/jruby/lexer/yacc/LexerSource.java jruby-1.0.2/src/org/jruby/lexer/yacc/LexerSource.java
--- jruby-1.0.2/src/org/jruby/lexer/yacc/LexerSource.java	2007-11-01 10:31:58.000000000 -0700
+++ jruby-1.0.2/src/org/jruby/lexer/yacc/LexerSource.java	2007-11-02 00:47:37.000000000 -0700
@@ -445,4 +445,24 @@
             return null;
         }
     }
+    
+    // BEGIN NETBEANS MODIFICATIONS
+    public int chompReadAhead() {
+        int result = bufLength+1;
+        bufLength = -1;
+        return result;
+    }
+    
+    public boolean isANewLine() {
+        return oneAgo == '\n';
+    }
+    // In various places where we call LexerSource.unread(), the nextCharIsOnANewline value becomes inaccurate (column/line do too, but those don't matter here)
+    public void setIsANewLine(boolean nextCharIsOnANewLine) {
+        oneAgo = nextCharIsOnANewLine ? '\n' : oneAgo;
+    }
+    
+    public void setOffset(int offset) {
+        this.offset = offset;
+    }
+    // END NETBEANS MODIFICATIONS
 }
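
setIsANewLine() exists because unread() does not restore the beginning-of-line flag; RubyYaccLexer (later in this patch) saves and restores it around identifier scanning. A small sketch of the same pattern in isolation, assuming read(), unread() and wasBeginOfLine() are accessible to the caller:

    import java.io.IOException;
    import org.jruby.lexer.yacc.LexerSource;

    final class NewlineSafePeek {
        /** Peek one character without disturbing the source's newline flag. */
        static char peek(LexerSource src) throws IOException {
            boolean wasNewline = src.wasBeginOfLine();
            char c = src.read();
            src.unread(c);
            src.setIsANewLine(wasNewline);   // unread() alone would leave the flag stale
            return c;
        }
    }
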
diff -r -u jruby-1.0.2/src/org/jruby/lexer/yacc/RubyYaccLexer.java jruby-1.0.2/src/org/jruby/lexer/yacc/RubyYaccLexer.java
--- jruby-1.0.2/src/org/jruby/lexer/yacc/RubyYaccLexer.java	2007-11-01 10:31:58.000000000 -0700
+++ jruby-1.0.2/src/org/jruby/lexer/yacc/RubyYaccLexer.java	2007-11-02 00:47:37.000000000 -0700
@@ -74,6 +74,124 @@
     // grammar use.
     private LexState lex_state;
     
+// BEGIN NETBEANS MODIFICATIONS 
+    // Whether or not the lexer should be "space preserving" - see setPreserveSpaces/getPreserveSpaces.
+    // When set, the lexer returns whitespace sequences and code comments as separate
+    // tokens to the client. Parsers typically do not want to see any
+    // whitespace or comment tokens - but an IDE trying to tokenize a chunk of source code
+    // does want to identify these separately. The default, false, means parser mode.
+    private boolean preserveSpaces;
+    
+    // List of HeredocTerms to be applied when we see a new line.
+    // This is done to be able to handle heredocs in input source order (instead of
+    // the normal JRuby operation of handling them out of order by stashing the rest of
+    // the line on the side while searching for the end of the heredoc, and then pushing
+    // the line back on the input before proceeding). Out-of-order handling of tokens
+    // is difficult for the IDE, so in syntax highlighting mode we produce the
+    // output differently. When we see a heredoc token, we return a normal string-begin
+    // token, but we also push the heredoc term (without line-state) onto the "newline-list"
+    // and continue processing normally (with no string strterm in effect).
+    // Whenever we get to a new line, we look at the newline list, and if we find something
+    // there, we pull it off, set it as the current string term, and use it to process
+    // the string literal and its end token.
+    // NOTE: This list should not be modified but rather duplicated, in order to ensure
+    // that incremental lexing (which relies on pulling out these lists at token boundaries)
+    // does not see them change underneath it.
+    
+    public static class HeredocContext {
+        private HeredocTerm[] heredocTerms;
+        private boolean[] lookingForEnds;
+
+        
+        public HeredocContext(HeredocTerm term) {
+            this.heredocTerms = new HeredocTerm[] { term, term };
+            this.lookingForEnds = new boolean[] { false, true };
+        }
+
+        private HeredocContext(HeredocTerm[] terms, boolean[] lookingForEnds) {
+            this.heredocTerms = terms;
+            this.lookingForEnds = lookingForEnds;
+        }
+        
+        private HeredocContext add(HeredocTerm h) {
+            // Add 2 entries: one for starting lexing of the string, one for the end token
+            HeredocTerm[] copy = new HeredocTerm[heredocTerms.length+2];
+            System.arraycopy(heredocTerms, 0, copy, 0, heredocTerms.length);
+            copy[heredocTerms.length] = h;
+            copy[heredocTerms.length+1] = h;
+
+            boolean[] copy2 = new boolean[lookingForEnds.length+2];
+            System.arraycopy(lookingForEnds, 0, copy2, 0, lookingForEnds.length);
+            copy2[lookingForEnds.length] = false;
+            copy2[lookingForEnds.length+1] = true;
+            
+            HeredocContext hc = new HeredocContext(copy, copy2);
+            
+            return hc;
+        }
+
+        private HeredocTerm getTerm() {
+            return heredocTerms[0];
+        }
+        
+        private HeredocContext pop() {
+            if (heredocTerms.length > 1) {
+                HeredocTerm[] copy = new HeredocTerm[heredocTerms.length-1];
+                System.arraycopy(heredocTerms, 1, copy, 0, copy.length);
+
+                boolean[] copy2 = new boolean[lookingForEnds.length-1];
+                System.arraycopy(lookingForEnds, 1, copy2, 0, copy2.length);
+                
+                HeredocContext hc = new HeredocContext(copy, copy2);
+                return hc;
+            } else {
+                return null;
+            }
+        }
+        
+        public boolean isLookingForEnd() {
+            return lookingForEnds[0];
+        }
+        
+        //@Override
+        public String toString() {
+            StringBuilder sb = new StringBuilder("HeredocContext(count=");
+            sb.append(Integer.toString(heredocTerms.length));
+            sb.append("):");
+            for (int i = 0; i < heredocTerms.length; i++) {
+                if (i > 0) {
+                    sb.append(",");
+                }
+                sb.append("end:");
+                sb.append(lookingForEnds[i]);
+                sb.append(",term:");
+                sb.append(heredocTerms[i]);
+            }
+            return sb.toString();
+        }
+        
+        //@Override
+        public int hashCode() {
+            return heredocTerms[0].getMutableState().hashCode();
+        }
+
+        //@Override
+        public boolean equals(Object other) {
+            if (other instanceof HeredocContext) {
+               HeredocContext o = (HeredocContext)other;
+               if (o.heredocTerms.length != heredocTerms.length) {
+                   return false;
+               }
+               return heredocTerms[0].getMutableState().equals(o.heredocTerms[0].getMutableState());
+            } else {
+               return false;
+            }
+        }
+    }
+    public HeredocContext heredocContext;
+            
+// END NETBEANS MODIFICATIONS
+
     // Tempory buffer to build up a potential token.  Consumer takes responsibility to reset 
     // this before use.
     private StringBuffer tokenBuffer = new StringBuffer(60);
@@ -111,6 +229,11 @@
     	yaccValue = null;
     	src = null;
         lex_state = null;
+        // BEGIN NETBEANS MODIFICATIONS
+        // The null state causes problems in some scenarios for me. Besides, using null to
+        // represent the initial state doesn't seem like a good idea.
+        lex_state = LexState.EXPR_BEG;
+        // END NETBEANS MODIFICATIONS
         resetStacks();
         lex_strterm = null;
         commandStart = true;
@@ -409,7 +532,40 @@
             } while ((c = src.read()) != EOF && isIdentifierChar(c));
             src.unread(c);
         }
+        // BEGIN NETBEANS MODIFICATIONS
+        // See issue #93990
+        // It is very difficult for the IDE (especially with incremental lexing)
+        // to handle heredocs with additional input on the line, where the
+        // input ends up getting processed out of order (JRuby reads the rest
+        // of the line, processes up to the end token, then stashes the rest of the line
+        // back on the input and continues - which could process another heredoc -
+        // and then just jumps over the heredoc bodies, since input is processed out of order).
+        // Instead, use our own HeredocTerms which behave differently; they don't
+        // mess with the output, and are handled differently from within
+        // the lexer in that they are invoked again on the next line (in order).
+        if (preserveSpaces) {
+            String tok = tokenBuffer.toString();
+            HeredocTerm h = new HeredocTerm(tok, func, null);
 
+            if (term == '`') {
+                yaccValue = new Token("`", getPosition());
+                return Tokens.tXSTRING_BEG;
+            }
+
+            yaccValue = new Token("\"", getPosition());
+            
+            if (heredocContext == null) {
+                heredocContext = new HeredocContext(h);
+            } else {
+                heredocContext = heredocContext.add(h);
+            }
+            
+            return Tokens.tSTRING_BEG;
+            
+        }
+        // END NETBEANS MODIFICATIONS
+        
+        
         String line = src.readLine() + '\n';
         String tok = tokenBuffer.toString();
         lex_strterm = new HeredocTerm(tok, func, line);
@@ -452,9 +608,15 @@
         }
         src.unread(c);
         
+// BEGIN NETBEANS MODIFICATIONS
+      if (parserSupport != null) {
+// END NETBEANS MODIFICATIONS
         // Store away each comment to parser result so IDEs can do whatever they want with them.
         ISourcePosition position = startPosition.union(getPosition());
         parserSupport.getResult().addComment(new CommentNode(position, tokenBuffer.toString()));
+// BEGIN NETBEANS MODIFICATIONS
+      }
+// END NETBEANS MODIFICATIONS
         
         return c;
     }
@@ -589,13 +751,56 @@
         boolean spaceSeen = false;
         boolean commandState;
         
+        // BEGIN NETBEANS MODIFICATIONS
+        if (setSpaceSeen) {
+            spaceSeen = true;
+            setSpaceSeen = false;
+        }
+        // END NETBEANS MODIFICATIONS
+
+        // BEGIN NETBEANS MODIFICATIONS
+        // On new lines, possibly resume heredoc processing.
+        // See the documentation for heredocContext (the "newline-list") for an explanation of this.
+        if (heredocContext != null) {
+            if (heredocContext.isLookingForEnd()) {
+                HeredocTerm ht = heredocContext.getTerm();
+                lex_strterm = ht;
+            } else if (src.isANewLine()) {
+                // This assertion can be triggered; disabling it for now to keep
+                // the symptoms less severe
+                //assert lex_strterm == null;
+
+                HeredocTerm ht = heredocContext.getTerm();
+                lex_strterm = ht;
+                heredocContext = heredocContext.pop();
+            } 
+        }
+        // END NETBEANS MODIFICATIONS
+
         if (lex_strterm != null) {
+            // BEGIN NETBEANS MODIFICATIONS
+            try {
+            // END NETBEANS MODIFICATIONS
 			int tok = lex_strterm.parseString(this, src);
 			if (tok == Tokens.tSTRING_END || tok == Tokens.tREGEXP_END) {
 			    lex_strterm = null;
 			    lex_state = LexState.EXPR_END;
+                            // BEGIN NETBEANS MODIFICATIONS
+                            if (heredocContext != null && heredocContext.isLookingForEnd()) {
+                                heredocContext = heredocContext.pop();
+                            }
+                            // END NETBEANS MODIFICATIONS
 			}
 			return tok;
+            // BEGIN NETBEANS MODIFICATIONS
+            } catch (SyntaxException se) {
+                // If we abort in string parsing, throw away the str term
+                // such that we don't try again on restart
+                lex_strterm = null;
+                lex_state = LexState.EXPR_END;
+                throw se;
+            }
+            // END NETBEANS MODIFICATIONS
         }
 
         commandState = commandStart;
@@ -614,13 +819,57 @@
                 /* white spaces */
             case ' ': case '\t': case '\f': case '\r':
             case '\13': /* '\v' */
+              // BEGIN NETBEANS MODIFICATIONS
+              if (preserveSpaces) {
+                  // Collapse all whitespace into one token
+                  while (true) {
+                      c = src.read();
+                      if (c != ' ' && c != '\t' && c != '\f' && c != '\r' && c != '\13') {
+                          break;
+                      }
+                  }
+                  src.unread(c);
+                  yaccValue = new Token("whitespace", getPosition());
+                  setSpaceSeen = true;
+                  return Tokens.tWHITESPACE;
+              } else {
+              // END NETBEANS MODIFICATIONS
                 getPosition();
                 spaceSeen = true;
                 continue retry;
+              // BEGIN NETBEANS MODIFICATIONS
+              }
+              // END NETBEANS MODIFICATIONS
             case '#':		/* it's a comment */
+              // BEGIN NETBEANS MODIFICATIONS
+              if (preserveSpaces) {
+                  // Skip to end of the comment
+                  while ((c = src.read()) != '\n') {
+                      if (c == EOF) {
+                          break;
+                      }
+                  }
+
+                  yaccValue = new Token("line-comment", getPosition());
+                  setSpaceSeen = spaceSeen;
+                  // Ensure that commandStart and lex_state are updated
+                  // as they otherwise would be if preserveSpaces were false
+                  if (!(lex_state == LexState.EXPR_BEG ||
+                      lex_state == LexState.EXPR_FNAME ||
+                      lex_state == LexState.EXPR_DOT ||
+                      lex_state == LexState.EXPR_CLASS)) {
+                      commandStart = true;
+                      lex_state = LexState.EXPR_BEG;
+                  }
+                  return Tokens.tCOMMENT;
+              } else {
+              // END NETBEANS MODIFICATIONS
                 if (readComment(c) == 0) return 0;
                     
                 /* fall through */
+              // BEGIN NETBEANS MODIFICATIONS
+              }
+              // END NETBEANS MODIFICATIONS
             case '\n':
             	// Replace a string of newlines with a single one
                 while((c = src.read()) == '\n') {
@@ -628,6 +877,22 @@
                 }
                 src.unread( c );
                 getPosition();
+                // BEGIN NETBEANS MODIFICATIONS
+                if (preserveSpaces) {
+                    src.setIsANewLine(true);
+                    yaccValue = new Token("whitespace", getPosition());
+                    // Ensure that commandStart and lex_state are updated
+                    // as they otherwise would be if preserveSpaces were false
+                    if (!(lex_state == LexState.EXPR_BEG ||
+                        lex_state == LexState.EXPR_FNAME ||
+                        lex_state == LexState.EXPR_DOT ||
+                        lex_state == LexState.EXPR_CLASS)) {
+                        commandStart = true;
+                        lex_state = LexState.EXPR_BEG;
+                    }
+                    return Tokens.tWHITESPACE;
+                }
+                // END NETBEANS MODIFICATIONS
 
                 if (lex_state == LexState.EXPR_BEG ||
                     lex_state == LexState.EXPR_FNAME ||
@@ -720,11 +985,21 @@
                                     tokenBuffer.append(equalLabel);
                                     tokenBuffer.append(src.readLine());
                                     src.unread('\n');
+                                    // PENDING: src.setIsANewLine(true);
                                     break;
                                 }
                             }
                             
+// BEGIN NETBEANS MODIFICATIONS
+                          if (parserSupport != null)
+// END NETBEANS MODIFICATIONS
                             parserSupport.getResult().addComment(new CommentNode(getPosition(), tokenBuffer.toString()));
+                            // BEGIN NETBEANS MODIFICATIONS
+                            if (preserveSpaces) {
+                                yaccValue = new Token("here-doc", getPosition());
+                                return Tokens.tDOCUMENTATION;
+                            }
+                            // END NETBEANS MODIFICATIONS
                             continue retry;
                         }
 						src.unread(c);
@@ -1411,6 +1686,9 @@
 
             case '_':
                 if (src.wasBeginOfLine() && src.matchString("_END__", false)) {
+// BEGIN NETBEANS MODIFICATIONS
+                      if (parserSupport != null)
+// END NETBEANS MODIFICATIONS
                 	parserSupport.getResult().setEndSeen(true);
                     return 0;
                 }
@@ -1426,6 +1704,10 @@
                 break;
             }
     
+            // BEGIN NETBEANS MODIFICATIONS
+            // Need to undo newline status after reading too far
+            boolean wasNewline = src.wasBeginOfLine();
+            // END NETBEANS MODIFICATIONS
             do {
                 tokenBuffer.append(c);
                 /* no special multibyte character handling is needed in Java
@@ -1437,6 +1719,9 @@
                         tokenBuffer.append(c);
                     }
                 }*/
+                // BEGIN NETBEANS MODIFICATIONS
+                wasNewline = src.wasBeginOfLine();
+                // END NETBEANS MODIFICATIONS
                 c = src.read();
             } while (isIdentifierChar(c));
             
@@ -1449,6 +1734,9 @@
             	src.unread(peek);
             	src.unread(c);
             }
+            // BEGIN NETBEANS MODIFICATIONS
+            src.setIsANewLine(wasNewline);
+            // END NETBEANS MODIFICATIONS
             
             int result = 0;
 
@@ -1552,6 +1840,9 @@
             // Lame: parsing logic made it into lexer in ruby...So we
             // are emulating
             // FIXME:  I believe this is much simpler now...
+// BEGIN NETBEANS MODIFICATIONS
+          if (parserSupport != null) {
+// END NETBEANS MODIFICATIONS
             StaticScope scope = parserSupport.getCurrentScope();
             if (IdUtil.getVarType(tempVal) == IdUtil.LOCAL_VAR &&
                     last_state != LexState.EXPR_DOT &&
@@ -1559,7 +1850,9 @@
                     (scope.getLocalScope().isDefined(tempVal) >= 0)) {
                 lex_state = LexState.EXPR_END;
             }
-
+// BEGIN NETBEANS MODIFICATIONS
+          }
+// END NETBEANS MODIFICATIONS
             yaccValue = new Token(tempVal, getPosition());
 
             return result;
@@ -1809,4 +2102,75 @@
 		yaccValue = getInteger(number, 10);
 		return Tokens.tINTEGER;
     }
+
+// BEGIN NETBEANS MODIFICATIONS
+    /**
+     * Set whether or not the lexer should be "space preserving" - in other words, whether
+     * it should return whitespace sequences and code comments as separate
+     * tokens to the client. Parsers typically do not want to see any
+     * whitespace or comment tokens - but an IDE trying to tokenize a chunk of source code
+     * does want to identify these separately. The default, false, corresponds to parser mode.
+     *
+     * @param preserveSpaces If true, return space and comment sequences as tokens; if false, skip them
+     * @see #getPreserveSpaces
+     */
+    public void setPreserveSpaces(final boolean preserveSpaces) {
+        this.preserveSpaces = preserveSpaces;
+    }
+
+    /**
+     * Return whether or not the lexer should be "space preserving". For a description
+     * of what this means, see {@link #setPreserveSpaces}.
+     *
+     * @return True if space and comment sequences will be returned as
+     * tokens, false otherwise.
+     *
+     * @see #setPreserveSpaces
+     */
+    public boolean getPreserveSpaces() {
+        return preserveSpaces;
+    }
+    
+    public LexState getLexState() {
+        return lex_state;
+    }
+    
+    public void setLexState(final LexState lex_state) {
+        this.lex_state = lex_state;
+    }
+    
+    public boolean isSetSpaceSeen() {
+        return setSpaceSeen;
+    }
+    
+    public void setSpaceSeen(boolean setSpaceSeen) {
+        this.setSpaceSeen = setSpaceSeen;
+    }
+    
+    public boolean isCommandStart() {
+        return commandStart;
+    }
+    
+    public void setCommandStart(boolean commandStart) {
+        this.commandStart = commandStart;
+    }
+
+    public LexerSource getSource() {
+        return this.src;
+    }
+    
+    /* In normal JRuby, there is a "spaceSeen" flag which is local to yylex. It is
+     * used to interpret input based on whether a space was recently seen.
+     * Since I now bail out of yylex() when I see space, I need to be able
+     * to preserve this flag across yylex() calls. In most cases, "spaceSeen"
+     * should be set to false (as it previously was at the beginning of yylex()).
+     * However, when I've seen a space and have bailed out, I need to set spaceSeen=true
+     * on the next call to yylex(). That is what the following flag is for.
+     * It is set to true when we bail out on space (or in other states that previously
+     * didn't bail out while spaceSeen was true).
+     */
+    private boolean setSpaceSeen;
+
+    
+// END NETBEANS MODIFICATIONS
 }
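
Taken together, setPreserveSpaces(true), the heredocContext machinery and the new whitespace/comment token ids let an IDE drive the lexer token by token over a source buffer. The sketch below shows the assumed shape of such a driver; the advance()/token() pair, reset(), setSource() and the LexerSource factory reflect the JRuby API of this era and are assumptions to be adjusted to the version actually in use:

    import java.io.IOException;
    import java.io.StringReader;
    import org.jruby.lexer.yacc.LexerSource;
    import org.jruby.lexer.yacc.RubyYaccLexer;
    import org.jruby.lexer.yacc.SyntaxException;
    import org.jruby.parser.Tokens;

    final class IdeTokenDump {
        static void dump(String code) throws IOException {
            RubyYaccLexer lexer = new RubyYaccLexer();
            lexer.reset();
            lexer.setPreserveSpaces(true);   // IDE mode: emit whitespace/comment tokens
            // How a LexerSource is built varies between JRuby versions; this call is assumed.
            lexer.setSource(LexerSource.getSource("<ide>", new StringReader(code)));
            try {
                while (lexer.advance()) {
                    int id = lexer.token();
                    if (id == Tokens.tWHITESPACE || id == Tokens.tCOMMENT
                            || id == Tokens.tDOCUMENTATION) {
                        System.out.println("trivia: " + id);   // never returned in parser mode
                    } else {
                        System.out.println("token:  " + id);
                    }
                }
            } catch (SyntaxException se) {
                // No ParserSupport is attached here, which the parserSupport != null
                // guards above allow; lexing errors still surface as SyntaxException.
                System.out.println("syntax error: " + se.getMessage());
            }
        }
    }
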
diff -r -u jruby-1.0.2/src/org/jruby/lexer/yacc/StrTerm.java jruby-1.0.2/src/org/jruby/lexer/yacc/StrTerm.java
--- jruby-1.0.2/src/org/jruby/lexer/yacc/StrTerm.java	2007-11-01 10:31:58.000000000 -0700
+++ jruby-1.0.2/src/org/jruby/lexer/yacc/StrTerm.java	2007-11-02 00:47:37.000000000 -0700
@@ -29,4 +29,46 @@
 
 public abstract class StrTerm {
     public abstract int parseString(RubyYaccLexer lexer, LexerSource src) throws java.io.IOException;
+
+    // BEGIN NETBEANS MODIFICATIONS
+    /** Tell this string term to return separate tokens for embedded ruby code (#$foo, #@foo, #{foo}) */
+    public abstract void splitEmbeddedTokens();
+
+    /** 
+     * Report whether this string should be substituting things like \n into newlines (double
+     * quoting rules).
+     * E.g., are we dealing with a "" string or a '' string (or their alternate representations)?
+     */
+    public abstract boolean isSubstituting();
+    
+    // When StringTerm processes a string with an embedded code fragment (or variable),
+    // such as #{thiscode()}, it splits the string up at the beginning of the boundary
+    // and returns Tokens.tSTRING_DBEG or Tokens.tSTRING_DVAR. However, it doesn't
+    // split the string up where the embedded code ends; it just processes to the end.
+    // For my lexing purposes that's not good enough; I want to know where the embedded
+    // fragment ends (so I can lex that String as real Ruby code rather than just
+    // a String literal).
+    // The constants below track where we are in that process.
+    /** Default; ignore embedded fragments */
+    final static int IGNORE_EMBEDDED = 0;
+    /** Flag set when we are splitting embedded fragments into separate tokens but are not currently inside one */
+    final static int LOOKING_FOR_EMBEDDED = 1;
+    /** Flag set in embeddedCode when we are processing an embedded code expression: #{foo} */
+    final static int EMBEDDED_DEXPR = 2;
+    /** Flag set in embeddedCode when we are processing an embedded variable: #@foo */
+    final static int EMBEDDED_DVAR = 3;
+    /** State flag for embedded Ruby processing: IGNORE_EMBEDDED (0) when splitting is off,
+     * LOOKING_FOR_EMBEDDED between fragments, or the relevant embedded type (EMBEDDED_DVAR or EMBEDDED_DEXPR) */
+    protected int processingEmbedded;
+    /**
+     * Record any mutable state from this StrTerm such that it can
+     * be set back to this exact state through a call to {@link #setMutableState}
+     * later on. Necessary for incremental lexing where we may restart
+     * lexing parts of a string (since they can be split up due to
+     * Ruby embedding like "Evaluated by Ruby: #{foo}").
+     */
+    public abstract Object getMutableState();
+    /** Support for incremental lexing: set current state of the term. See {@link #getMutableState} */
+    public abstract void setMutableState(Object o);
+    // END NETBEANS MODIFICATIONS
 }
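
splitEmbeddedTokens() is the switch that turns on the splitting described above: it only moves the state from IGNORE_EMBEDDED to LOOKING_FOR_EMBEDDED, so the parser path (which never calls it) is unaffected. A sketch of the assumed IDE-side call (the helper class is hypothetical):

    import org.jruby.lexer.yacc.StrTerm;

    final class EmbeddedSplitting {
        /**
         * Prepare a restored string term for IDE lexing so that #{...}, #@var and
         * #$var boundaries are reported as separate tokens.
         */
        static boolean enable(StrTerm term) {
            term.splitEmbeddedTokens();     // no-op unless the term was in IGNORE_EMBEDDED
            return term.isSubstituting();   // true for double-quote-style terms
        }
    }
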
diff -r -u jruby-1.0.2/src/org/jruby/lexer/yacc/StringTerm.java jruby-1.0.2/src/org/jruby/lexer/yacc/StringTerm.java
--- jruby-1.0.2/src/org/jruby/lexer/yacc/StringTerm.java	2007-11-01 10:31:58.000000000 -0700
+++ jruby-1.0.2/src/org/jruby/lexer/yacc/StringTerm.java	2007-11-02 00:47:37.000000000 -0700
@@ -69,8 +69,20 @@
             space = 1;
         }
 
-        if (c == term && nest == 0) {
+        // BEGIN NETBEANS MODIFICATIONS
+        //if (c == term && nest == 0) {
+        if ((processingEmbedded == IGNORE_EMBEDDED || processingEmbedded == LOOKING_FOR_EMBEDDED) && (c == term) && (nest == 0)) {
+        // END NETBEANS MODIFICATIONS
             if ((func & RubyYaccLexer.STR_FUNC_QWORDS) != 0) {
+                // BEGIN NETBEANS MODIFICATIONS
+                if (processingEmbedded == LOOKING_FOR_EMBEDDED) { // Only make this change when lexing, not parsing
+                    // I want the terminating ")" to be passed as a string closure token,
+                    // not as a plain rparen, since I want it to match up with the 
+                    // string opening tag (and I don't want an unbalanced right paren)
+                    lexer.setValue(new Token(""+term, lexer.getPosition()));
+                    return Tokens.tSTRING_END;
+                }
+                // END NETBEANS MODIFICATIONS
                 func = -1;
                 lexer.getPosition();
                 return ' ';
@@ -89,26 +101,88 @@
         }
         ByteList buffer = new ByteList();
 
+        // BEGIN NETBEANS MODIFICATIONS
+        if ((processingEmbedded == EMBEDDED_DEXPR) && (c == '}')) {
+            processingEmbedded = LOOKING_FOR_EMBEDDED;
+            lexer.setValue(new Token("}", lexer.getPosition()));
+            return Tokens.tSTRING_CONTENT;
+        }
+        // END NETBEANS MODIFICATIONS
+        
         if ((func & RubyYaccLexer.STR_FUNC_EXPAND) != 0 && c == '#') {
             c = src.read();
             switch (c) {
             case '$':
             case '@':
+                // BEGIN NETBEANS MODIFICATIONS
+                if (processingEmbedded == LOOKING_FOR_EMBEDDED) {
+                    processingEmbedded = EMBEDDED_DVAR;
+                }
+                // END NETBEANS MODIFICATIONS
                 src.unread(c);
                 lexer.setValue(new Token("#" + c, lexer.getPosition()));
                 return Tokens.tSTRING_DVAR;
             case '{':
+                // BEGIN NETBEANS MODIFICATIONS
+                if (processingEmbedded == LOOKING_FOR_EMBEDDED) {
+                    processingEmbedded = EMBEDDED_DEXPR;
+                }
+                // END NETBEANS MODIFICATIONS
                 lexer.setValue(new Token("#" + c, lexer.getPosition())); 
                 return Tokens.tSTRING_DBEG;
             }
             buffer.append('#');
         }
         src.unread(c);
-        if (parseStringIntoBuffer(src, buffer) == 0) {
-            throw new SyntaxException(src.getPosition(), "unterminated string meets end of file");
+        // BEGIN NETBEANS MODIFICATIONS
+        //if (parseStringIntoBuffer(src, buffer) == 0) {
+        int parsed;
+        if (processingEmbedded == EMBEDDED_DEXPR) {
+            parsed = parseDExprIntoBuffer(src, buffer);
+        } else {
+            parsed = parseStringIntoBuffer(src, buffer);
+        }
+        if (parsed == 0) {
+        // END NETBEANS MODIFICATIONS
+            // BEGIN NETBEANS MODIFICATIONS
+            // We've read to the end of input and haven't found a corresponding String
+            // terminator. However, we don't always want to return the rest of the input as
+            // erroneous; in lexing mode, we want to stop at the first newline
+            // (at least for normal quoted strings, possibly not for heredocs etc.)
+            // and resume parsing from there, since it's likely that we're in the middle
+            // of typing a string.
+            // We've gotta push the "unused portion" of the string back into the input;
+            // the unused portion is the portion after the first newline.
+//            int n = buffer.length();
+//            for (int j = 0; j < n; j++) {
+//                if (buffer.charAt(j) == '\n') {
+//                    // Found it.
+//                    j++; // Include at least one
+//                    for (int k = n-1; k >= j; k--) {
+//                        // push input back in reverse order
+//                        src.unread(buffer.charAt(k));
+//                    }
+//                    // Fall through outer loop and throw SyntaxException
+//                    break;
+//                }
+//            }
+            //throw new SyntaxException(src.getPosition(), "unterminated string meets end of file");
+            throw new UnterminatedStringException(src.getPosition(), "unterminated string meets end of file");
+            // END NETBEANS MODIFICATIONS
         }
 
         lexer.setValue(new StrNode(lexer.getPosition(), buffer)); 
+
+        // BEGIN NETBEANS MODIFICATIONS
+        // DVARs last only for a single string token so shut it off here.
+        if (processingEmbedded == EMBEDDED_DVAR) {
+            processingEmbedded = LOOKING_FOR_EMBEDDED;
+        } else if ((processingEmbedded == EMBEDDED_DEXPR) && (buffer.length() == 0)) {
+            // Unbalanced expression - see #96485
+            processingEmbedded = LOOKING_FOR_EMBEDDED;
+        }
+        // END NETBEANS MODIFICATIONS
+
         return Tokens.tSTRING_CONTENT;
     }
 
@@ -158,13 +232,21 @@
         }
         return options | kcode;
     }
-
+    
     public char parseStringIntoBuffer(LexerSource src, ByteList buffer) throws java.io.IOException {
         char c;
 
         while ((c = src.read()) != RubyYaccLexer.EOF) {
             if (paren != '\0' && c == paren) {
                 nest++;
+            // BEGIN NETBEANS MODIFICATIONS
+            } else if (processingEmbedded == EMBEDDED_DEXPR && c == '}') {
+                src.unread(c);
+                break;
+            } else if (processingEmbedded == EMBEDDED_DVAR && !((c == '_') || c == '$' || c == '@' || Character.isLetter(c))) {
+                 src.unread(c);
+                 break;
+            // END NETBEANS MODIFICATIONS
             } else if (c == term) {
                 if (nest == 0) {
                     src.unread(c);
@@ -230,6 +312,72 @@
         return c;
     }
 
+    // BEGIN NETBEANS MODIFICATIONS
+    public char parseDExprIntoBuffer(LexerSource src, ByteList buffer) throws java.io.IOException {
+        char c;
+        
+        assert processingEmbedded == EMBEDDED_DEXPR;
+
+        while ((c = src.read()) != RubyYaccLexer.EOF) {
+            if (c == '{') {
+                nest++;
+            } else if (c == '}') {
+                if (nest == 0) {
+                    src.unread(c);
+                    break;
+                }
+                nest--;
+            } else if (c == '\\') {
+                c = src.read();
+                switch (c) {
+                case '\n':
+                    if ((func & RubyYaccLexer.STR_FUNC_QWORDS) != 0) {
+                        break;
+                    }
+                    if ((func & RubyYaccLexer.STR_FUNC_EXPAND) != 0) {
+                        continue;
+                    }
+                    buffer.append('\\');
+                    break;
+
+                case '\\':
+                    if ((func & RubyYaccLexer.STR_FUNC_ESCAPE) != 0) {
+                        buffer.append(c);
+                    }
+                    break;
+
+                default:
+                    if ((func & RubyYaccLexer.STR_FUNC_REGEXP) != 0) {
+                        src.unread(c);
+                        parseEscapeIntoBuffer(src, buffer);
+                        continue;
+                    } else if ((func & RubyYaccLexer.STR_FUNC_EXPAND) != 0) {
+                        src.unread(c);
+                        if ((func & RubyYaccLexer.STR_FUNC_ESCAPE) != 0) {
+                            buffer.append('\\');
+                        }
+                        c = src.readEscape();
+                    } else if ((func & RubyYaccLexer.STR_FUNC_QWORDS) != 0
+                            && Character.isWhitespace(c)) {
+                        /* ignore backslashed spaces in %w */
+                    } else if (c != term && !(paren != '\0' && c == paren)) {
+                        buffer.append('\\');
+                    }
+                }
+            } else if ((func & RubyYaccLexer.STR_FUNC_QWORDS) != 0
+                    && Character.isWhitespace(c)) {
+                src.unread(c);
+                break;
+            }
+            if (c == '\0' && (func & RubyYaccLexer.STR_FUNC_SYMBOL) != 0) {
+                throw new SyntaxException(src.getPosition(), "symbol cannot contain '\\0'");
+            }
+            buffer.append(c);
+        }
+        return c;
+    }
+    // END NETBEANS MODIFICATIONS
+
     // Was a goto in original ruby lexer
     private void escaped(LexerSource src, ByteList buffer) throws java.io.IOException {
         char c;
@@ -315,4 +463,139 @@
             buffer.append(c);
         }
     }
+
+    // BEGIN NETBEANS MODIFICATIONS
+    public boolean isSubstituting() {
+        return (func & RubyYaccLexer.STR_FUNC_EXPAND) != 0;
+    }
+
+    public Object getMutableState() {
+        return new MutableTermState(processingEmbedded, nest);
+    }
+
+    public void setMutableState(Object o) {
+        MutableTermState state = (MutableTermState)o;
+        if (state != null) {
+            this.processingEmbedded = state.processingEmbedded;
+            this.nest = state.nest;
+        }
+    }
+    
+    public void splitEmbeddedTokens() {
+        if (processingEmbedded == IGNORE_EMBEDDED) {
+            processingEmbedded = LOOKING_FOR_EMBEDDED;
+        }
+    }
+
+    private class MutableTermState {
+        private MutableTermState(int embeddedCode, int nest) {
+            this.processingEmbedded = embeddedCode;
+            this.nest = nest;
+        }
+        
+        public boolean equals(Object obj) {
+            if (obj == null)
+                return false;
+            if (getClass() != obj.getClass())
+                return false;
+            final MutableTermState other = (MutableTermState) obj;
+
+            if (this.nest != other.nest)
+                return false;
+            if (this.processingEmbedded != other.processingEmbedded)
+                return false;
+            return true;
+        }
+
+        public int hashCode() {
+            int hash = 7;
+
+            hash = 83 * hash + this.nest;
+            hash = 83 * hash + this.processingEmbedded;
+            return hash;
+        }
+        
+        public String toString() {
+            return "StringTermState[nest=" + nest + ",embed=" + processingEmbedded + "]";
+        }
+        
+        private int nest;
+        private int processingEmbedded;
+    }
+    
+    // Equals - primarily for unit testing (incremental lexing tests
+    // where we do full-file-lexing and compare state to incremental lexing)
+    public boolean equals(Object obj) {
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        final StringTerm other = (StringTerm) obj;
+
+        if (this.func != other.func)
+            return false;
+        if (this.term != other.term)
+            return false;
+        if (this.processingEmbedded != other.processingEmbedded)
+            return false;
+        if (this.paren != other.paren)
+            return false;
+        if (this.nest != other.nest)
+            return false;
+        return true;
+    }
+    
+    private static String toFuncString(int func) {
+        StringBuilder sb = new StringBuilder();
+        if ((func & RubyYaccLexer.STR_FUNC_ESCAPE) != 0) {
+            sb.append("escape|");
+        }
+        if ((func & RubyYaccLexer.STR_FUNC_EXPAND) != 0) {
+            sb.append("expand|");
+        }
+        if ((func & RubyYaccLexer.STR_FUNC_REGEXP) != 0) {
+            sb.append("regexp|");
+        }
+        if ((func & RubyYaccLexer.STR_FUNC_QWORDS) != 0) {
+            sb.append("qwords|");
+        }
+        if ((func & RubyYaccLexer.STR_FUNC_SYMBOL) != 0) {
+            sb.append("symbol|");
+        }
+        if ((func & RubyYaccLexer.STR_FUNC_INDENT) != 0) {
+            sb.append("indent|");
+        }
+        
+        String s = sb.toString();
+        
+        if (s.endsWith("|")) {
+            s = s.substring(0, s.length()-1);
+        } else if (s.length() == 0) {
+            s = "-";
+        }
+
+        return s;
+    }
+
+    public String toString() {
+        return "StringTerm[func=" + toFuncString(func) + ",term=" + term + ",paren=" + (int)paren + ",nest=" + nest + ",embed=" + processingEmbedded + "]";
+    }
+
+    public int hashCode() {
+        int hash = 7;
+
+        hash = 13 * hash + this.func;
+        hash = 13 * hash + this.term;
+        hash = 13 * hash + this.paren;
+        hash = 13 * hash + this.nest;
+        hash = 13 * hash + this.processingEmbedded;
+        return hash;
+    }
+    
+    public static class UnterminatedStringException extends SyntaxException {
+        public UnterminatedStringException(ISourcePosition pos, String message) {
+            super(pos, message);
+        }
+    }
+    // END NETBEANS MODIFICATIONS
 }
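
UnterminatedStringException lets a caller distinguish "the string simply runs to the end of the buffer" (common while the user is still typing) from other syntax errors. A hedged sketch of how an editor-side driver might use it; advance() and token() are assumed to match this JRuby version:

    import java.io.IOException;
    import org.jruby.lexer.yacc.RubyYaccLexer;
    import org.jruby.lexer.yacc.StringTerm;
    import org.jruby.lexer.yacc.SyntaxException;

    final class TolerantStep {
        /** Advance one token: -1 on end of input, -2 on an unterminated string, -3 on a hard error. */
        static int step(RubyYaccLexer lexer) throws IOException {
            try {
                return lexer.advance() ? lexer.token() : -1;
            } catch (StringTerm.UnterminatedStringException use) {
                return -2;   // e.g. color the rest of the line as a string and keep going
            } catch (SyntaxException se) {
                return -3;   // genuine error: report it and resynchronize
            }
        }
    }
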
diff -r -u jruby-1.0.2/src/org/jruby/libraries/RbConfigLibrary.java jruby-1.0.2/src/org/jruby/libraries/RbConfigLibrary.java
--- jruby-1.0.2/src/org/jruby/libraries/RbConfigLibrary.java	2007-11-01 10:32:20.000000000 -0700
+++ jruby-1.0.2/src/org/jruby/libraries/RbConfigLibrary.java	2007-11-02 00:47:37.000000000 -0700
@@ -77,9 +77,9 @@
         setConfig(configHash, "target_cpu", System.getProperty("os.arch"));
         
         String jrubyJarFile = "jruby.jar";
-        URL jrubyPropertiesUrl = Ruby.class.getClassLoader().getResource("jruby.properties");
+        URL jrubyPropertiesUrl = Ruby.class.getClassLoader().getResource("/org/jruby/jruby.properties");
         if (jrubyPropertiesUrl != null) {
-            Pattern jarFile = Pattern.compile("jar:file:.*?([a-zA-Z0-9.\\-]+\\.jar)!/jruby.properties");
+            Pattern jarFile = Pattern.compile("jar:file:.*?([a-zA-Z0-9.\\-]+\\.jar)!/org/jruby/jruby.properties");
             Matcher jarMatcher = jarFile.matcher(jrubyPropertiesUrl.toString());
             jarMatcher.find();
             if (jarMatcher.matches()) {
diff -r -u jruby-1.0.2/src/org/jruby/parser/Tokens.java jruby-1.0.2/src/org/jruby/parser/Tokens.java
--- jruby-1.0.2/src/org/jruby/parser/Tokens.java	2007-11-01 10:32:02.000000000 -0700
+++ jruby-1.0.2/src/org/jruby/parser/Tokens.java	2007-11-02 00:47:37.000000000 -0700
@@ -152,6 +152,16 @@
     int tRCURLY     = DefaultRubyParser.tRCURLY;
     int tPIPE       = DefaultRubyParser.tPIPE;
 
+    // BEGIN NETBEANS MODIFICATIONS
+    // I'd like to pull these out of the grammar, but I don't dare regenerate the yacc file etc.,
+    // so for now just use unused constants
+    //int tCOMMENT    = DefaultRubyParser.tCOMMENT;
+    //int tWHITESPACE    = DefaultRubyParser.tWHITESPACE;
+    int tCOMMENT    = 50000;
+    int tWHITESPACE    = 50001;
+    int tDOCUMENTATION = 50002;
+    // END NETBEANS MODIFICATIONS
+
     String[] operators = {"+@", "-@", "**", "<=>", "==", "===", "!=", ">=", "<=", "&&",
                           "||", "=~", "!~", "..", "...", "[]", "[]=", "<<", ">>", "::"};
 }
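
Because the new ids are chosen from a range DefaultRubyParser does not use, client code can dispatch on them without colliding with real grammar tokens. For example, an IDE colorer might map them to trivia categories (the enum and class below are hypothetical):

    import org.jruby.parser.Tokens;

    enum TriviaKind { WHITESPACE, COMMENT, DOCUMENTATION, NONE }

    final class TriviaClassifier {
        static TriviaKind classify(int tokenId) {
            switch (tokenId) {
                case Tokens.tWHITESPACE:    return TriviaKind.WHITESPACE;
                case Tokens.tCOMMENT:       return TriviaKind.COMMENT;
                case Tokens.tDOCUMENTATION: return TriviaKind.DOCUMENTATION;
                default:                    return TriviaKind.NONE;
            }
        }
    }
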
diff -r -u jruby-1.0.2/src/org/jruby/runtime/Constants.java jruby-1.0.2/src/org/jruby/runtime/Constants.java
--- jruby-1.0.2/src/org/jruby/runtime/Constants.java	2007-11-01 10:31:52.000000000 -0700
+++ jruby-1.0.2/src/org/jruby/runtime/Constants.java	2007-11-02 00:47:37.000000000 -0700
@@ -52,7 +52,13 @@
 
     static {
         try {
-            properties.load(Constants.class.getResourceAsStream("/jruby.properties"));
+            // BEGIN NETBEANS MODIFICATIONS
+            // See issue 114387 - can't load jruby.properties from the default package
+            // when running inside the IDE. JRuby will soon move this file into the
+            // org.jruby package as well.
+            //properties.load(Constants.class.getResourceAsStream("/jruby.properties"));
+            properties.load(Constants.class.getResourceAsStream("/org/jruby/jruby.properties"));
+            // END NETBEANS MODIFICATIONS
         } catch (IOException ioe) {
             ioe.printStackTrace();
         }
@@ -63,7 +69,7 @@
         VERSION = properties.getProperty("version.jruby");
         BUILD = properties.getProperty("build.jruby");
         TARGET = properties.getProperty("target.jruby");
-        Matcher matcher = Pattern.compile("\\$Revision: 1.8 $").matcher(properties.getProperty("revision.jruby"));
+        Matcher matcher = Pattern.compile("\\$Re" + "vision: (.*?) \\$").matcher(properties.getProperty("revision.jruby"));
         if (matcher.find()) {
             REVISION = matcher.group(1);
         } else {
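
The pattern is split into "\$Re" + "vision" so that CVS keyword expansion cannot rewrite the pattern literal in this source file, which is what appears to have happened to the removed line (leaving it hard-wired to "1.8"). At runtime the concatenation is still the single regex \$Revision: (.*?) \$; a quick check of what it extracts (the sample revision string is made up):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    final class RevisionCheck {
        public static void main(String[] args) {
            Pattern p = Pattern.compile("\\$Re" + "vision: (.*?) \\$");
            Matcher m = p.matcher("$Revision: 4104 $");   // hypothetical expanded keyword
            System.out.println(m.find() ? m.group(1) : "unknown");   // prints 4104
        }
    }
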
