
rustdoc: Add syntax highlighting #12416


Merged: 1 commit, Feb 23, 2014
174 changes: 174 additions & 0 deletions src/librustdoc/html/highlight.rs
@@ -0,0 +1,174 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Basic html highlighting functionality
//!
//! This module uses libsyntax's lexer to provide token-based highlighting for
//! the HTML documentation generated by rustdoc.

use std::str;
use std::io;

use syntax::parse;
use syntax::parse::lexer;
use syntax::diagnostic;
use syntax::codemap::{BytePos, Span};

use html::escape::Escape;

use t = syntax::parse::token;

/// Highlights some source code, returning the HTML output.
pub fn highlight(src: &str) -> ~str {
let sess = parse::new_parse_sess();
let handler = diagnostic::mk_handler();
let span_handler = diagnostic::mk_span_handler(handler, sess.cm);
let fm = parse::string_to_filemap(sess, src.to_owned(), ~"<stdin>");

let mut out = io::MemWriter::new();
doit(sess,
lexer::new_string_reader(span_handler, fm),
&mut out).unwrap();
str::from_utf8_lossy(out.unwrap()).into_owned()
}

/// Exhausts the `lexer` writing the output into `out`.
///
/// The general structure for this method is to iterate over each token,
/// possibly giving it an HTML span with a class specifying what flavor of token
/// it is. All source code emission is done as slices from the source map,
/// not from the tokens themselves, in order to stay true to the original
/// source.
fn doit(sess: @parse::ParseSess, lexer: lexer::StringReader,
out: &mut Writer) -> io::IoResult<()> {
use syntax::parse::lexer::Reader;

try!(write!(out, "<pre class='rust'>\n"));
let mut last = BytePos(0);
let mut is_attribute = false;
let mut is_macro = false;
loop {
let next = lexer.next_token();
let test = if next.tok == t::EOF {lexer.pos.get()} else {next.sp.lo};

// The lexer consumes all whitespace and non-doc-comments when iterating
// between tokens. If this token isn't directly adjacent to our last
// token, then we need to emit the whitespace/comment.
//
// If the gap has any '/' characters then we consider the whole thing a
// comment. This will classify some whitespace as a comment, but that
// doesn't matter too much for syntax highlighting purposes.
if test > last {
let snip = sess.cm.span_to_snippet(Span {
lo: last,
hi: test,
expn_info: None,
}).unwrap();
if snip.contains("/") {

Member commented:

Doesn't this mean something like

   let x = 1;

    // foo
   let y = 1;

will categorise from the newline after the first ; to just before the let as a comment? (I guess trimming it to divide whitespace from comments might be overly annoying.)

Member Author replied:

That is indeed true, but it's only used for changing the color of the text, so I don't think that it matters too much.
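
For illustration, a minimal standalone sketch of the heuristic being discussed, written in current Rust rather than against the 2014 libsyntax API in the diff (`classify_gap` and `escape` are hypothetical stand-ins, not rustdoc functions): any '/' in the text between two tokens turns the entire gap, whitespace included, into a single comment span.

    // Classify the gap between two adjacent tokens the way highlight.rs does:
    // a '/' anywhere in the gap marks the whole gap as a comment.
    fn classify_gap(gap: &str) -> String {
        if gap.contains('/') {
            format!("<span class='comment'>{}</span>", escape(gap))
        } else {
            escape(gap)
        }
    }

    // Minimal HTML escaping, standing in for rustdoc's `Escape` formatter.
    fn escape(s: &str) -> String {
        s.replace('&', "&amp;").replace('<', "&lt;").replace('>', "&gt;")
    }

    fn main() {
        // The reviewer's case: a blank line plus a comment between two statements.
        let gap = "\n\n    // foo\n   ";
        // The whole gap, surrounding whitespace included, becomes one comment span.
        println!("{}", classify_gap(gap));
    }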

try!(write!(out, "<span class='comment'>{}</span>",
Escape(snip)));
} else {
try!(write!(out, "{}", Escape(snip)));
}
}
last = next.sp.hi;
if next.tok == t::EOF { break }

let klass = match next.tok {
// If this '&' token is directly adjacent to another token, assume
// that it's the address-of operator instead of the and-operator.
// This allows us to give all pointers their own class (~ and @ are
// below).
t::BINOP(t::AND) if lexer.peek().sp.lo == next.sp.hi => "kw-2",
t::AT | t::TILDE => "kw-2",

// consider this as part of a macro invocation if there was a
// leading identifier
t::NOT if is_macro => { is_macro = false; "macro" }

// operators
t::EQ | t::LT | t::LE | t::EQEQ | t::NE | t::GE | t::GT |
t::ANDAND | t::OROR | t::NOT | t::BINOP(..) | t::RARROW |
t::BINOPEQ(..) | t::FAT_ARROW => "op",

// miscellaneous, no highlighting
t::DOT | t::DOTDOT | t::DOTDOTDOT | t::COMMA | t::SEMI |
t::COLON | t::MOD_SEP | t::LARROW | t::DARROW | t::LPAREN |
t::RPAREN | t::LBRACKET | t::LBRACE | t::RBRACE |
t::DOLLAR => "",

// This is the start of an attribute. We're going to want to
// continue highlighting it as an attribute until the ending ']' is
// seen, so skip out early. Down below we terminate the attribute
// span when we see the ']'.
t::POUND => {
is_attribute = true;
try!(write!(out, r"<span class='attribute'>\#"));
continue
}
t::RBRACKET => {
if is_attribute {
is_attribute = false;
try!(write!(out, "]</span>"));
continue
} else {
""
}
}

// text literals
t::LIT_CHAR(..) | t::LIT_STR(..) | t::LIT_STR_RAW(..) => "string",

// number literals
t::LIT_INT(..) | t::LIT_UINT(..) | t::LIT_INT_UNSUFFIXED(..) |
t::LIT_FLOAT(..) | t::LIT_FLOAT_UNSUFFIXED(..) => "number",

// keywords are also included in the identifier set
t::IDENT(ident, _is_mod_sep) => {
match t::get_ident(ident).get() {
"ref" | "mut" => "kw-2",

"self" => "self",
"false" | "true" => "boolval",

"Option" | "Result" => "prelude-ty",
"Some" | "None" | "Ok" | "Err" => "prelude-val",

_ if t::is_any_keyword(&next.tok) => "kw",
_ => {
if lexer.peek().tok == t::NOT {
is_macro = true;
"macro"
} else {
"ident"
}
}
}
}

t::LIFETIME(..) => "lifetime",
t::DOC_COMMENT(..) => "doccomment",
t::UNDERSCORE | t::EOF | t::INTERPOLATED(..) => "",
};

// as mentioned above, use the original source code instead of
// stringifying this token
let snip = sess.cm.span_to_snippet(next.sp).unwrap();
if klass == "" {
try!(write!(out, "{}", Escape(snip)));
} else {
try!(write!(out, "<span class='{}'>{}</span>", klass,
Escape(snip)));
}
}

write!(out, "</pre>\n")
}
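
As a rough illustration of what `highlight` emits (an approximation read off the code above, not captured from a real run): keywords such as `fn` get a `kw` span, plain identifiers get `ident`, punctuation from the "miscellaneous" arm is emitted with no span at all, and the whole block is wrapped in `<pre class='rust'>`. For the input `fn main() {}` the output would look something like:

    <pre class='rust'>
    <span class='kw'>fn</span> <span class='ident'>main</span>() {}</pre>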

28 changes: 26 additions & 2 deletions src/librustdoc/html/markdown.rs
@@ -35,6 +35,8 @@ use std::str;
use std::unstable::intrinsics;
use std::vec;

use html::highlight;

/// A unit struct which has the `fmt::Show` trait implemented. When
/// formatted, this struct will emit the HTML corresponding to the rendered
/// version of the contained markdown string.
@@ -95,6 +97,7 @@ extern {
fn sd_markdown_free(md: *sd_markdown);

fn bufnew(unit: libc::size_t) -> *buf;
fn bufputs(b: *buf, c: *libc::c_char);
fn bufrelease(b: *buf);

}
@@ -127,7 +130,27 @@ pub fn render(w: &mut io::Writer, s: &str) -> fmt::Result {
asize: text.len() as libc::size_t,
unit: 0,
};
(my_opaque.dfltblk)(ob, &buf, lang, opaque);
let rendered = if lang.is_null() {
false
} else {
vec::raw::buf_as_slice((*lang).data,
(*lang).size as uint, |rlang| {
let rlang = str::from_utf8(rlang).unwrap();
if rlang.contains("notrust") {
(my_opaque.dfltblk)(ob, &buf, lang, opaque);
true
} else {
false
}
})
};

if !rendered {
let output = highlight::highlight(text).to_c_str();
output.with_ref(|r| {
bufputs(ob, r)
})
}
})
}
}
@@ -181,7 +204,8 @@ pub fn find_testable_code(doc: &str, tests: &mut ::test::Collector) {
vec::raw::buf_as_slice((*lang).data,
(*lang).size as uint, |lang| {
let s = str::from_utf8(lang).unwrap();
(s.contains("should_fail"), s.contains("ignore"))
(s.contains("should_fail"), s.contains("ignore") ||
s.contains("notrust"))
})
};
if ignore { return }
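
Taken together, the two markdown.rs changes mean that a fenced code block tagged `notrust` in a doc comment is skipped by the doctest collector and rendered through sundown's default block handler rather than the new Rust highlighter. A sketch of how a doc comment might use the annotation (the item below is hypothetical, purely for illustration):

    /// A grammar snippet, not Rust code, so it opts out of both doctests
    /// and Rust syntax highlighting via the `notrust` annotation:
    ///
    /// ```notrust
    /// expr := term (('+' | '-') term)*
    /// ```
    pub fn parse_expr(input: &str) -> bool {
        // Hypothetical stub; only the doc comment above matters here.
        !input.is_empty()
    }
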
17 changes: 8 additions & 9 deletions src/librustdoc/html/render.rs
@@ -50,10 +50,10 @@ use syntax::parse::token::InternedString;
use clean;
use doctree;
use fold::DocFolder;
use html::escape::Escape;
use html::format::{VisSpace, Method, PuritySpace};
use html::layout;
use html::markdown::Markdown;
use html::highlight;

/// Major driving force in all rustdoc rendering. This contains information
/// about where in the tree-like hierarchy rendering is occurring and controls
@@ -1091,7 +1091,8 @@ fn item_module(w: &mut Writer, cx: &Context,

fn item_function(w: &mut Writer, it: &clean::Item,
f: &clean::Function) -> fmt::Result {
try!(write!(w, "<pre class='fn'>{vis}{purity}fn {name}{generics}{decl}</pre>",
try!(write!(w, "<pre class='rust fn'>{vis}{purity}fn \
{name}{generics}{decl}</pre>",
vis = VisSpace(it.visibility),
purity = PuritySpace(f.purity),
name = it.name.get_ref().as_slice(),
@@ -1112,7 +1113,7 @@ fn item_trait(w: &mut Writer, it: &clean::Item,
}

// Output the trait definition
try!(write!(w, "<pre class='trait'>{}trait {}{}{} ",
try!(write!(w, "<pre class='rust trait'>{}trait {}{}{} ",
VisSpace(it.visibility),
it.name.get_ref().as_slice(),
t.generics,
@@ -1231,7 +1232,7 @@ fn render_method(w: &mut Writer, meth: &clean::Item) -> fmt::Result {

fn item_struct(w: &mut Writer, it: &clean::Item,
s: &clean::Struct) -> fmt::Result {
try!(write!(w, "<pre class='struct'>"));
try!(write!(w, "<pre class='rust struct'>"));
try!(render_struct(w, it, Some(&s.generics), s.struct_type, s.fields,
s.fields_stripped, "", true));
try!(write!(w, "</pre>"));
@@ -1255,7 +1256,7 @@ fn item_struct(w: &mut Writer, it: &clean::Item,
}

fn item_enum(w: &mut Writer, it: &clean::Item, e: &clean::Enum) -> fmt::Result {
try!(write!(w, "<pre class='enum'>{}enum {}{}",
try!(write!(w, "<pre class='rust enum'>{}enum {}{}",
VisSpace(it.visibility),
it.name.get_ref().as_slice(),
e.generics));
@@ -1532,7 +1533,7 @@ fn render_impl(w: &mut Writer, i: &clean::Impl,

fn item_typedef(w: &mut Writer, it: &clean::Item,
t: &clean::Typedef) -> fmt::Result {
try!(write!(w, "<pre class='typedef'>type {}{} = {};</pre>",
try!(write!(w, "<pre class='rust typedef'>type {}{} = {};</pre>",
it.name.get_ref().as_slice(),
t.generics,
t.type_));
@@ -1625,9 +1626,7 @@ impl<'a> fmt::Show for Source<'a> {
try!(write!(fmt.buf, "<span id='{0:u}'>{0:1$u}</span>\n", i, cols));
}
try!(write!(fmt.buf, "</pre>"));
try!(write!(fmt.buf, "<pre class='rust'>"));
try!(write!(fmt.buf, "{}", Escape(s.as_slice())));
try!(write!(fmt.buf, "</pre>"));
try!(write!(fmt.buf, "{}", highlight::highlight(s.as_slice())));
Ok(())
}
}
15 changes: 15 additions & 0 deletions src/librustdoc/html/static/main.css
@@ -303,3 +303,18 @@ a {
.stability.Locked { border-color: #0084B6; color: #00668c; }

:target { background: #FDFFD3; }

pre.rust .kw { color: #cc782f; }
pre.rust .kw-2 { color: #3bbb33; }
pre.rust .prelude-ty { color: #3bbb33; }
pre.rust .number { color: #c13928; }
pre.rust .self { color: #c13928; }
pre.rust .boolval { color: #c13928; }
pre.rust .prelude-val { color: #c13928; }
pre.rust .op { color: #cc782f; }
pre.rust .comment { color: #533add; }
pre.rust .doccomment { color: #d343d0; }
pre.rust .macro { color: #d343d0; }
pre.rust .string { color: #c13928; }
pre.rust .lifetime { color: #d343d0; }
pre.rust .attribute { color: #d343d0 !important; }
1 change: 1 addition & 0 deletions src/librustdoc/lib.rs
@@ -38,6 +38,7 @@ pub mod core;
pub mod doctree;
pub mod fold;
pub mod html {
pub mod highlight;
pub mod escape;
pub mod format;
pub mod layout;