1 // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 //! Basic html highlighting functionality
12 //!
13 //! This module uses libsyntax's lexer to provide token-based highlighting for
14 //! the HTML documentation generated by rustdoc.
15
16 use std::str;
17 use std::io;
18
19 use syntax::parse;
20 use syntax::parse::lexer;
21 use syntax::codemap::{BytePos, Span};
22
23 use html::escape::Escape;
24
25 use t = syntax::parse::token;
26
/// Highlights some source code, returning the HTML output.
///
/// `src` is the Rust source text to highlight. `class` is an optional extra
/// CSS class appended to the enclosing `<pre class='rust ...'>` element;
/// `None` yields just `rust` (with a trailing space).
pub fn highlight(src: &str, class: Option<&str>) -> ~str {
    // A fresh parse session provides the codemap that `doit` slices the
    // original source text out of (highlighting emits source snippets, not
    // stringified tokens).
    let sess = parse::new_parse_sess();
    let fm = parse::string_to_filemap(&sess,
                                      src.to_strbuf(),
                                      "<stdin>".to_strbuf());

    // Render into an in-memory buffer. Writes to a MemWriter do not fail
    // with I/O errors, so the unwrap() on doit's IoResult cannot trip in
    // practice; from_utf8_lossy guards against any invalid UTF-8 anyway.
    let mut out = io::MemWriter::new();
    doit(&sess,
         lexer::new_string_reader(&sess.span_diagnostic, fm),
         class,
         &mut out).unwrap();
    str::from_utf8_lossy(out.unwrap().as_slice()).into_owned()
}
41
/// Exhausts the `lexer` writing the output into `out`.
///
/// The general structure for this method is to iterate over each token,
/// possibly giving it an HTML span with a class specifying what flavor of token
/// it's used. All source code emission is done as slices from the source map,
/// not from the tokens themselves, in order to stay true to the original
/// source.
///
/// Returns any I/O error produced while writing to `out`.
fn doit(sess: &parse::ParseSess, mut lexer: lexer::StringReader, class: Option<&str>,
        out: &mut Writer) -> io::IoResult<()> {
    use syntax::parse::lexer::Reader;

    try!(write!(out, "<pre class='rust {}'>\n", class.unwrap_or("")));
    // Byte position just past the previously emitted token; used to detect
    // and emit any whitespace/comment gap the lexer skipped over.
    let mut last = BytePos(0);
    // State flags for multi-token constructs:
    //   is_attribute          — inside a `#[...]` attribute span
    //   is_macro              — previous token was an ident followed by `!`
    //   is_macro_nonterminal  — previous token was `$` followed by an ident
    let mut is_attribute = false;
    let mut is_macro = false;
    let mut is_macro_nonterminal = false;
    loop {
        let next = lexer.next_token();
        // At EOF the token has no meaningful span, so use the lexer's final
        // position to flush any trailing whitespace/comment.
        let test = if next.tok == t::EOF {lexer.pos} else {next.sp.lo};

        // The lexer consumes all whitespace and non-doc-comments when iterating
        // between tokens. If this token isn't directly adjacent to our last
        // token, then we need to emit the whitespace/comment.
        //
        // If the gap has any '/' characters then we consider the whole thing a
        // comment. This will classify some whitespace as a comment, but that
        // doesn't matter too much for syntax highlighting purposes.
        if test > last {
            let snip = sess.span_diagnostic.cm.span_to_snippet(Span {
                lo: last,
                hi: test,
                expn_info: None,
            }).unwrap();
            if snip.as_slice().contains("/") {
                try!(write!(out, "<span class='comment'>{}</span>",
                            Escape(snip.as_slice())));
            } else {
                try!(write!(out, "{}", Escape(snip.as_slice())));
            }
        }
        last = next.sp.hi;
        if next.tok == t::EOF { break }

        // Classify the token into a CSS class name; "" means emit the source
        // text unwrapped. Branches that need custom output (attributes) write
        // directly and `continue` past the shared emission below.
        let klass = match next.tok {
            // If this '&' token is directly adjacent to another token, assume
            // that it's the address-of operator instead of the and-operator.
            // This allows us to give all pointers their own class (`Box` and
            // `@` are below).
            t::BINOP(t::AND) if lexer.peek().sp.lo == next.sp.hi => "kw-2",
            t::AT | t::TILDE => "kw-2",

            // consider this as part of a macro invocation if there was a
            // leading identifier
            t::NOT if is_macro => { is_macro = false; "macro" }

            // operators
            t::EQ | t::LT | t::LE | t::EQEQ | t::NE | t::GE | t::GT |
                t::ANDAND | t::OROR | t::NOT | t::BINOP(..) | t::RARROW |
                t::BINOPEQ(..) | t::FAT_ARROW => "op",

            // miscellaneous, no highlighting
            t::DOT | t::DOTDOT | t::DOTDOTDOT | t::COMMA | t::SEMI |
                t::COLON | t::MOD_SEP | t::LARROW | t::DARROW | t::LPAREN |
                t::RPAREN | t::LBRACKET | t::LBRACE | t::RBRACE => "",
            t::DOLLAR => {
                // `$ident` inside a macro definition is a nonterminal; flag
                // it so the following IDENT token gets the same class.
                if t::is_ident(&lexer.peek().tok) {
                    is_macro_nonterminal = true;
                    "macro-nonterminal"
                } else {
                    ""
                }
            }

            // This is the start of an attribute. We're going to want to
            // continue highlighting it as an attribute until the ending ']' is
            // seen, so skip out early. Down below we terminate the attribute
            // span when we see the ']'.
            t::POUND => {
                is_attribute = true;
                // NOTE: `\#` is the era's format-string escape for a literal
                // '#'; the span stays open until the matching RBRACKET.
                try!(write!(out, r"<span class='attribute'>\#"));
                continue
            }
            t::RBRACKET => {
                if is_attribute {
                    // Close the attribute span opened at POUND above.
                    is_attribute = false;
                    try!(write!(out, "]</span>"));
                    continue
                } else {
                    ""
                }
            }

            // text literals
            t::LIT_CHAR(..) | t::LIT_STR(..) | t::LIT_STR_RAW(..) => "string",

            // number literals
            t::LIT_INT(..) | t::LIT_UINT(..) | t::LIT_INT_UNSUFFIXED(..) |
                t::LIT_FLOAT(..) | t::LIT_FLOAT_UNSUFFIXED(..) => "number",

            // keywords are also included in the identifier set
            t::IDENT(ident, _is_mod_sep) => {
                match t::get_ident(ident).get() {
                    "ref" | "mut" => "kw-2",

                    "self" => "self",
                    "false" | "true" => "boolval",

                    // Special-case well-known prelude types and variants so
                    // they are styled distinctly from ordinary identifiers.
                    "Option" | "Result" => "prelude-ty",
                    "Some" | "None" | "Ok" | "Err" => "prelude-val",

                    _ if t::is_any_keyword(&next.tok) => "kw",
                    _ => {
                        if is_macro_nonterminal {
                            // This ident is the `$name` flagged at DOLLAR.
                            is_macro_nonterminal = false;
                            "macro-nonterminal"
                        } else if lexer.peek().tok == t::NOT {
                            // `ident!` — the start of a macro invocation; the
                            // NOT branch above consumes the flag next turn.
                            is_macro = true;
                            "macro"
                        } else {
                            "ident"
                        }
                    }
                }
            }

            t::LIFETIME(..) => "lifetime",
            t::DOC_COMMENT(..) => "doccomment",
            t::UNDERSCORE | t::EOF | t::INTERPOLATED(..) => "",
        };

        // as mentioned above, use the original source code instead of
        // stringifying this token
        let snip = sess.span_diagnostic.cm.span_to_snippet(next.sp).unwrap();
        if klass == "" {
            try!(write!(out, "{}", Escape(snip.as_slice())));
        } else {
            try!(write!(out, "<span class='{}'>{}</span>", klass,
                        Escape(snip.as_slice())));
        }
    }

    write!(out, "</pre>\n")
}