Skip to content

Commit ca9a660

Browse files
committed
More Ruby docs
1 parent 085da4f commit ca9a660

File tree

14 files changed

+145
-71
lines changed

14 files changed

+145
-71
lines changed

lib/prism/ffi.rb

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99
module Prism
1010
BACKEND = :FFI
1111

12-
module LibRubyParser
12+
module LibRubyParser # :nodoc:
1313
extend FFI::Library
1414

1515
# Define the library that we will be pulling functions from. Note that this
@@ -95,7 +95,7 @@ def self.load_exported_functions_from(header, *functions)
9595

9696
# This object represents a pm_buffer_t. We only use it as an opaque pointer,
9797
# so it doesn't need to know the fields of pm_buffer_t.
98-
class PrismBuffer
98+
class PrismBuffer # :nodoc:
9999
SIZEOF = LibRubyParser.pm_buffer_sizeof
100100

101101
attr_reader :pointer
@@ -133,7 +133,7 @@ def self.with(&block)
133133

134134
# This object represents a pm_string_t. We only use it as an opaque pointer,
135135
# so it doesn't have to be an FFI::Struct.
136-
class PrismString
136+
class PrismString # :nodoc:
137137
SIZEOF = LibRubyParser.pm_string_sizeof
138138

139139
attr_reader :pointer
@@ -168,6 +168,7 @@ def self.with(filepath, &block)
168168
end
169169
end
170170

171+
# Dump the given source into a serialized format.
171172
def self.dump_internal(source, source_size, filepath)
172173
PrismBuffer.with do |buffer|
173174
metadata = [filepath.bytesize, filepath.b, 0].pack("LA*L") if filepath

lib/prism/lex_compat.rb

Lines changed: 21 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@ module Prism
88
# of cases, this is a one-to-one mapping of the token type. Everything else
99
# generally lines up. However, there are a few cases that require special
1010
# handling.
11-
class LexCompat
11+
class LexCompat # :nodoc:
1212
# This is a mapping of prism token types to Ripper token types. This is a
1313
# many-to-one mapping because we split up our token types, whereas Ripper
1414
# tends to group them.
@@ -184,18 +184,22 @@ class LexCompat
184184
# However, we add a couple of convenience methods onto them to make them a
185185
# little easier to work with. We delegate all other methods to the array.
186186
class Token < SimpleDelegator
187+
# The location of the token in the source.
187188
def location
188189
self[0]
189190
end
190191

192+
# The type of the token.
191193
def event
192194
self[1]
193195
end
194196

197+
# The slice of the source that this token represents.
195198
def value
196199
self[2]
197200
end
198201

202+
# The state of the lexer when this token was produced.
199203
def state
200204
self[3]
201205
end
@@ -204,15 +208,15 @@ def state
204208
# Ripper doesn't include the rest of the token in the event, so we need to
205209
# trim it down to just the content on the first line when comparing.
206210
class EndContentToken < Token
207-
def ==(other)
211+
def ==(other) # :nodoc:
208212
[self[0], self[1], self[2][0..self[2].index("\n")], self[3]] == other
209213
end
210214
end
211215

212216
# Tokens where state should be ignored
213217
# used for :on_comment, :on_heredoc_end, :on_embexpr_end
214218
class IgnoreStateToken < Token
215-
def ==(other)
219+
def ==(other) # :nodoc:
216220
self[0...-1] == other[0...-1]
217221
end
218222
end
@@ -222,7 +226,7 @@ def ==(other)
222226
# through named captures in regular expressions). In that case we don't
223227
# compare the state.
224228
class IdentToken < Token
225-
def ==(other)
229+
def ==(other) # :nodoc:
226230
(self[0...-1] == other[0...-1]) && (
227231
(other[3] == Ripper::EXPR_LABEL | Ripper::EXPR_END) ||
228232
(other[3] & Ripper::EXPR_ARG_ANY != 0)
@@ -233,7 +237,7 @@ def ==(other)
233237
# Ignored newlines can occasionally have a LABEL state attached to them, so
234238
# we compare the state differently here.
235239
class IgnoredNewlineToken < Token
236-
def ==(other)
240+
def ==(other) # :nodoc:
237241
return false unless self[0...-1] == other[0...-1]
238242

239243
if self[4] == Ripper::EXPR_ARG | Ripper::EXPR_LABELED
@@ -253,7 +257,7 @@ def ==(other)
253257
# more accurately, so we need to allow comparing against both END and
254258
# END|LABEL.
255259
class ParamToken < Token
256-
def ==(other)
260+
def ==(other) # :nodoc:
257261
(self[0...-1] == other[0...-1]) && (
258262
(other[3] == Ripper::EXPR_END) ||
259263
(other[3] == Ripper::EXPR_END | Ripper::EXPR_LABEL)
@@ -264,12 +268,12 @@ def ==(other)
264268
# A heredoc in this case is a list of tokens that belong to the body of the
265269
# heredoc that should be appended onto the list of tokens when the heredoc
266270
# closes.
267-
module Heredoc
271+
module Heredoc # :nodoc:
268272
# Heredocs that are not dash or tilde heredocs are just a list of tokens.
269273
# We need to keep them around so that we can insert them in the correct
270274
# order back into the token stream and set the state of the last token to
271275
# the state that the heredoc was opened in.
272-
class PlainHeredoc
276+
class PlainHeredoc # :nodoc:
273277
attr_reader :tokens
274278

275279
def initialize
@@ -288,7 +292,7 @@ def to_a
288292
# Dash heredocs are a little more complicated. They are a list of tokens
289293
# that need to be split on "\\\n" to mimic Ripper's behavior. We also need
290294
# to keep track of the state that the heredoc was opened in.
291-
class DashHeredoc
295+
class DashHeredoc # :nodoc:
292296
attr_reader :split, :tokens
293297

294298
def initialize(split)
@@ -347,7 +351,7 @@ def to_a
347351
# insert them into the stream in the correct order. As such, we can do
348352
# some extra manipulation on the tokens to make them match Ripper's
349353
# output by mirroring the dedent logic that Ripper uses.
350-
class DedentingHeredoc
354+
class DedentingHeredoc # :nodoc:
351355
TAB_WIDTH = 8
352356

353357
attr_reader :tokens, :dedent_next, :dedent, :embexpr_balance
@@ -588,6 +592,8 @@ def self.build(opening)
588592
end
589593
end
590594

595+
private_constant :Heredoc
596+
591597
attr_reader :source, :filepath
592598

593599
def initialize(source, filepath = "")
@@ -829,9 +835,11 @@ def result
829835
end
830836
end
831837

838+
private_constant :LexCompat
839+
832840
# This is a class that wraps the Ripper lexer to produce almost exactly the
833841
# same tokens.
834-
class LexRipper
842+
class LexRipper # :nodoc:
835843
attr_reader :source
836844

837845
def initialize(source)
@@ -869,4 +877,6 @@ def result
869877
results
870878
end
871879
end
880+
881+
private_constant :LexRipper
872882
end

lib/prism/node_ext.rb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33
# Here we are reopening the prism module to provide methods on nodes that aren't
44
# templated and are meant as convenience methods.
55
module Prism
6-
module RegularExpressionOptions
6+
module RegularExpressionOptions # :nodoc:
77
# Returns a numeric value that represents the flags that were used to create
88
# the regular expression.
99
def options

lib/prism/node_inspector.rb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33
module Prism
44
# This object is responsible for generating the output for the inspect method
55
# implementations of child nodes.
6-
class NodeInspector
6+
class NodeInspector # :nodoc:
77
attr_reader :prefix, :output
88

99
def initialize(prefix = "")

lib/prism/pack.rb

Lines changed: 41 additions & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
# frozen_string_literal: true
22

33
module Prism
4+
# A parser for the pack template language.
45
module Pack
56
%i[
67
SPACE
@@ -54,6 +55,7 @@ module Pack
5455
const_set(const, const)
5556
end
5657

58+
# A directive in the pack template language.
5759
class Directive
5860
attr_reader :version, :variant, :source, :type, :signed, :endian, :size, :length_type, :length
5961

@@ -70,37 +72,37 @@ def initialize(version, variant, source, type, signed, endian, size, length_type
7072
end
7173

7274
ENDIAN_DESCRIPTIONS = {
73-
AGNOSTIC_ENDIAN: 'agnostic',
74-
LITTLE_ENDIAN: 'little-endian (VAX)',
75-
BIG_ENDIAN: 'big-endian (network)',
76-
NATIVE_ENDIAN: 'native-endian',
77-
ENDIAN_NA: 'n/a'
75+
AGNOSTIC_ENDIAN: "agnostic",
76+
LITTLE_ENDIAN: "little-endian (VAX)",
77+
BIG_ENDIAN: "big-endian (network)",
78+
NATIVE_ENDIAN: "native-endian",
79+
ENDIAN_NA: "n/a"
7880
}
7981

8082
SIGNED_DESCRIPTIONS = {
81-
UNSIGNED: 'unsigned',
82-
SIGNED: 'signed',
83-
SIGNED_NA: 'n/a'
83+
UNSIGNED: "unsigned",
84+
SIGNED: "signed",
85+
SIGNED_NA: "n/a"
8486
}
8587

8688
SIZE_DESCRIPTIONS = {
87-
SIZE_SHORT: 'short',
88-
SIZE_INT: 'int-width',
89-
SIZE_LONG: 'long',
90-
SIZE_LONG_LONG: 'long long',
91-
SIZE_8: '8-bit',
92-
SIZE_16: '16-bit',
93-
SIZE_32: '32-bit',
94-
SIZE_64: '64-bit',
95-
SIZE_P: 'pointer-width'
89+
SIZE_SHORT: "short",
90+
SIZE_INT: "int-width",
91+
SIZE_LONG: "long",
92+
SIZE_LONG_LONG: "long long",
93+
SIZE_8: "8-bit",
94+
SIZE_16: "16-bit",
95+
SIZE_32: "32-bit",
96+
SIZE_64: "64-bit",
97+
SIZE_P: "pointer-width"
9698
}
9799

98100
def describe
99101
case type
100102
when SPACE
101-
'whitespace'
103+
"whitespace"
102104
when COMMENT
103-
'comment'
105+
"comment"
104106
when INTEGER
105107
if size == SIZE_8
106108
base = "#{SIGNED_DESCRIPTIONS[signed]} #{SIZE_DESCRIPTIONS[size]} integer"
@@ -115,50 +117,51 @@ def describe
115117
base
116118
end
117119
when LENGTH_MAX
118-
base + ', as many as possible'
120+
base + ", as many as possible"
119121
end
120122
when UTF8
121-
'UTF-8 character'
123+
"UTF-8 character"
122124
when BER
123-
'BER-compressed integer'
125+
"BER-compressed integer"
124126
when FLOAT
125127
"#{SIZE_DESCRIPTIONS[size]} #{ENDIAN_DESCRIPTIONS[endian]} float"
126128
when STRING_SPACE_PADDED
127-
'arbitrary binary string (space padded)'
129+
"arbitrary binary string (space padded)"
128130
when STRING_NULL_PADDED
129-
'arbitrary binary string (null padded, count is width)'
131+
"arbitrary binary string (null padded, count is width)"
130132
when STRING_NULL_TERMINATED
131-
'arbitrary binary string (null padded, count is width), except that null is added with *'
133+
"arbitrary binary string (null padded, count is width), except that null is added with *"
132134
when STRING_MSB
133-
'bit string (MSB first)'
135+
"bit string (MSB first)"
134136
when STRING_LSB
135-
'bit string (LSB first)'
137+
"bit string (LSB first)"
136138
when STRING_HEX_HIGH
137-
'hex string (high nibble first)'
139+
"hex string (high nibble first)"
138140
when STRING_HEX_LOW
139-
'hex string (low nibble first)'
141+
"hex string (low nibble first)"
140142
when STRING_UU
141-
'UU-encoded string'
143+
"UU-encoded string"
142144
when STRING_MIME
143-
'quoted printable, MIME encoding'
145+
"quoted printable, MIME encoding"
144146
when STRING_BASE64
145-
'base64 encoded string'
147+
"base64 encoded string"
146148
when STRING_FIXED
147-
'pointer to a structure (fixed-length string)'
149+
"pointer to a structure (fixed-length string)"
148150
when STRING_POINTER
149-
'pointer to a null-terminated string'
151+
"pointer to a null-terminated string"
150152
when MOVE
151-
'move to absolute position'
153+
"move to absolute position"
152154
when BACK
153-
'back up a byte'
155+
"back up a byte"
154156
when NULL
155-
'null byte'
157+
"null byte"
156158
else
157159
raise
158160
end
159161
end
160162
end
161163

164+
# A class used to describe what a pack template does.
162165
class Format
163166
attr_reader :directives, :encoding
164167

@@ -178,7 +181,7 @@ def describe
178181
" #{source.ljust(source_width)} #{directive.describe}"
179182
end
180183

181-
(['Directives:'] + directive_lines + ['Encoding:', " #{encoding}"]).join("\n")
184+
(["Directives:"] + directive_lines + ["Encoding:", " #{encoding}"]).join("\n")
182185
end
183186
end
184187
end

lib/prism/parse_result.rb

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -58,6 +58,8 @@ def column(value)
5858

5959
private
6060

61+
# Find all of the newlines in the source code and return their byte offsets
62+
# from the start of the string in an array.
6163
def compute_offsets(code)
6264
offsets = [0]
6365
code.b.scan("\n") { offsets << $~.end(0) }

lib/prism/parse_result/comments.rb

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@ class ParseResult
1919
class Comments
2020
# A target for attaching comments that is based on a specific node's
2121
# location.
22-
class NodeTarget
22+
class NodeTarget # :nodoc:
2323
attr_reader :node
2424

2525
def initialize(node)
@@ -46,7 +46,7 @@ def <<(comment)
4646

4747
# A target for attaching comments that is based on a location field on a
4848
# node. For example, the `end` token of a ClassNode.
49-
class LocationTarget
49+
class LocationTarget # :nodoc:
5050
attr_reader :location
5151

5252
def initialize(location)
@@ -70,12 +70,17 @@ def <<(comment)
7070
end
7171
end
7272

73+
# The parse result that we are attaching comments to.
7374
attr_reader :parse_result
7475

76+
# Create a new Comments object that will attach comments to the given
77+
# parse result.
7578
def initialize(parse_result)
7679
@parse_result = parse_result
7780
end
7881

82+
# Attach the comments to their respective locations in the tree by
83+
# mutating the parse result.
7984
def attach!
8085
parse_result.comments.each do |comment|
8186
preceding, enclosing, following = nearest_targets(parse_result.value, comment)

0 commit comments

Comments
 (0)