diff --git a/README.md b/README.md index 6255b6b..84c1de6 100644 --- a/README.md +++ b/README.md @@ -27,6 +27,7 @@ * [Options](#options) * [`decode(data[, options])`](#decodedata-options) * [Options](#options-1) + * [`decodeFirst(data[, options])`](#decodefirstdata-options) * [`encodedLength(data[, options])`](#encodedlengthdata-options) * [Type encoders](#type-encoders) * [Tag decoders](#tag-decoders) @@ -196,10 +197,6 @@ $ cborg json2hex '["a", "b", 1, "😀"]' import { encode } from 'cborg' ``` -```js -const { encode } = require('cborg') -``` - Encode a JavaScript object and return a `Uint8Array` with the CBOR byte representation. * Objects containing circular references will be rejected. @@ -226,10 +223,6 @@ Encode a JavaScript object and return a `Uint8Array` with the CBOR byte represen import { decode } from 'cborg' ``` -```js -const { decode } = require('cborg') -``` - Decode valid CBOR bytes from a `Uint8Array` (or `Buffer`) and return a JavaScript object. * Integers (major 0 and 1) that are outside of the safe integer range will be converted to a `BigInt`. @@ -252,14 +245,47 @@ Decode valid CBOR bytes from a `Uint8Array` (or `Buffer`) and return a JavaScrip * `tags` (array): a mapping of tag number to tag decoder function. By default no tags are supported. See [Tag decoders](#tag-decoders). * `tokenizer` (object): an object with two methods, `next()` which returns a `Token` and `done()` which returns a `boolean`. Can be used to implement custom input decoding. See the source code for examples. -### `encodedLength(data[, options])` +### `decodeFirst(data[, options])` ```js -import { encodedLength } from 'cborg/length' +import { decodeFirst } from 'cborg' ``` +Decode valid CBOR bytes from a `Uint8Array` (or `Buffer`) and return a JavaScript object ***and*** the remainder of the original byte array that was not consumed by the decode. This can be useful for decoding concatenated CBOR objects, which is often used in streaming modes of CBOR. 
+ +The returned remainder `Uint8Array` is a subarray of the original input `Uint8Array` and will share the same underlying buffer. This means that there are no new allocations performed by this function and it is as efficient to use as `decode` but without the additional byte-consumption check. + +The options for `decodeFirst` are the same as for [`decode()`](#decodedata-options), but the return type is different and `decodeFirst()` will not error if a decode operation doesn't consume all of the input bytes. + +The return value is an array with two elements: + +* `value`: the decoded JavaScript object +* `remainder`: a `Uint8Array` containing the bytes that were not consumed by the decode operation + ```js -const { encodedLength } = require('cborg/length') +import { decodeFirst } from 'cborg' + +let buf = Buffer.from('a16474686973a26269736543424f522163796179f564746869736269736543424f522163796179f5', 'hex') +while (buf.length) { + const [value, remainder] = decodeFirst(buf) + console.log('decoded:', value) + buf = remainder +} +``` + +``` +decoded: { this: { is: 'CBOR!', yay: true } } +decoded: this +decoded: is +decoded: CBOR! +decoded: yay +decoded: true +``` + +### `encodedLength(data[, options])` + +```js +import { encodedLength } from 'cborg/length' ``` Calculate the byte length of the given data when encoded as CBOR with the options provided. The options are the same as for an `encode()` call. This calculation will be accurate if the same options are used as when performing a normal `encode()`. Some encode options can change the encoding output length. @@ -400,7 +426,7 @@ There are a number of forms where an object will not round-trip precisely, if th **cborg** can also encode and decode JSON using the same pipeline and many of the same settings. 
For most (but not all) cases it will be faster to use `JSON.parse()` and `JSON.stringify()`, however **cborg** provides much more control over the process to handle determinism and be more restrictive in allowable forms. It also operates natively with Uint8Arrays rather than strings which may also offer some minor efficiency or usability gains in some circumstances. -Use `import { encode, decode } from 'cborg/json'` or `const { encode, decode } = require('cborg/json')` to access the JSON handling encoder and decoder. +Use `import { encode, decode } from 'cborg/json'` to access the JSON handling encoder and decoder. Many of the same encode and decode options available for CBOR can be used to manage JSON handling. These include strictness requirements for decode and custom tag encoders for encode. Tag encoders can't create new tags as there are no tags in JSON, but they can replace JavaScript object forms with custom JSON forms (e.g. convert a `Uint8Array` to a valid JSON form rather than having the encoder throw an error). The inverse is also possible, turning specific JSON forms into JavaScript forms, by using a custom tokenizer on decode. 
diff --git a/cborg.js b/cborg.js index d3f863c..90bde20 100644 --- a/cborg.js +++ b/cborg.js @@ -1,5 +1,5 @@ import { encode } from './lib/encode.js' -import { decode } from './lib/decode.js' +import { decode, decodeFirst } from './lib/decode.js' import { Token, Type } from './lib/token.js' /** @@ -13,6 +13,7 @@ import { Token, Type } from './lib/token.js' export { decode, + decodeFirst, encode, Token, Type diff --git a/interface.ts b/interface.ts index 70cac2e..020264d 100644 --- a/interface.ts +++ b/interface.ts @@ -26,7 +26,8 @@ export type QuickEncodeToken = (token: Token) => Uint8Array | undefined export interface DecodeTokenizer { done(): boolean, - next(): Token + next(): Token, + pos(): number, } export type TagDecoder = (inner: any) => any diff --git a/lib/decode.js b/lib/decode.js index 9d6703f..5af4516 100644 --- a/lib/decode.js +++ b/lib/decode.js @@ -24,17 +24,21 @@ class Tokeniser { * @param {DecodeOptions} options */ constructor (data, options = {}) { - this.pos = 0 + this._pos = 0 this.data = data this.options = options } + pos () { + return this._pos + } + done () { - return this.pos >= this.data.length + return this._pos >= this.data.length } next () { - const byt = this.data[this.pos] + const byt = this.data[this._pos] let token = quick[byt] if (token === undefined) { const decoder = jump[byt] @@ -44,10 +48,10 @@ class Tokeniser { throw new Error(`${decodeErrPrefix} no decoder for major type ${byt >>> 5} (byte 0x${byt.toString(16).padStart(2, '0')})`) } const minor = byt & 31 - token = decoder(this.data, this.pos, minor, this.options) + token = decoder(this.data, this._pos, minor, this.options) } // @ts-ignore we get to assume encodedLength is set (crossing fingers slightly) - this.pos += token.encodedLength + this._pos += token.encodedLength return token } } @@ -171,9 +175,9 @@ function tokensToObject (tokeniser, options) { /** * @param {Uint8Array} data * @param {DecodeOptions} [options] - * @returns {any} + * @returns {[any, Uint8Array]} */ 
-function decode (data, options) { +function decodeFirst (data, options) { if (!(data instanceof Uint8Array)) { throw new Error(`${decodeErrPrefix} data to decode must be a Uint8Array`) } @@ -186,10 +190,20 @@ function decode (data, options) { if (decoded === BREAK) { throw new Error(`${decodeErrPrefix} got unexpected break`) } - if (!tokeniser.done()) { + return [decoded, data.subarray(tokeniser.pos())] +} + +/** + * @param {Uint8Array} data + * @param {DecodeOptions} [options] + * @returns {any} + */ +function decode (data, options) { + const [decoded, remainder] = decodeFirst(data, options) + if (remainder.length > 0) { throw new Error(`${decodeErrPrefix} too many terminals, data makes no sense`) } return decoded } -export { Tokeniser, tokensToObject, decode } +export { Tokeniser, tokensToObject, decode, decodeFirst } diff --git a/lib/json/decode.js b/lib/json/decode.js index 2a40465..2a324de 100644 --- a/lib/json/decode.js +++ b/lib/json/decode.js @@ -17,7 +17,7 @@ class Tokenizer { * @param {DecodeOptions} options */ constructor (data, options = {}) { - this.pos = 0 + this._pos = 0 this.data = data this.options = options /** @type {string[]} */ @@ -25,18 +25,22 @@ class Tokenizer { this.lastToken = '' } + pos () { + return this._pos + } + /** * @returns {boolean} */ done () { - return this.pos >= this.data.length + return this._pos >= this.data.length } /** * @returns {number} */ ch () { - return this.data[this.pos] + return this.data[this._pos] } /** @@ -50,7 +54,7 @@ class Tokenizer { let c = this.ch() // @ts-ignore while (c === 32 /* ' ' */ || c === 9 /* '\t' */ || c === 13 /* '\r' */ || c === 10 /* '\n' */) { - c = this.data[++this.pos] + c = this.data[++this._pos] } } @@ -58,18 +62,18 @@ class Tokenizer { * @param {number[]} str */ expect (str) { - if (this.data.length - this.pos < str.length) { - throw new Error(`${decodeErrPrefix} unexpected end of input at position ${this.pos}`) + if (this.data.length - this._pos < str.length) { + throw new 
Error(`${decodeErrPrefix} unexpected end of input at position ${this._pos}`) } for (let i = 0; i < str.length; i++) { - if (this.data[this.pos++] !== str[i]) { - throw new Error(`${decodeErrPrefix} unexpected token at position ${this.pos}, expected to find '${String.fromCharCode(...str)}'`) + if (this.data[this._pos++] !== str[i]) { + throw new Error(`${decodeErrPrefix} unexpected token at position ${this._pos}, expected to find '${String.fromCharCode(...str)}'`) } } } parseNumber () { - const startPos = this.pos + const startPos = this._pos let negative = false let float = false @@ -80,7 +84,7 @@ class Tokenizer { while (!this.done()) { const ch = this.ch() if (chars.includes(ch)) { - this.pos++ + this._pos++ } else { break } @@ -90,47 +94,47 @@ class Tokenizer { // lead if (this.ch() === 45) { // '-' negative = true - this.pos++ + this._pos++ } if (this.ch() === 48) { // '0' - this.pos++ + this._pos++ if (this.ch() === 46) { // '.' - this.pos++ + this._pos++ float = true } else { - return new Token(Type.uint, 0, this.pos - startPos) + return new Token(Type.uint, 0, this._pos - startPos) } } swallow([48, 49, 50, 51, 52, 53, 54, 55, 56, 57]) // DIGIT - if (negative && this.pos === startPos + 1) { - throw new Error(`${decodeErrPrefix} unexpected token at position ${this.pos}`) + if (negative && this._pos === startPos + 1) { + throw new Error(`${decodeErrPrefix} unexpected token at position ${this._pos}`) } if (!this.done() && this.ch() === 46) { // '.' 
if (float) { - throw new Error(`${decodeErrPrefix} unexpected token at position ${this.pos}`) + throw new Error(`${decodeErrPrefix} unexpected token at position ${this._pos}`) } float = true - this.pos++ + this._pos++ swallow([48, 49, 50, 51, 52, 53, 54, 55, 56, 57]) // DIGIT } if (!this.done() && (this.ch() === 101 || this.ch() === 69)) { // '[eE]' float = true - this.pos++ + this._pos++ if (!this.done() && (this.ch() === 43 || this.ch() === 45)) { // '+', '-' - this.pos++ + this._pos++ } swallow([48, 49, 50, 51, 52, 53, 54, 55, 56, 57]) // DIGIT } // @ts-ignore - const numStr = String.fromCharCode.apply(null, this.data.subarray(startPos, this.pos)) + const numStr = String.fromCharCode.apply(null, this.data.subarray(startPos, this._pos)) const num = parseFloat(numStr) if (float) { - return new Token(Type.float, num, this.pos - startPos) + return new Token(Type.float, num, this._pos - startPos) } if (this.options.allowBigInt !== true || Number.isSafeInteger(num)) { - return new Token(num >= 0 ? Type.uint : Type.negint, num, this.pos - startPos) + return new Token(num >= 0 ? Type.uint : Type.negint, num, this._pos - startPos) } - return new Token(num >= 0 ? Type.uint : Type.negint, BigInt(numStr), this.pos - startPos) + return new Token(num >= 0 ? 
Type.uint : Type.negint, BigInt(numStr), this._pos - startPos) } /** @@ -140,31 +144,31 @@ class Tokenizer { /* c8 ignore next 4 */ if (this.ch() !== 34) { // '"' // this would be a programming error - throw new Error(`${decodeErrPrefix} unexpected character at position ${this.pos}; this shouldn't happen`) + throw new Error(`${decodeErrPrefix} unexpected character at position ${this._pos}; this shouldn't happen`) } - this.pos++ + this._pos++ // check for simple fast-path, all printable ascii, no escapes // >0x10000 elements may fail fn.apply() (http://stackoverflow.com/a/22747272/680742) - for (let i = this.pos, l = 0; i < this.data.length && l < 0x10000; i++, l++) { + for (let i = this._pos, l = 0; i < this.data.length && l < 0x10000; i++, l++) { const ch = this.data[i] if (ch === 92 || ch < 32 || ch >= 128) { // '\', ' ', control-chars or non-trivial break } if (ch === 34) { // '"' // @ts-ignore - const str = String.fromCharCode.apply(null, this.data.subarray(this.pos, i)) - this.pos = i + 1 + const str = String.fromCharCode.apply(null, this.data.subarray(this._pos, i)) + this._pos = i + 1 return new Token(Type.string, str, l) } } - const startPos = this.pos + const startPos = this._pos const chars = [] const readu4 = () => { - if (this.pos + 4 >= this.data.length) { - throw new Error(`${decodeErrPrefix} unexpected end of unicode escape sequence at position ${this.pos}`) + if (this._pos + 4 >= this.data.length) { + throw new Error(`${decodeErrPrefix} unexpected end of unicode escape sequence at position ${this._pos}`) } let u4 = 0 for (let i = 0; i < 4; i++) { @@ -176,10 +180,10 @@ class Tokenizer { } else if (ch >= 65 && ch <= 70) { // 'A' && 'F' ch = ch - 65 + 10 } else { - throw new Error(`${decodeErrPrefix} unexpected unicode escape character at position ${this.pos}`) + throw new Error(`${decodeErrPrefix} unexpected unicode escape character at position ${this._pos}`) } u4 = u4 * 16 + ch - this.pos++ + this._pos++ } return u4 } @@ -191,8 +195,8 @@ class 
Tokenizer { /* c8 ignore next 1 */ let bytesPerSequence = (firstByte > 0xef) ? 4 : (firstByte > 0xdf) ? 3 : (firstByte > 0xbf) ? 2 : 1 - if (this.pos + bytesPerSequence > this.data.length) { - throw new Error(`${decodeErrPrefix} unexpected unicode sequence at position ${this.pos}`) + if (this._pos + bytesPerSequence > this.data.length) { + throw new Error(`${decodeErrPrefix} unexpected unicode sequence at position ${this._pos}`) } let secondByte, thirdByte, fourthByte, tempCodePoint @@ -206,7 +210,7 @@ class Tokenizer { } break case 2: - secondByte = this.data[this.pos + 1] + secondByte = this.data[this._pos + 1] if ((secondByte & 0xc0) === 0x80) { tempCodePoint = (firstByte & 0x1f) << 0x6 | (secondByte & 0x3f) if (tempCodePoint > 0x7f) { @@ -215,8 +219,8 @@ class Tokenizer { } break case 3: - secondByte = this.data[this.pos + 1] - thirdByte = this.data[this.pos + 2] + secondByte = this.data[this._pos + 1] + thirdByte = this.data[this._pos + 2] if ((secondByte & 0xc0) === 0x80 && (thirdByte & 0xc0) === 0x80) { tempCodePoint = (firstByte & 0xf) << 0xc | (secondByte & 0x3f) << 0x6 | (thirdByte & 0x3f) /* c8 ignore next 3 */ @@ -226,9 +230,9 @@ class Tokenizer { } break case 4: - secondByte = this.data[this.pos + 1] - thirdByte = this.data[this.pos + 2] - fourthByte = this.data[this.pos + 3] + secondByte = this.data[this._pos + 1] + thirdByte = this.data[this._pos + 2] + fourthByte = this.data[this._pos + 3] if ((secondByte & 0xc0) === 0x80 && (thirdByte & 0xc0) === 0x80 && (fourthByte & 0xc0) === 0x80) { tempCodePoint = (firstByte & 0xf) << 0x12 | (secondByte & 0x3f) << 0xc | (thirdByte & 0x3f) << 0x6 | (fourthByte & 0x3f) if (tempCodePoint > 0xffff && tempCodePoint < 0x110000) { @@ -251,7 +255,7 @@ class Tokenizer { } chars.push(codePoint) - this.pos += bytesPerSequence + this._pos += bytesPerSequence } // TODO: could take the approach of a quick first scan for special chars like encoding/json/decode.go#unquoteBytes @@ -261,12 +265,12 @@ class Tokenizer { let ch1 
switch (ch) { case 92: // '\' - this.pos++ + this._pos++ if (this.done()) { - throw new Error(`${decodeErrPrefix} unexpected string termination at position ${this.pos}`) + throw new Error(`${decodeErrPrefix} unexpected string termination at position ${this._pos}`) } ch1 = this.ch() - this.pos++ + this._pos++ switch (ch1) { case 34: // '"' case 39: // '\'' @@ -293,25 +297,25 @@ class Tokenizer { chars.push(readu4()) break default: - throw new Error(`${decodeErrPrefix} unexpected string escape character at position ${this.pos}`) + throw new Error(`${decodeErrPrefix} unexpected string escape character at position ${this._pos}`) } break case 34: // '"' - this.pos++ - return new Token(Type.string, decodeCodePointsArray(chars), this.pos - startPos) + this._pos++ + return new Token(Type.string, decodeCodePointsArray(chars), this._pos - startPos) default: if (ch < 32) { // ' ' - throw new Error(`${decodeErrPrefix} invalid control character at position ${this.pos}`) + throw new Error(`${decodeErrPrefix} invalid control character at position ${this._pos}`) } else if (ch < 0x80) { chars.push(ch) - this.pos++ + this._pos++ } else { readUtf8Char() } } } - throw new Error(`${decodeErrPrefix} unexpected end of string at position ${this.pos}`) + throw new Error(`${decodeErrPrefix} unexpected end of string at position ${this._pos}`) } /** @@ -321,11 +325,11 @@ class Tokenizer { switch (this.ch()) { case 123: // '{' this.modeStack.push('obj-start') - this.pos++ + this._pos++ return new Token(Type.map, Infinity, 1) case 91: // '[' this.modeStack.push('array-start') - this.pos++ + this._pos++ return new Token(Type.array, Infinity, 1) case 34: { // '"' return this.parseString() @@ -352,7 +356,7 @@ class Tokenizer { case 57: // '9' return this.parseNumber() default: - throw new Error(`${decodeErrPrefix} unexpected character at position ${this.pos}`) + throw new Error(`${decodeErrPrefix} unexpected character at position ${this._pos}`) } } @@ -368,14 +372,14 @@ class Tokenizer { case 
'array-value': { this.modeStack.pop() if (this.ch() === 93) { // ']' - this.pos++ + this._pos++ this.skipWhitespace() return new Token(Type.break, undefined, 1) } if (this.ch() !== 44) { // ',' - throw new Error(`${decodeErrPrefix} unexpected character at position ${this.pos}, was expecting array delimiter but found '${String.fromCharCode(this.ch())}'`) + throw new Error(`${decodeErrPrefix} unexpected character at position ${this._pos}, was expecting array delimiter but found '${String.fromCharCode(this.ch())}'`) } - this.pos++ + this._pos++ this.modeStack.push('array-value') this.skipWhitespace() return this.parseValue() @@ -383,7 +387,7 @@ class Tokenizer { case 'array-start': { this.modeStack.pop() if (this.ch() === 93) { // ']' - this.pos++ + this._pos++ this.skipWhitespace() return new Token(Type.break, undefined, 1) } @@ -395,28 +399,28 @@ class Tokenizer { case 'obj-key': if (this.ch() === 125) { // '}' this.modeStack.pop() - this.pos++ + this._pos++ this.skipWhitespace() return new Token(Type.break, undefined, 1) } if (this.ch() !== 44) { // ',' - throw new Error(`${decodeErrPrefix} unexpected character at position ${this.pos}, was expecting object delimiter but found '${String.fromCharCode(this.ch())}'`) + throw new Error(`${decodeErrPrefix} unexpected character at position ${this._pos}, was expecting object delimiter but found '${String.fromCharCode(this.ch())}'`) } - this.pos++ + this._pos++ this.skipWhitespace() case 'obj-start': { // eslint-disable-line no-fallthrough this.modeStack.pop() if (this.ch() === 125) { // '}' - this.pos++ + this._pos++ this.skipWhitespace() return new Token(Type.break, undefined, 1) } const token = this.parseString() this.skipWhitespace() if (this.ch() !== 58) { // ':' - throw new Error(`${decodeErrPrefix} unexpected character at position ${this.pos}, was expecting key/value delimiter ':' but found '${String.fromCharCode(this.ch())}'`) + throw new Error(`${decodeErrPrefix} unexpected character at position ${this._pos}, was 
expecting key/value delimiter ':' but found '${String.fromCharCode(this.ch())}'`) } - this.pos++ + this._pos++ this.modeStack.push('obj-value') return token } @@ -428,7 +432,7 @@ class Tokenizer { } /* c8 ignore next 2 */ default: - throw new Error(`${decodeErrPrefix} unexpected parse state at position ${this.pos}; this shouldn't happen`) + throw new Error(`${decodeErrPrefix} unexpected parse state at position ${this._pos}; this shouldn't happen`) } } } diff --git a/test/test-partial.js b/test/test-partial.js new file mode 100644 index 0000000..5378588 --- /dev/null +++ b/test/test-partial.js @@ -0,0 +1,111 @@ +/* eslint-env mocha */ + +import chai from 'chai' +import { garbage } from 'ipld-garbage' +import { uintBoundaries } from '../lib/0uint.js' +import { encode, decodeFirst } from '../cborg.js' +import { dateDecoder, dateEncoder } from './common.js' + +const { assert } = chai + +function verifyPartial (objects, options) { + const encoded = [] + const lengths = [] + let length = 0 + for (const object of Array.isArray(objects) ? 
objects : [objects]) { + encoded.push(encode(object, options)) + const l = encoded[encoded.length - 1].length + length += l + lengths.push(l) + } + const buf = new Uint8Array(length) + let offset = 0 + for (const enc of encoded) { + buf.set(enc, offset) + offset += enc.length + } + let partial = buf + for (let ii = 0; ii < encoded.length; ii++) { + const [decoded, remainder] = decodeFirst(partial, options) + assert.deepEqual(decoded, Array.isArray(objects) ? objects[ii] : objects) + assert.equal(remainder.length, partial.length - lengths[ii]) + partial = remainder + } + assert.equal(partial.length, 0) // just to be sure +} + +describe('decodePartial', () => { + describe('multiple', () => { + it('simple', () => { + verifyPartial([1, 2, 3]) + verifyPartial([8.940696716308594e-08, 1]) + verifyPartial([ + [], + [1, 2, { obj: 1.5 }, null, new Uint8Array([1, 2, 3])], + { boop: true, bop: 1 }, + 'nope', + { o: 'nope' }, + new Uint8Array([1, 2, 3]), + true, + null + ]) + }) + + it('options', () => { + const m = new Map() + m.set('a', 1) + m.set('b', null) + m.set('c', 'grok') + m.set('date', new Date('2013-03-21T20:04:00Z')) + verifyPartial( + [8.940696716308594e-08, 1, null, 'grok', new Date('2013-03-21T20:04:00Z'), + [8.940696716308594e-08, 1, null, 'grok', new Date('2013-03-21T20:04:00Z')], + m + ], + { typeEncoders: { Date: dateEncoder }, useMaps: true, tags: { 0: dateDecoder } }) + }) + + it('garbage', function () { + this.timeout(10000) + for (let ii = 0; ii < 10; ii++) { + const gbg = [] + for (let ii = 0; ii < 100; ii++) { + gbg.push(garbage(1 << 6, { weights: { CID: 0 } })) + } + verifyPartial(gbg) + } + }) + }) + + describe('singular', () => { + it('int boundaries', () => { + for (let ii = 0; ii < 4; ii++) { + verifyPartial(uintBoundaries[ii]) + verifyPartial(uintBoundaries[ii] - 1) + verifyPartial(uintBoundaries[ii] + 1) + verifyPartial(-1 * uintBoundaries[ii]) + verifyPartial(-1 * uintBoundaries[ii] - 1) + verifyPartial(-1 * uintBoundaries[ii] + 1) + } + }) + + it('tags', () => { + verifyPartial({ 
date: new Date('2013-03-21T20:04:00Z') }, { typeEncoders: { Date: dateEncoder } }) + }) + + it('floats', () => { + verifyPartial(0.5) + verifyPartial(0.5, { float64: true }) + verifyPartial(8.940696716308594e-08) + verifyPartial(8.940696716308594e-08, { float64: true }) + }) + + it('small garbage', function () { + this.timeout(10000) + for (let ii = 0; ii < 1000; ii++) { + const gbg = garbage(1 << 6, { weights: { CID: 0 } }) + verifyPartial(gbg) + } + }) + }) +})