Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
C
canifa_note
Project
Project
Details
Activity
Releases
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
Vũ Hoàng Anh
canifa_note
Commits
42c653e1
Unverified
Commit
42c653e1
authored
May 23, 2023
by
boojack
Committed by
GitHub
May 23, 2023
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
feat: implement paragraph and italic parsers (#1725)
parent
8c34be92
Changes
5
Show whitespace changes
Inline
Side-by-side
Showing
5 changed files
with
259 additions
and
5 deletions
+259
-5
bold.go
plugin/gomark/parser/bold.go
+8
-5
italic.go
plugin/gomark/parser/italic.go
+42
-0
italic_test.go
plugin/gomark/parser/italic_test.go
+94
-0
paragraph.go
plugin/gomark/parser/paragraph.go
+30
-0
paragraph_test.go
plugin/gomark/parser/paragraph_test.go
+85
-0
No files found.
plugin/gomark/parser/bold.go
View file @
42c653e1
...
...
@@ -18,25 +18,28 @@ func (*BoldParser) Match(tokens []*tokenizer.Token) *BoldParser {
}
prefixTokens
:=
tokens
[
:
2
]
if
len
(
prefixTokens
)
!=
2
||
prefixTokens
[
0
]
.
Type
!=
prefixTokens
[
1
]
.
Type
{
if
prefixTokens
[
0
]
.
Type
!=
prefixTokens
[
1
]
.
Type
{
return
nil
}
prefixTokenType
:=
prefixTokens
[
0
]
.
Type
if
prefixTokenType
!=
tokenizer
.
Star
&&
prefixTokenType
!=
tokenizer
.
Underline
{
return
nil
}
contentTokens
:=
[]
*
tokenizer
.
Token
{}
cursor
:=
2
cursor
,
matched
:=
2
,
false
for
;
cursor
<
len
(
tokens
)
-
1
;
cursor
++
{
token
,
nextToken
:=
tokens
[
cursor
],
tokens
[
cursor
+
1
]
if
token
.
Type
==
tokenizer
.
Newline
||
nextToken
.
Type
==
tokenizer
.
Newline
{
break
return
nil
}
if
token
.
Type
==
prefixTokenType
&&
nextToken
.
Type
==
prefixTokenType
{
matched
=
true
break
}
contentTokens
=
append
(
contentTokens
,
token
)
}
if
cursor
!=
len
(
tokens
)
-
2
{
if
!
matched
{
return
nil
}
...
...
plugin/gomark/parser/italic.go
0 → 100644
View file @
42c653e1
package
parser
import
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
// ItalicParser matches an italic text span delimited by a single star or
// underline symbol (e.g. *foo* or _foo_) at the start of a token stream.
type ItalicParser struct {
	// ContentTokens holds the tokens found between the opening and closing
	// delimiter, excluding the delimiters themselves.
	ContentTokens []*tokenizer.Token
}
func
NewItalicParser
()
*
ItalicParser
{
return
&
ItalicParser
{}
}
func
(
*
ItalicParser
)
Match
(
tokens
[]
*
tokenizer
.
Token
)
*
ItalicParser
{
if
len
(
tokens
)
<
3
{
return
nil
}
prefixTokens
:=
tokens
[
:
1
]
if
prefixTokens
[
0
]
.
Type
!=
tokenizer
.
Star
&&
prefixTokens
[
0
]
.
Type
!=
tokenizer
.
Underline
{
return
nil
}
prefixTokenType
:=
prefixTokens
[
0
]
.
Type
contentTokens
:=
[]
*
tokenizer
.
Token
{}
matched
:=
false
for
_
,
token
:=
range
tokens
[
1
:
]
{
if
token
.
Type
==
tokenizer
.
Newline
{
return
nil
}
if
token
.
Type
==
prefixTokenType
{
matched
=
true
break
}
contentTokens
=
append
(
contentTokens
,
token
)
}
if
!
matched
||
len
(
contentTokens
)
==
0
{
return
nil
}
return
&
ItalicParser
{
ContentTokens
:
contentTokens
,
}
}
plugin/gomark/parser/italic_test.go
0 → 100644
View file @
42c653e1
package
parser
import
(
"testing"
"github.com/stretchr/testify/require"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)
// TestItalicParser exercises ItalicParser.Match over a table of inputs,
// comparing the matched content tokens (nil means "no italic span").
func TestItalicParser(t *testing.T) {
	tests := []struct {
		text   string        // raw input fed to the tokenizer
		italic *ItalicParser // expected result of Match; nil for non-matches
	}{
		// Unterminated: no closing '*'.
		{
			text:   "*Hello world!",
			italic: nil,
		},
		// Minimal well-formed span.
		{
			text: "*Hello*",
			italic: &ItalicParser{
				ContentTokens: []*tokenizer.Token{
					{
						Type:  tokenizer.Text,
						Value: "Hello",
					},
				},
			},
		},
		// Surrounding spaces are kept as content tokens.
		{
			text: "* Hello *",
			italic: &ItalicParser{
				ContentTokens: []*tokenizer.Token{
					{
						Type:  tokenizer.Space,
						Value: " ",
					},
					{
						Type:  tokenizer.Text,
						Value: "Hello",
					},
					{
						Type:  tokenizer.Space,
						Value: " ",
					},
				},
			},
		},
		// "**" closes immediately with empty content -> no match.
		{
			text:   "** Hello * *",
			italic: nil,
		},
		// Match stops at the first closing symbol; trailing text is ignored.
		{
			text: "*1* Hello * *",
			italic: &ItalicParser{
				ContentTokens: []*tokenizer.Token{
					{
						Type:  tokenizer.Text,
						Value: "1",
					},
				},
			},
		},
		// Raw string: `\n` is literal backslash-n text, not a newline.
		{
			text: `* \n * Hello * *`,
			italic: &ItalicParser{
				ContentTokens: []*tokenizer.Token{
					{
						Type:  tokenizer.Space,
						Value: " ",
					},
					{
						Type:  tokenizer.Text,
						Value: `\n`,
					},
					{
						Type:  tokenizer.Space,
						Value: " ",
					},
				},
			},
		},
		// Interpreted string: \n is a real newline, so the span is rejected.
		{
			text:   "* \n * Hello * *",
			italic: nil,
		},
	}
	for _, test := range tests {
		tokens := tokenizer.Tokenize(test.text)
		italic := NewItalicParser()
		require.Equal(t, test.italic, italic.Match(tokens))
	}
}
plugin/gomark/parser/paragraph.go
0 → 100644
View file @
42c653e1
package
parser
import
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
// ParagraphParser matches a single paragraph: the run of tokens from the
// start of the stream up to (but not including) the first newline.
type ParagraphParser struct {
	// ContentTokens holds the tokens that make up the paragraph.
	ContentTokens []*tokenizer.Token
}
func
NewParagraphParser
()
*
ParagraphParser
{
return
&
ParagraphParser
{}
}
func
(
*
ParagraphParser
)
Match
(
tokens
[]
*
tokenizer
.
Token
)
*
ParagraphParser
{
contentTokens
:=
[]
*
tokenizer
.
Token
{}
cursor
:=
0
for
;
cursor
<
len
(
tokens
);
cursor
++
{
token
:=
tokens
[
cursor
]
if
token
.
Type
==
tokenizer
.
Newline
{
break
}
contentTokens
=
append
(
contentTokens
,
token
)
}
if
len
(
contentTokens
)
==
0
{
return
nil
}
return
&
ParagraphParser
{
ContentTokens
:
contentTokens
,
}
}
plugin/gomark/parser/paragraph_test.go
0 → 100644
View file @
42c653e1
package
parser
import
(
"testing"
"github.com/stretchr/testify/require"
"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)
// TestParagraphParser exercises ParagraphParser.Match over a table of inputs,
// comparing the collected content tokens (nil means "no paragraph").
func TestParagraphParser(t *testing.T) {
	tests := []struct {
		text      string           // raw input fed to the tokenizer
		paragraph *ParagraphParser // expected result of Match; nil for non-matches
	}{
		// Empty input yields no tokens and therefore no paragraph.
		{
			text:      "",
			paragraph: nil,
		},
		// Single-line input is captured whole.
		{
			text: "Hello world!",
			paragraph: &ParagraphParser{
				ContentTokens: []*tokenizer.Token{
					{
						Type:  tokenizer.Text,
						Value: "Hello",
					},
					{
						Type:  tokenizer.Space,
						Value: " ",
					},
					{
						Type:  tokenizer.Text,
						Value: "world!",
					},
				},
			},
		},
		// Raw string with a real line break: everything after the newline is
		// dropped. NOTE(review): the trailing space after "Hello" is
		// intentional — it produces the expected Space token.
		{
			text: `Hello 
world!`,
			paragraph: &ParagraphParser{
				ContentTokens: []*tokenizer.Token{
					{
						Type:  tokenizer.Text,
						Value: "Hello",
					},
					{
						Type:  tokenizer.Space,
						Value: " ",
					},
				},
			},
		},
		// In a raw string `\n` is literal backslash-n text; only the real
		// line break terminates the paragraph.
		{
			text: `Hello \n 
world!`,
			paragraph: &ParagraphParser{
				ContentTokens: []*tokenizer.Token{
					{
						Type:  tokenizer.Text,
						Value: "Hello",
					},
					{
						Type:  tokenizer.Space,
						Value: " ",
					},
					{
						Type:  tokenizer.Text,
						Value: `\n`,
					},
					{
						Type:  tokenizer.Space,
						Value: " ",
					},
				},
			},
		},
	}
	for _, test := range tests {
		tokens := tokenizer.Tokenize(test.text)
		paragraph := NewParagraphParser()
		require.Equal(t, test.paragraph, paragraph.Match(tokens))
	}
}
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment