Python - Update CHANGELOG and bump for release

This commit is contained in:
Anthony MOI
2020-12-04 12:30:53 -05:00
committed by Anthony MOI
parent a3a9561f9f
commit 0c6cc39eee
5 changed files with 7 additions and 5 deletions

View File

@ -4,9 +4,10 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
-## [Unreleased]
+## [0.10.0rc1]
 ### Added
+- [#508]: Add a Visualizer for notebooks to help understand how the tokenizers work
 - [#519]: Add a `WordLevelTrainer` used to train a `WordLevel` model
 - [#533]: Add support for conda builds
 - [#542]: Add Split pre-tokenizer to easily split using a pattern
@ -298,6 +299,7 @@ delimiter (Works like `.split(delimiter)`)
 [#530]: https://github.com/huggingface/tokenizers/pull/530
 [#519]: https://github.com/huggingface/tokenizers/pull/519
 [#509]: https://github.com/huggingface/tokenizers/pull/509
+[#508]: https://github.com/huggingface/tokenizers/pull/508
 [#506]: https://github.com/huggingface/tokenizers/pull/506
 [#500]: https://github.com/huggingface/tokenizers/pull/500
 [#498]: https://github.com/huggingface/tokenizers/pull/498

View File

@ -1076,7 +1076,7 @@ dependencies = [
 [[package]]
 name = "tokenizers-python"
-version = "0.9.4"
+version = "0.10.0-rc1"
 dependencies = [
 "crossbeam",
 "env_logger",

View File

@ -1,6 +1,6 @@
 [package]
 name = "tokenizers-python"
-version = "0.9.4"
+version = "0.10.0-rc1"
 authors = ["Anthony MOI <m.anthony.moi@gmail.com>"]
 edition = "2018"

View File

@ -1,4 +1,4 @@
-__version__ = "0.9.4"
+__version__ = "0.10.0rc1"
 from typing import Tuple, Union, Tuple, List
 from enum import Enum

View File

@ -6,7 +6,7 @@ extras["testing"] = ["pytest"]
 setup(
     name="tokenizers",
-    version="0.9.4",
+    version="0.10.0rc1",
     description="Fast and Customizable Tokenizers",
     long_description=open("README.md", "r", encoding="utf-8").read(),
     long_description_content_type="text/markdown",