Skip to content
This repository was archived by the owner on Sep 10, 2025. It is now read-only.
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion test/asset/vocab_test.txt
Original file line number Diff line number Diff line change
Expand Up @@ -4,4 +4,4 @@ c
a
b
a
c
c
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

What exactly is the change for this line? On the GitHub UI, the diff looks the same to me.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hey @Nayef211, this PR does not make any formatting changes. The change in vocab_test.txt is to add a newline at the end of the file, so that the test is also able to cover this condition.

35 changes: 17 additions & 18 deletions torchtext/csrc/vocab.cpp
Original file line number Diff line number Diff line change
@@ -1,14 +1,14 @@
#include <ATen/Parallel.h> // @manual
#include <ATen/Parallel.h> // @manual
#include <common.h>
#include <torch/torch.h> // @manual
#include <vocab.h> // @manual

#include <iostream>
#include <stdexcept>
#include <string>
#include <torch/torch.h> // @manual
#include <vocab.h> // @manual
namespace torchtext {

Vocab::Vocab(StringList tokens,
const c10::optional<int64_t> &default_index)
Vocab::Vocab(StringList tokens, const c10::optional<int64_t> &default_index)
: stoi_(MAX_VOCAB_SIZE, -1), default_index_{default_index} {
for (auto &token : tokens) {
// throw error if duplicate token is found
Expand All @@ -34,8 +34,7 @@ bool Vocab::__contains__(const c10::string_view &token) const {

int64_t Vocab::__getitem__(const c10::string_view &token) const {
int64_t id = _find(token);
if (stoi_[id] != -1)
return stoi_[id];
if (stoi_[id] != -1) return stoi_[id];

// throw error if default_index_ is not set
TORCH_CHECK(default_index_.has_value(),
Expand Down Expand Up @@ -110,8 +109,8 @@ StringList Vocab::lookup_tokens(const std::vector<int64_t> &indices) {
return tokens;
}

std::vector<int64_t>
Vocab::lookup_indices(const std::vector<c10::string_view> &tokens) {
std::vector<int64_t> Vocab::lookup_indices(
const std::vector<c10::string_view> &tokens) {
std::vector<int64_t> indices(tokens.size());
for (size_t i = 0; i < tokens.size(); i++) {
indices[i] = __getitem__(tokens[i]);
Expand Down Expand Up @@ -191,11 +190,9 @@ void parse_raw_text_file_chunk(const std::string &file_path, size_t offset,
}
}

StringList
_concat_tokens(std::vector<std::shared_ptr<IndexDict>> chunk_counters,
const int64_t min_freq, const int64_t num_lines,
const bool sort_tokens) {

StringList _concat_tokens(
std::vector<std::shared_ptr<IndexDict>> chunk_counters,
const int64_t min_freq, const int64_t num_lines, const bool sort_tokens) {
TORCH_CHECK(chunk_counters.size() > 0,
"There must be at least 1 chunk to concatenate!");

Expand All @@ -214,8 +211,11 @@ _concat_tokens(std::vector<std::shared_ptr<IndexDict>> chunk_counters,
tokens_freq[item.first] = cur_token_freq;
}

// add to tokens list only if we exceed min_freq for the first time
if (tokens_freq[item.first] - cur_token_freq < min_freq &&
// add to tokens list only if all of the conditions are met:
// 1. token is not empty
// 2. we exceed min_freq for the first time
if (item.first.length() &&
tokens_freq[item.first] - cur_token_freq < min_freq &&
tokens_freq[item.first] >= min_freq) {
unique_tokens.push_back(item.first);
}
Expand Down Expand Up @@ -248,7 +248,6 @@ _concat_tokens(std::vector<std::shared_ptr<IndexDict>> chunk_counters,
constexpr int64_t GRAIN_SIZE = 13107;
Vocab _load_vocab_from_file(const std::string &file_path,
const int64_t min_freq, const int64_t num_cpus) {

int64_t num_lines = _infer_lines(file_path);
int64_t chunk_size = impl::divup(num_lines, num_cpus);
// Launching a thread on less lines than this likely has too much overhead.
Expand Down Expand Up @@ -374,4 +373,4 @@ c10::intrusive_ptr<Vocab> _deserialize_vocab(VocabStates states) {
return c10::make_intrusive<Vocab>(std::move(strings), default_index);
}

} // namespace torchtext
} // namespace torchtext