
Minor tqdm and logging clean up #4448

Merged: 3 commits, Jul 10, 2020
2 changes: 1 addition & 1 deletion allennlp/commands/train.py
@@ -236,7 +236,7 @@ def train_model(
         world_size = num_nodes * num_procs
 
         logging.info(
-            f"Switching to distributed training mode since multiple GPUs are configured"
+            f"Switching to distributed training mode since multiple GPUs are configured | "
             f"Master is at: {master_addr}:{master_port} | Rank of this node: {node_rank} | "
             f"Number of workers in this node: {num_procs} | Number of nodes: {num_nodes} | "
             f"World size: {world_size}"
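The point of this hunk is implicit string concatenation: adjacent f-string literals are joined with no separator, so the original log line ran "...configured" straight into "Master is at: ...". A minimal before/after sketch, with made-up values standing in for the real variables:

# Adjacent string literals (f-strings included) are concatenated with no separator.
master_addr, master_port, node_rank = "127.0.0.1", 29500, 0  # illustrative values only

before = (
    f"Switching to distributed training mode since multiple GPUs are configured"
    f"Master is at: {master_addr}:{master_port} | Rank of this node: {node_rank}"
)
after = (
    f"Switching to distributed training mode since multiple GPUs are configured | "
    f"Master is at: {master_addr}:{master_port} | Rank of this node: {node_rank}"
)

print(before)  # ...configuredMaster is at: 127.0.0.1:29500 | Rank of this node: 0
print(after)   # ...configured | Master is at: 127.0.0.1:29500 | Rank of this node: 0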
2 changes: 1 addition & 1 deletion allennlp/common/file_utils.py
@@ -283,7 +283,7 @@ def _http_get(url: str, temp_file: IO) -> None:
     req = session.get(url, stream=True)
     content_length = req.headers.get("Content-Length")
     total = int(content_length) if content_length is not None else None
-    progress = Tqdm.tqdm(unit="B", total=total)
+    progress = Tqdm.tqdm(unit="B", total=total, desc="downloading")
     for chunk in req.iter_content(chunk_size=1024):
         if chunk:  # filter out keep-alive new chunks
             progress.update(len(chunk))
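For context, desc is the standard tqdm keyword that prefixes the bar with a label, so the otherwise anonymous download bar now reads "downloading: ...". A rough sketch of the effect, using plain tqdm and a fake byte stream rather than AllenNLP's Tqdm wrapper and a real HTTP response:

from tqdm import tqdm

# Fake byte chunks standing in for req.iter_content(chunk_size=1024).
chunks = [b"x" * 1024 for _ in range(2048)]
total = sum(len(c) for c in chunks)

progress = tqdm(unit="B", total=total, desc="downloading")
for chunk in chunks:
    if chunk:  # mirror the keep-alive filter in the real loop
        progress.update(len(chunk))
progress.close()
# Renders roughly: downloading: 100%|██████████| 2097152/2097152 [00:00<00:00, ...B/s]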
4 changes: 2 additions & 2 deletions allennlp/data/dataset_readers/dataset_reader.py
@@ -282,7 +282,7 @@ def _instances_to_cache_file(self, cache_filename, instances) -> None:
         # Then we just copy the file over to `cache_filename`.
         with CacheFile(cache_filename, mode="w+") as cache_handle:
             logger.info("Caching instances to temp file %s", cache_handle.name)
-            for instance in Tqdm.tqdm(instances):
+            for instance in Tqdm.tqdm(instances, desc="caching instances"):
                 cache_handle.write(self.serialize_instance(instance) + "\n")
 
     def text_to_instance(self, *inputs) -> Instance:
@@ -381,7 +381,7 @@ def _multi_worker_islice(
 
         islice = itertools.islice(iterable, start_index, self.max_instances, step_size)
         if wrap_with_tqdm:
-            islice = Tqdm.tqdm(islice)
+            islice = Tqdm.tqdm(islice, desc="reading instances")
 
         if transform is not None:
             return (transform(x) for x in islice)
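The same labeling applies when an iterator is wrapped directly: even without a known total, the counter is now named. A small sketch with plain tqdm and a toy generator in place of the reader's sliced iterable:

import itertools
from tqdm import tqdm

def read_instances():
    # Toy generator standing in for a dataset reader's instance stream.
    for i in range(10_000):
        yield {"id": i}

# Wrapping an iterator with no known length still shows a labeled counter,
# e.g. "reading instances: 10000it [00:00, ...it/s]".
islice = itertools.islice(read_instances(), 0, None, 1)
for instance in tqdm(islice, desc="reading instances"):
    pass  # process each instance here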
2 changes: 1 addition & 1 deletion allennlp/data/vocabulary.py
@@ -288,7 +288,7 @@ def from_instances(
         padding_token = padding_token if padding_token is not None else DEFAULT_PADDING_TOKEN
         oov_token = oov_token if oov_token is not None else DEFAULT_OOV_TOKEN
         namespace_token_counts: Dict[str, Dict[str, int]] = defaultdict(lambda: defaultdict(int))
-        for instance in Tqdm.tqdm(instances):
+        for instance in Tqdm.tqdm(instances, desc="building vocab"):
             instance.count_vocab_items(namespace_token_counts)
 
         return cls(
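As a side note on the counting structure visible in this hunk: the nested defaultdict gives a per-namespace token counter that each instance can increment without any explicit initialization. A tiny sketch with toy tokens rather than AllenNLP's real count_vocab_items:

from collections import defaultdict
from typing import Dict

# Nested defaultdict: counts per namespace, created on first access.
namespace_token_counts: Dict[str, Dict[str, int]] = defaultdict(lambda: defaultdict(int))

for token in ["the", "cat", "the"]:
    namespace_token_counts["tokens"][token] += 1
for label in ["pos", "neg"]:
    namespace_token_counts["labels"][label] += 1

print(dict(namespace_token_counts["tokens"]))  # {'the': 2, 'cat': 1}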