调整测试例以适应pytest;为测试例添加标签

This commit is contained in:
x54-729 2022-05-02 05:46:47 +00:00
parent d645e88c7f
commit 4bfb509f12
27 changed files with 654 additions and 285 deletions

View File

@ -178,11 +178,11 @@ def dump_fastnlp_backend(default:bool = False, backend=None):
os.makedirs(os.path.dirname(env_path), exist_ok=True) os.makedirs(os.path.dirname(env_path), exist_ok=True)
envs = {} envs = {}
assert backend in SUPPORT_BACKENDS, f"fastNLP only supports {SUPPORT_BACKENDS} right now."
if backend is None: if backend is None:
if FASTNLP_BACKEND in os.environ: if FASTNLP_BACKEND in os.environ:
envs[FASTNLP_BACKEND] = os.environ[FASTNLP_BACKEND] envs[FASTNLP_BACKEND] = os.environ[FASTNLP_BACKEND]
else: else:
assert backend in SUPPORT_BACKENDS, f"fastNLP only supports {SUPPORT_BACKENDS} right now."
envs[FASTNLP_BACKEND] = backend envs[FASTNLP_BACKEND] = backend
if len(envs): if len(envs):
with open(env_path, 'w', encoding='utf8') as f: with open(env_path, 'w', encoding='utf8') as f:

View File

@ -0,0 +1,237 @@
import os
import sys
import time
# os.environ["cuda_archs"] = "61"
# os.environ["FAS"]
os.environ["log_silent"] = "1"
sys.path.append("../../../")
from datasets import load_dataset
from datasets import DatasetDict
import jittor as jt
from jittor import nn, Module
from jittor.dataset import Dataset
jt.flags.use_cuda = True
from fastNLP.core.controllers.trainer import Trainer
from fastNLP.core.metrics.accuracy import Accuracy
from fastNLP.core.vocabulary import Vocabulary
from fastNLP.core.callbacks.progress_callback import RichCallback
from fastNLP.core.callbacks.callback import Callback
from fastNLP.core.dataloaders.jittor_dataloader.fdl import JittorDataLoader
class TextClassificationDataset(Dataset):
    """Adapt a (processed) HuggingFace dataset split to jittor's ``Dataset`` interface.

    Each item is a dict with key ``"x"`` (the token-id sequence from the
    ``"input_ids"`` column) and ``"y"`` (the ``"label"`` column entry).
    """
    def __init__(self, dataset):
        super(TextClassificationDataset, self).__init__()
        # `dataset` is expected to expose "input_ids" and "label" columns,
        # as produced by process_data() — TODO confirm for other callers.
        self.dataset = dataset
        # jittor datasets report their length through set_attrs(total_len=...).
        self.set_attrs(total_len=len(dataset))

    def __getitem__(self, idx):
        return {"x": self.dataset["input_ids"][idx], "y": self.dataset["label"][idx]}
class LSTM(Module):
    """Single-layer LSTM text classifier (2 classes) for jittor.

    Parameters
    ----------
    num_of_words : int
        Vocabulary size for the embedding table.
    hidden_size : int
        LSTM hidden-state width.
    features : int
        Embedding (input feature) dimension.
    """
    def __init__(self, num_of_words, hidden_size, features):
        self.embedding = nn.Embedding(num_of_words, features)
        self.lstm = nn.LSTM(features, hidden_size, batch_first=True)
        self.layer = nn.Linear(hidden_size, 2)
        self.softmax = nn.Softmax(dim=1)
        self.loss_fn = nn.CrossEntropyLoss()
        self.hidden_size = hidden_size
        self.features = features

    def init_hidden(self, x):
        """Return freshly sampled (h0, c0) for a batch shaped like *x* (batch_first)."""
        batch_size = x.shape[0]
        # BUG FIX: previously read the bare name `hidden_size`, which only
        # resolved to the module-level global defined under __main__; using
        # the class elsewhere raised NameError. Use the stored attribute.
        h0 = jt.randn(1, batch_size, self.hidden_size)
        c0 = jt.randn(1, batch_size, self.hidden_size)
        return h0, c0

    def execute(self, input_ids):
        output = self.embedding(input_ids)
        # TODO: mask out padding positions before the LSTM.
        output, (h, c) = self.lstm(output, self.init_hidden(output))
        # NOTE(review): the original comment says "len, batch, hidden_size",
        # but the LSTM was built with batch_first=True, under which the output
        # would be (batch, len, hidden) and output[-1] selects the last *sample*
        # rather than the last timestep — confirm against jittor's nn.LSTM.
        output = self.layer(output[-1])
        return output

    def train_step(self, x, y):
        """Forward pass + loss; returns {"loss": ...} as fastNLP's Trainer expects."""
        x = self(x)
        outputs = self.loss_fn(x, y)
        return {"loss": outputs}

    def evaluate_step(self, x, y):
        """Forward pass for evaluation; returns predictions and flattened targets."""
        x = self(x)
        return {"pred": x, "target": y.reshape((-1,))}
class PrintWhileTrainingCallBack(Callback):
    """Callback that prints the running training loss during training.

    Prints a summary at the start of training, a per-epoch average loss every
    ``print_every_epoch`` epochs, a running average every ``print_every_batch``
    batches, and the total wall-clock time at the end.
    """
    def __init__(self, print_every_epoch, print_every_batch):
        self.print_every_epoch = print_every_epoch
        self.print_every_batch = print_every_batch
        self.loss = 0          # loss accumulated since the last epoch report
        self.start = 0         # wall-clock start of training
        self.epoch_start = 0   # wall-clock start of the current epoch

    def on_train_begin(self, trainer):
        """Print run dimensions before training starts and record the start time."""
        print("Start training. Total {} epochs and {} batches in each epoch.".format(
            trainer.n_epochs, trainer.num_batches_per_epoch
        ))
        self.start = time.time()

    def on_before_backward(self, trainer, outputs):
        """Accumulate the per-batch loss before each backward pass, for averaging."""
        loss = trainer.extract_loss_from_outputs(outputs)
        loss = trainer.driver.tensor_to_numeric(loss)
        self.loss += loss

    def on_train_epoch_begin(self, trainer):
        # Record when this epoch started so on_train_epoch_end can time it.
        self.epoch_start = time.time()

    def on_train_epoch_end(self, trainer):
        """Every ``print_every_epoch`` epochs (and on the last one), print the
        epoch's average loss and elapsed time, then reset the accumulator."""
        if trainer.cur_epoch_idx % self.print_every_epoch == 0 \
            or trainer.cur_epoch_idx == trainer.n_epochs:
            print("Epoch: {} Loss: {} Current epoch training time: {}s".format(
                trainer.cur_epoch_idx, self.loss / trainer.num_batches_per_epoch, time.time() - self.epoch_start
            ))
        # Reset the loss accumulator for the next epoch.
        self.loss = 0

    def on_train_batch_end(self, trainer):
        """Every ``print_every_batch`` batches (and on the epoch's last batch),
        print the average loss accumulated so far in this epoch."""
        # NOTE(review): divides by trainer.batch_idx_in_epoch — if the trainer
        # ever invokes this with batch_idx_in_epoch == 0 (0 % n == 0), this
        # raises ZeroDivisionError; confirm the counter starts at 1.
        if trainer.batch_idx_in_epoch % self.print_every_batch == 0 \
            or trainer.batch_idx_in_epoch == trainer.num_batches_per_epoch:
            print("\tBatch: {} Loss: {}".format(
                trainer.batch_idx_in_epoch, self.loss / trainer.batch_idx_in_epoch
            ))

    def on_train_end(self, trainer):
        print("Total training time: {}s".format(time.time() - self.start))
def process_data(ds: DatasetDict, vocabulary: Vocabulary, max_len=256) -> DatasetDict:
    """Encode every example's text into padded token ids.

    Adds an ``"input_ids"`` column (via :func:`text_to_id`) and switches the
    dataset's output format to numpy over all columns. Returns the mapped
    dataset object.
    """
    def _encode(example):
        return {"input_ids": text_to_id(vocabulary, example["text"], max_len)}

    encoded = ds.map(_encode)
    encoded.set_format(type="numpy", columns=encoded.column_names)
    return encoded
def set_vocabulary(vocab, dataset):
    """Add every whitespace-separated token in *dataset* to *vocab*.

    *dataset* is an iterable of dicts with a ``"text"`` field; *vocab* only
    needs an ``update(iterable)`` method. Returns the same vocab object.
    """
    for tokens in (sample["text"].split() for sample in dataset):
        vocab.update(tokens)
    return vocab
def text_to_id(vocab, text: str, max_len):
    """Convert *text* to a fixed-length list of token ids.

    Splits on whitespace, maps each word through ``vocab.to_index``, pads with
    ``vocab.padding_idx`` up to *max_len*, and truncates to *max_len*.
    """
    words = text.split()
    ids = [vocab.to_index(w) for w in words]
    shortfall = max_len - len(words)
    if shortfall > 0:
        ids.extend([vocab.padding_idx] * shortfall)
    return ids[:max_len]
def get_dataset(name, max_len, train_format="", test_format=""):
    """Download dataset *name*, build a vocabulary, and return encoded splits.

    Returns ``(train, val, test, vocab)`` where the first three are
    :class:`TextClassificationDataset` wrappers. The validation split is 20%%
    of the shuffled training data; the vocabulary is built from train + val
    only, so test-set words may map to the unknown token.
    """
    raw_train = load_dataset(name, split="train" + train_format).shuffle(seed=123)
    raw_test = load_dataset(name, split="test" + test_format).shuffle(seed=321)

    # Carve a validation set out of the training data.
    parts = raw_train.train_test_split(test_size=0.2, seed=123)
    raw_train, raw_val = parts["train"], parts["test"]

    vocab = Vocabulary()
    vocab = set_vocabulary(vocab, raw_train)
    vocab = set_vocabulary(vocab, raw_val)

    encoded_train = process_data(raw_train, vocab, max_len)
    encoded_val = process_data(raw_val, vocab, max_len)
    encoded_test = process_data(raw_test, vocab, max_len)

    return TextClassificationDataset(encoded_train), TextClassificationDataset(encoded_val), \
        TextClassificationDataset(encoded_test), vocab
if __name__ == "__main__":
    # Training hyper-parameters
    max_len = 20
    epochs = 40
    lr = 1
    batch_size = 64
    features = 100
    hidden_size = 128

    # Fetch the dataset
    # imdb.py  SetFit/sst2
    train_data, val_data, test_data, vocab = get_dataset("SetFit/sst2", max_len, "", "")

    # Wrap the splits in dataloaders
    train_dataloader = JittorDataLoader(
        dataset=train_data,
        batch_size=batch_size,
        shuffle=True,
        num_workers=4,
    )
    val_dataloader = JittorDataLoader(
        dataset=val_data,
        batch_size=batch_size,
        shuffle=True,
        num_workers=4,
    )
    test_dataloader = JittorDataLoader(
        dataset=test_data,
        batch_size=1,
        shuffle=False,
    )

    # Initialize the model
    model = LSTM(len(vocab), hidden_size, features)

    # Optimizer
    # A list of multiple optimizers is also accepted here.
    optimizer = nn.SGD(model.parameters(), lr)

    # Metrics
    metrics = {"acc": Accuracy()}

    # Callbacks
    callbacks = [
        PrintWhileTrainingCallBack(print_every_epoch=1, print_every_batch=10),
        # RichCallback(), # its print_every parameter defaults to 1, i.e. the progress bar updates every batch
    ]

    # NOTE(review): device=[0,1,2,3,4] with the "jittor" driver — confirm the
    # jittor driver supports a multi-device list in this fastNLP version.
    trainer = Trainer(
        model=model,
        driver="jittor",
        device=[0,1,2,3,4],
        optimizers=optimizer,
        train_dataloader=train_dataloader,
        validate_dataloaders=val_dataloader,
        validate_every=-1,
        input_mapping=None,
        output_mapping=None,
        metrics=metrics,
        n_epochs=epochs,
        callbacks=callbacks,
        # progress_bar="raw"
    )
    trainer.run()

View File

@ -0,0 +1,110 @@
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""IMDB movie reviews dataset."""
import datasets
from datasets.tasks import TextClassification
_DESCRIPTION = """\
Large Movie Review Dataset.
This is a dataset for binary sentiment classification containing substantially \
more data than previous benchmark datasets. We provide a set of 25,000 highly \
polar movie reviews for training, and 25,000 for testing. There is additional \
unlabeled data for use as well.\
"""
_CITATION = """\
@InProceedings{maas-EtAl:2011:ACL-HLT2011,
author = {Maas, Andrew L. and Daly, Raymond E. and Pham, Peter T. and Huang, Dan and Ng, Andrew Y. and Potts, Christopher},
title = {Learning Word Vectors for Sentiment Analysis},
booktitle = {Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies},
month = {June},
year = {2011},
address = {Portland, Oregon, USA},
publisher = {Association for Computational Linguistics},
pages = {142--150},
url = {http://www.aclweb.org/anthology/P11-1015}
}
"""
_DOWNLOAD_URL = "http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz"
class IMDBReviewsConfig(datasets.BuilderConfig):
    """BuilderConfig for the IMDB movie-reviews dataset."""

    def __init__(self, **kwargs):
        """Create an IMDBReviews config.

        Args:
            **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``.
        """
        # Pin the config version; everything else is pass-through.
        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)
class Imdb(datasets.GeneratorBasedBuilder):
    """IMDB movie reviews dataset (binary sentiment + unsupervised split)."""

    BUILDER_CONFIGS = [
        IMDBReviewsConfig(
            name="plain_text",
            description="Plain text",
        )
    ]

    def _info(self):
        # Schema: raw review text plus a binary ClassLabel ("neg"=0, "pos"=1).
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {"text": datasets.Value("string"), "label": datasets.features.ClassLabel(names=["neg", "pos"])}
            ),
            supervised_keys=None,
            homepage="http://ai.stanford.edu/~amaas/data/sentiment/",
            citation=_CITATION,
            task_templates=[TextClassification(text_column="text", label_column="label")],
        )

    def _split_generators(self, dl_manager):
        # Download the tarball once and stream it per split via iter_archive,
        # so the archive is never fully extracted to disk.
        archive = dl_manager.download(_DOWNLOAD_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"files": dl_manager.iter_archive(archive), "split": "train"}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"files": dl_manager.iter_archive(archive), "split": "test"}
            ),
            # The unsupervised split reuses the train/ subtree but keeps only
            # the unlabeled "unsup" folder (labeled=False below).
            datasets.SplitGenerator(
                name=datasets.Split("unsupervised"),
                gen_kwargs={"files": dl_manager.iter_archive(archive), "split": "train", "labeled": False},
            ),
        ]

    def _generate_examples(self, files, split, labeled=True):
        """Generate aclImdb examples keyed by archive path.

        Archive paths look like ``aclImdb/<split>/<label>/<file>.txt``; the
        second path component is the label folder ("pos"/"neg"/"unsup").
        """
        # For labeled examples, extract the label from the path.
        if labeled:
            label_mapping = {"pos": 1, "neg": 0}
            for path, f in files:
                if path.startswith(f"aclImdb/{split}"):
                    label = label_mapping.get(path.split("/")[2])
                    # Files outside pos/neg (e.g. unsup) are skipped here.
                    if label is not None:
                        yield path, {"text": f.read().decode("utf-8"), "label": label}
        else:
            # Unsupervised: emit only the "unsup" folder with label -1.
            for path, f in files:
                if path.startswith(f"aclImdb/{split}"):
                    if path.split("/")[2] == "unsup":
                        yield path, {"text": f.read().decode("utf-8"), "label": -1}

View File

@ -65,6 +65,7 @@ def model_and_optimizers():
@pytest.mark.parametrize("driver,device", [("torch", "cpu")]) # , ("torch", 6), ("torch", [6, 7]) @pytest.mark.parametrize("driver,device", [("torch", "cpu")]) # , ("torch", 6), ("torch", [6, 7])
@pytest.mark.parametrize("callbacks", [[RecordTrainerEventTriggerCallback()]]) @pytest.mark.parametrize("callbacks", [[RecordTrainerEventTriggerCallback()]])
@pytest.mark.torch
@magic_argv_env_context @magic_argv_env_context
def test_trainer_event_trigger( def test_trainer_event_trigger(
model_and_optimizers: TrainerParameters, model_and_optimizers: TrainerParameters,

View File

@ -7,16 +7,16 @@ from tests.helpers.utils import magic_argv_env_context
@magic_argv_env_context @magic_argv_env_context
def test_trainer_torch_without_evaluator(): def test_trainer_torch_without_evaluator():
@Trainer.on(Events.ON_TRAIN_EPOCH_BEGIN(every=10)) @Trainer.on(Events.on_train_epoch_begin(every=10))
def fn1(trainer): def fn1(trainer):
pass pass
@Trainer.on(Events.ON_TRAIN_BATCH_BEGIN(every=10)) @Trainer.on(Events.on_train_batch_begin(every=10))
def fn2(trainer, batch, indices): def fn2(trainer, batch, indices):
pass pass
with pytest.raises(AssertionError): with pytest.raises(AssertionError):
@Trainer.on(Events.ON_TRAIN_BATCH_BEGIN(every=10)) @Trainer.on(Events.on_train_batch_begin(every=10))
def fn3(trainer, batch): def fn3(trainer, batch):
pass pass

View File

@ -25,8 +25,8 @@ class TrainPaddleConfig:
@pytest.mark.parametrize("driver,device", [("paddle", "cpu"), ("paddle", 1), ("fleet", [0, 1])]) @pytest.mark.parametrize("driver,device", [("paddle", "cpu"), ("paddle", 1), ("fleet", [0, 1])])
# @pytest.mark.parametrize("driver,device", [("fleet", [0, 1])]) # @pytest.mark.parametrize("driver,device", [("fleet", [0, 1])])
@pytest.mark.parametrize("callbacks", [[RecordMetricCallback(monitor="acc#acc", metric_threshold=0.0, larger_better=True), @pytest.mark.parametrize("callbacks", [[RichCallback(5)]])
RichCallback(5)]]) @pytest.mark.paddle
@magic_argv_env_context @magic_argv_env_context
def test_trainer_paddle( def test_trainer_paddle(
driver, driver,

View File

@ -98,6 +98,7 @@ def model_and_optimizers(request):
# 测试一下普通的情况; # 测试一下普通的情况;
@pytest.mark.torch
@pytest.mark.parametrize("driver,device", [("torch", "cpu"), ("torch", 1), ("torch", [0, 1])]) # ("torch", "cpu"), ("torch", 1), ("torch", [0, 1]) @pytest.mark.parametrize("driver,device", [("torch", "cpu"), ("torch", 1), ("torch", [0, 1])]) # ("torch", "cpu"), ("torch", 1), ("torch", [0, 1])
@pytest.mark.parametrize("callbacks", [[RecordMetricCallback(monitor="acc", metric_threshold=0.2, larger_better=True)]]) @pytest.mark.parametrize("callbacks", [[RecordMetricCallback(monitor="acc", metric_threshold=0.2, larger_better=True)]])
@pytest.mark.parametrize("evaluate_every", [-3, -1, 100]) @pytest.mark.parametrize("evaluate_every", [-3, -1, 100])
@ -133,6 +134,7 @@ def test_trainer_torch_with_evaluator(
dist.destroy_process_group() dist.destroy_process_group()
@pytest.mark.torch
@pytest.mark.parametrize("driver,device", [("torch", [0, 1]), ("torch", 1)]) # ("torch", [0, 1]),("torch", 1) @pytest.mark.parametrize("driver,device", [("torch", [0, 1]), ("torch", 1)]) # ("torch", [0, 1]),("torch", 1)
@pytest.mark.parametrize("fp16", [True, False]) @pytest.mark.parametrize("fp16", [True, False])
@pytest.mark.parametrize("accumulation_steps", [1, 3]) @pytest.mark.parametrize("accumulation_steps", [1, 3])

View File

@ -76,6 +76,7 @@ def model_and_optimizers(request):
# 测试一下 cpu # 测试一下 cpu
@pytest.mark.torch
@pytest.mark.parametrize("driver,device", [("torch", "cpu")]) @pytest.mark.parametrize("driver,device", [("torch", "cpu")])
@magic_argv_env_context @magic_argv_env_context
def test_trainer_torch_without_evaluator( def test_trainer_torch_without_evaluator(
@ -107,6 +108,7 @@ def test_trainer_torch_without_evaluator(
dist.destroy_process_group() dist.destroy_process_group()
@pytest.mark.torch
@pytest.mark.parametrize("driver,device", [("torch", 1), ("torch", [1, 2])]) # ("torch", 4), @pytest.mark.parametrize("driver,device", [("torch", 1), ("torch", [1, 2])]) # ("torch", 4),
@pytest.mark.parametrize("fp16", [False, True]) @pytest.mark.parametrize("fp16", [False, True])
@pytest.mark.parametrize("accumulation_steps", [1, 3]) @pytest.mark.parametrize("accumulation_steps", [1, 3])
@ -146,6 +148,7 @@ def test_trainer_torch_without_evaluator_fp16_accumulation_steps(
# 测试 accumulation_steps # 测试 accumulation_steps
@pytest.mark.torch
@pytest.mark.parametrize("driver,device", [("torch", "cpu"), ("torch", 1), ("torch", [1, 2])]) @pytest.mark.parametrize("driver,device", [("torch", "cpu"), ("torch", 1), ("torch", [1, 2])])
@pytest.mark.parametrize("accumulation_steps", [1, 3]) @pytest.mark.parametrize("accumulation_steps", [1, 3])
@magic_argv_env_context @magic_argv_env_context
@ -179,6 +182,7 @@ def test_trainer_torch_without_evaluator_accumulation_steps(
dist.destroy_process_group() dist.destroy_process_group()
@pytest.mark.torch
@pytest.mark.parametrize("driver,device", [("torch", [1, 2])]) @pytest.mark.parametrize("driver,device", [("torch", [1, 2])])
@pytest.mark.parametrize("output_from_new_proc", ["all", "ignore", "only_error", "test_log"]) @pytest.mark.parametrize("output_from_new_proc", ["all", "ignore", "only_error", "test_log"])
@magic_argv_env_context @magic_argv_env_context
@ -242,6 +246,7 @@ def test_trainer_output_from_new_proc(
rank_zero_rm(path) rank_zero_rm(path)
@pytest.mark.torch
@pytest.mark.parametrize("driver,device", [("torch", [1, 2])]) @pytest.mark.parametrize("driver,device", [("torch", [1, 2])])
@pytest.mark.parametrize("cur_rank", [0]) # 依次测试如果是当前进程出现错误,是否能够正确地 kill 掉其他进程; , 1, 2, 3 @pytest.mark.parametrize("cur_rank", [0]) # 依次测试如果是当前进程出现错误,是否能够正确地 kill 掉其他进程; , 1, 2, 3
@magic_argv_env_context @magic_argv_env_context
@ -294,6 +299,7 @@ def test_torch_distributed_launch_1(version):
subprocess.check_call(command) subprocess.check_call(command)
@pytest.mark.torch
@pytest.mark.parametrize("version", [0, 1, 2, 3]) @pytest.mark.parametrize("version", [0, 1, 2, 3])
@magic_argv_env_context @magic_argv_env_context
def test_torch_distributed_launch_2(version): def test_torch_distributed_launch_2(version):
@ -307,6 +313,7 @@ def test_torch_distributed_launch_2(version):
subprocess.check_call(command) subprocess.check_call(command)
@pytest.mark.torch
@pytest.mark.parametrize("driver,device", [("torch", 0), ("torch_ddp", [0, 1])]) @pytest.mark.parametrize("driver,device", [("torch", 0), ("torch_ddp", [0, 1])])
@magic_argv_env_context @magic_argv_env_context
def test_torch_wo_auto_param_call( def test_torch_wo_auto_param_call(

View File

@ -10,7 +10,7 @@ class Test_WrapDataLoader:
all_sanity_batches = [4, 20, 100] all_sanity_batches = [4, 20, 100]
for sanity_batches in all_sanity_batches: for sanity_batches in all_sanity_batches:
data = NormalIterator(num_of_data=1000) data = NormalIterator(num_of_data=1000)
wrapper = _TruncatedDataLoader(num_batches=sanity_batches) wrapper = _TruncatedDataLoader(dataloader=data, num_batches=sanity_batches)
dataloader = iter(wrapper(dataloader=data)) dataloader = iter(wrapper(dataloader=data))
mark = 0 mark = 0
while True: while True:
@ -31,7 +31,7 @@ class Test_WrapDataLoader:
for sanity_batches in all_sanity_batches: for sanity_batches in all_sanity_batches:
dataset = TorchNormalDataset(num_of_data=1000) dataset = TorchNormalDataset(num_of_data=1000)
dataloader = DataLoader(dataset, batch_size=bs, shuffle=True) dataloader = DataLoader(dataset, batch_size=bs, shuffle=True)
wrapper = _TruncatedDataLoader(num_batches=sanity_batches) wrapper = _TruncatedDataLoader(dataloader, num_batches=sanity_batches)
dataloader = wrapper(dataloader) dataloader = wrapper(dataloader)
dataloader = iter(dataloader) dataloader = iter(dataloader)
all_supposed_running_data_num = 0 all_supposed_running_data_num = 0
@ -54,7 +54,7 @@ class Test_WrapDataLoader:
for sanity_batches in all_sanity_batches: for sanity_batches in all_sanity_batches:
dataset = TorchNormalDataset(num_of_data=1000) dataset = TorchNormalDataset(num_of_data=1000)
dataloader = DataLoader(dataset, batch_size=bs, shuffle=True) dataloader = DataLoader(dataset, batch_size=bs, shuffle=True)
wrapper = _TruncatedDataLoader(num_batches=sanity_batches) wrapper = _TruncatedDataLoader(dataloader, num_batches=sanity_batches)
dataloader = wrapper(dataloader) dataloader = wrapper(dataloader)
length.append(len(dataloader)) length.append(len(dataloader))
assert length == reduce(lambda x, y: x+y, [all_sanity_batches for _ in range(len(bses))]) assert length == reduce(lambda x, y: x+y, [all_sanity_batches for _ in range(len(bses))])

View File

@ -1,12 +1,16 @@
import pytest import pytest
from jittor.dataset import Dataset
import jittor
import numpy as np import numpy as np
from datasets import Dataset as HfDataset from datasets import Dataset as HfDataset
from datasets import load_dataset from datasets import load_dataset
from fastNLP.core.dataloaders.jittor_dataloader import JittorDataLoader from fastNLP.core.dataloaders.jittor_dataloader import JittorDataLoader
from fastNLP.core.dataset import DataSet as Fdataset from fastNLP.core.dataset import DataSet as Fdataset
from fastNLP.envs.imports import _NEED_IMPORT_JITTOR
if _NEED_IMPORT_JITTOR:
from jittor.dataset import Dataset
import jittor
else:
from fastNLP.core.utils.dummy_class import DummyClass as Dataset
class MyDataset(Dataset): class MyDataset(Dataset):
@ -25,7 +29,7 @@ class MyDataset(Dataset):
# def __len__(self): # def __len__(self):
# return self.dataset_len # return self.dataset_len
@pytest.mark.jittor
class TestJittor: class TestJittor:
def test_v1(self): def test_v1(self):

View File

@ -1,13 +1,18 @@
import unittest import pytest
import os import os
import numpy as np import numpy as np
import jittor as jt # 将 jittor 引入
from jittor import nn, Module # 引入相关的模块
from jittor import init
from jittor.dataset import MNIST
from fastNLP.core.drivers.jittor_driver.single_device import JittorSingleDriver from fastNLP.core.drivers.jittor_driver.single_device import JittorSingleDriver
from fastNLP.envs.imports import _NEED_IMPORT_JITTOR
if _NEED_IMPORT_JITTOR:
import jittor as jt # 将 jittor 引入
from jittor import nn, Module # 引入相关的模块
from jittor import init
from jittor.dataset import MNIST
else:
from fastNLP.core.utils.dummy_class import DummyClass as Module
class Model (Module): class Model (Module):
@ -39,7 +44,8 @@ class Model (Module):
x = self.fc2 (x) x = self.fc2 (x)
return x return x
class SingleDeviceTestCase(unittest.TestCase): @pytest.mark.jittor
class TestSingleDevice:
def test_on_gpu_without_fp16(self): def test_on_gpu_without_fp16(self):
# TODO get_dataloader # TODO get_dataloader
@ -82,7 +88,7 @@ class SingleDeviceTestCase(unittest.TestCase):
total_acc += acc total_acc += acc
total_num += batch_size total_num += batch_size
acc = acc / batch_size acc = acc / batch_size
self.assertGreater(total_acc / total_num, 0.95) assert total_acc / total_num > 0.95
def test_on_cpu_without_fp16(self): def test_on_cpu_without_fp16(self):

View File

@ -18,6 +18,7 @@ from tests.helpers.utils import magic_argv_env_context
import paddle import paddle
import paddle.distributed as dist import paddle.distributed as dist
@pytest.mark.paddle
class TestDistUtilsTools: class TestDistUtilsTools:
""" """
测试一些工具函数 测试一些工具函数
@ -78,6 +79,7 @@ class TestDistUtilsTools:
assert res["string"] == paddle_dict["string"] assert res["string"] == paddle_dict["string"]
@pytest.mark.paddle
class TestAllGatherAndBroadCast: class TestAllGatherAndBroadCast:
@classmethod @classmethod

View File

@ -38,6 +38,7 @@ def generate_driver(num_labels, feature_dimension, device=[0,1], fp16=False, out
# #
############################################################################ ############################################################################
@pytest.mark.paddle
class TestFleetDriverFunction: class TestFleetDriverFunction:
""" """
测试 PaddleFleetDriver 一些简单函数的测试类基本都是测试能否运行是否存在 import 错误等问题 测试 PaddleFleetDriver 一些简单函数的测试类基本都是测试能否运行是否存在 import 错误等问题
@ -145,6 +146,7 @@ class TestFleetDriverFunction:
# #
############################################################################ ############################################################################
@pytest.mark.paddle
class TestSetDistReproDataloader: class TestSetDistReproDataloader:
@classmethod @classmethod
@ -517,6 +519,8 @@ class TestSetDistReproDataloader:
# 测试 save 和 load 相关的功能 # 测试 save 和 load 相关的功能
# #
############################################################################ ############################################################################
@pytest.mark.paddle
class TestSaveLoad: class TestSaveLoad:
""" """
测试多卡情况下 save load 相关函数的表现 测试多卡情况下 save load 相关函数的表现

View File

@ -8,12 +8,14 @@ from tests.helpers.utils import magic_argv_env_context
import paddle import paddle
@pytest.mark.paddle
def test_incorrect_driver(): def test_incorrect_driver():
model = PaddleNormalModel_Classification_1(2, 100) model = PaddleNormalModel_Classification_1(2, 100)
with pytest.raises(ValueError): with pytest.raises(ValueError):
driver = initialize_paddle_driver("torch", 0, model) driver = initialize_paddle_driver("torch", 0, model)
@pytest.mark.paddle
@pytest.mark.parametrize( @pytest.mark.parametrize(
"device", "device",
["cpu", "gpu:0", 0] ["cpu", "gpu:0", 0]
@ -31,6 +33,7 @@ def test_get_single_device(driver, device):
driver = initialize_paddle_driver(driver, device, model) driver = initialize_paddle_driver(driver, device, model)
assert isinstance(driver, PaddleSingleDriver) assert isinstance(driver, PaddleSingleDriver)
@pytest.mark.paddle
@pytest.mark.parametrize( @pytest.mark.parametrize(
"device", "device",
[0, 1, [1]] [0, 1, [1]]
@ -50,6 +53,7 @@ def test_get_fleet_2(driver, device):
assert isinstance(driver, PaddleFleetDriver) assert isinstance(driver, PaddleFleetDriver)
@pytest.mark.paddle
@pytest.mark.parametrize( @pytest.mark.parametrize(
"device", "device",
[[0, 2, 3], -1] [[0, 2, 3], -1]
@ -69,6 +73,7 @@ def test_get_fleet(driver, device):
assert isinstance(driver, PaddleFleetDriver) assert isinstance(driver, PaddleFleetDriver)
@pytest.mark.paddle
@pytest.mark.parametrize( @pytest.mark.parametrize(
("driver", "device"), ("driver", "device"),
[("fleet", "cpu")] [("fleet", "cpu")]
@ -82,6 +87,7 @@ def test_get_fleet_cpu(driver, device):
with pytest.raises(ValueError): with pytest.raises(ValueError):
driver = initialize_paddle_driver(driver, device, model) driver = initialize_paddle_driver(driver, device, model)
@pytest.mark.paddle
@pytest.mark.parametrize( @pytest.mark.parametrize(
"device", "device",
[-2, [0, get_gpu_count() + 1, 3], [-2], get_gpu_count() + 1] [-2, [0, get_gpu_count() + 1, 3], [-2], get_gpu_count() + 1]
@ -97,4 +103,4 @@ def test_device_out_of_range(driver, device):
""" """
model = PaddleNormalModel_Classification_1(2, 100) model = PaddleNormalModel_Classification_1(2, 100)
with pytest.raises(ValueError): with pytest.raises(ValueError):
driver = initialize_paddle_driver(driver, device, model) driver = initialize_paddle_driver(driver, device, model)

View File

@ -29,6 +29,7 @@ class TestPaddleDriverFunctions:
model = PaddleNormalModel_Classification_1(10, 32) model = PaddleNormalModel_Classification_1(10, 32)
self.driver = PaddleSingleDriver(model, device="cpu") self.driver = PaddleSingleDriver(model, device="cpu")
@pytest.mark.torchpaddle
def test_check_single_optimizer_legality(self): def test_check_single_optimizer_legality(self):
""" """
测试传入单个 optimizer 时的表现 测试传入单个 optimizer 时的表现
@ -45,6 +46,7 @@ class TestPaddleDriverFunctions:
with pytest.raises(ValueError): with pytest.raises(ValueError):
self.driver.set_optimizers(optimizer) self.driver.set_optimizers(optimizer)
@pytest.mark.torchpaddle
def test_check_optimizers_legality(self): def test_check_optimizers_legality(self):
""" """
测试传入 optimizer list 的表现 测试传入 optimizer list 的表现
@ -65,6 +67,7 @@ class TestPaddleDriverFunctions:
with pytest.raises(ValueError): with pytest.raises(ValueError):
self.driver.set_optimizers(optimizers) self.driver.set_optimizers(optimizers)
@pytest.mark.torchpaddle
def test_check_dataloader_legality_in_train(self): def test_check_dataloader_legality_in_train(self):
""" """
测试 `is_train` 参数为 True _check_dataloader_legality 函数的表现 测试 `is_train` 参数为 True _check_dataloader_legality 函数的表现
@ -85,6 +88,7 @@ class TestPaddleDriverFunctions:
with pytest.raises(ValueError): with pytest.raises(ValueError):
PaddleSingleDriver.check_dataloader_legality(dataloader, "dataloader", True) PaddleSingleDriver.check_dataloader_legality(dataloader, "dataloader", True)
@pytest.mark.torchpaddle
def test_check_dataloader_legality_in_test(self): def test_check_dataloader_legality_in_test(self):
""" """
测试 `is_train` 参数为 False _check_dataloader_legality 函数的表现 测试 `is_train` 参数为 False _check_dataloader_legality 函数的表现
@ -122,6 +126,7 @@ class TestPaddleDriverFunctions:
with pytest.raises(ValueError): with pytest.raises(ValueError):
PaddleSingleDriver.check_dataloader_legality(dataloader, "dataloader", False) PaddleSingleDriver.check_dataloader_legality(dataloader, "dataloader", False)
@pytest.mark.paddle
def test_tensor_to_numeric(self): def test_tensor_to_numeric(self):
""" """
测试 tensor_to_numeric 函数 测试 tensor_to_numeric 函数
@ -175,6 +180,7 @@ class TestPaddleDriverFunctions:
assert r == d.tolist() assert r == d.tolist()
assert res["dict"]["tensor"] == tensor_dict["dict"]["tensor"].tolist() assert res["dict"]["tensor"] == tensor_dict["dict"]["tensor"].tolist()
@pytest.mark.paddle
def test_set_model_mode(self): def test_set_model_mode(self):
""" """
测试 set_model_mode 函数 测试 set_model_mode 函数
@ -187,6 +193,7 @@ class TestPaddleDriverFunctions:
with pytest.raises(AssertionError): with pytest.raises(AssertionError):
self.driver.set_model_mode("test") self.driver.set_model_mode("test")
@pytest.mark.paddle
def test_move_model_to_device_cpu(self): def test_move_model_to_device_cpu(self):
""" """
测试 move_model_to_device 函数 测试 move_model_to_device 函数
@ -194,6 +201,7 @@ class TestPaddleDriverFunctions:
PaddleSingleDriver.move_model_to_device(self.driver.model, "cpu") PaddleSingleDriver.move_model_to_device(self.driver.model, "cpu")
assert self.driver.model.linear1.weight.place.is_cpu_place() assert self.driver.model.linear1.weight.place.is_cpu_place()
@pytest.mark.paddle
def test_move_model_to_device_gpu(self): def test_move_model_to_device_gpu(self):
""" """
测试 move_model_to_device 函数 测试 move_model_to_device 函数
@ -202,6 +210,7 @@ class TestPaddleDriverFunctions:
assert self.driver.model.linear1.weight.place.is_gpu_place() assert self.driver.model.linear1.weight.place.is_gpu_place()
assert self.driver.model.linear1.weight.place.gpu_device_id() == 0 assert self.driver.model.linear1.weight.place.gpu_device_id() == 0
@pytest.mark.paddle
def test_worker_init_function(self): def test_worker_init_function(self):
""" """
测试 worker_init_function 测试 worker_init_function
@ -210,6 +219,7 @@ class TestPaddleDriverFunctions:
# TODO正确性 # TODO正确性
PaddleSingleDriver.worker_init_function(0) PaddleSingleDriver.worker_init_function(0)
@pytest.mark.paddle
def test_set_deterministic_dataloader(self): def test_set_deterministic_dataloader(self):
""" """
测试 set_deterministic_dataloader 测试 set_deterministic_dataloader
@ -219,6 +229,7 @@ class TestPaddleDriverFunctions:
dataloader = DataLoader(PaddleNormalDataset()) dataloader = DataLoader(PaddleNormalDataset())
self.driver.set_deterministic_dataloader(dataloader) self.driver.set_deterministic_dataloader(dataloader)
@pytest.mark.paddle
def test_set_sampler_epoch(self): def test_set_sampler_epoch(self):
""" """
测试 set_sampler_epoch 测试 set_sampler_epoch
@ -228,6 +239,7 @@ class TestPaddleDriverFunctions:
dataloader = DataLoader(PaddleNormalDataset()) dataloader = DataLoader(PaddleNormalDataset())
self.driver.set_sampler_epoch(dataloader, 0) self.driver.set_sampler_epoch(dataloader, 0)
@pytest.mark.paddle
@pytest.mark.parametrize("batch_size", [16]) @pytest.mark.parametrize("batch_size", [16])
@pytest.mark.parametrize("shuffle", [True, False]) @pytest.mark.parametrize("shuffle", [True, False])
@pytest.mark.parametrize("drop_last", [True, False]) @pytest.mark.parametrize("drop_last", [True, False])
@ -253,6 +265,7 @@ class TestPaddleDriverFunctions:
assert res.batch_size == batch_size assert res.batch_size == batch_size
assert res.drop_last == drop_last assert res.drop_last == drop_last
@pytest.mark.paddle
@pytest.mark.parametrize("batch_size", [16]) @pytest.mark.parametrize("batch_size", [16])
@pytest.mark.parametrize("shuffle", [True, False]) @pytest.mark.parametrize("shuffle", [True, False])
@pytest.mark.parametrize("drop_last", [True, False]) @pytest.mark.parametrize("drop_last", [True, False])
@ -281,6 +294,7 @@ class TestPaddleDriverFunctions:
assert res.batch_size == batch_size assert res.batch_size == batch_size
assert res.drop_last == drop_last assert res.drop_last == drop_last
@pytest.mark.paddle
@pytest.mark.parametrize("batch_size", [16]) @pytest.mark.parametrize("batch_size", [16])
@pytest.mark.parametrize("shuffle", [True, False]) @pytest.mark.parametrize("shuffle", [True, False])
@pytest.mark.parametrize("drop_last", [True, False]) @pytest.mark.parametrize("drop_last", [True, False])
@ -311,6 +325,7 @@ class TestPaddleDriverFunctions:
# #
############################################################################ ############################################################################
@pytest.mark.paddle
class TestSingleDeviceFunction: class TestSingleDeviceFunction:
""" """
测试其它函数的测试例 测试其它函数的测试例
@ -345,6 +360,7 @@ class TestSingleDeviceFunction:
# #
############################################################################ ############################################################################
@pytest.mark.paddle
class TestSetDistReproDataloader: class TestSetDistReproDataloader:
""" """
专门测试 set_dist_repro_dataloader 函数的类 专门测试 set_dist_repro_dataloader 函数的类
@ -541,6 +557,7 @@ def prepare_test_save_load():
driver1, driver2 = generate_random_driver(10, 10), generate_random_driver(10, 10) driver1, driver2 = generate_random_driver(10, 10), generate_random_driver(10, 10)
return driver1, driver2, dataloader return driver1, driver2, dataloader
@pytest.mark.paddle
@pytest.mark.parametrize("only_state_dict", ([True, False])) @pytest.mark.parametrize("only_state_dict", ([True, False]))
def test_save_and_load_model(prepare_test_save_load, only_state_dict): def test_save_and_load_model(prepare_test_save_load, only_state_dict):
""" """
@ -570,6 +587,7 @@ def test_save_and_load_model(prepare_test_save_load, only_state_dict):
rank_zero_rm(path + ".pdiparams.info") rank_zero_rm(path + ".pdiparams.info")
rank_zero_rm(path + ".pdmodel") rank_zero_rm(path + ".pdmodel")
@pytest.mark.paddle
# @pytest.mark.parametrize("only_state_dict", ([True, False])) # @pytest.mark.parametrize("only_state_dict", ([True, False]))
@pytest.mark.parametrize("only_state_dict", ([True])) @pytest.mark.parametrize("only_state_dict", ([True]))
@pytest.mark.parametrize("fp16", ([True, False])) @pytest.mark.parametrize("fp16", ([True, False]))
@ -650,6 +668,7 @@ def test_save_and_load_with_randombatchsampler(only_state_dict, fp16):
# @pytest.mark.parametrize("only_state_dict", ([True, False])) # @pytest.mark.parametrize("only_state_dict", ([True, False]))
# TODO 在有迭代且使用了paddle.jit.save的时候会引发段错误注释掉任意一段都不会出错 # TODO 在有迭代且使用了paddle.jit.save的时候会引发段错误注释掉任意一段都不会出错
# 但无法在单独的文件中复现 # 但无法在单独的文件中复现
@pytest.mark.paddle
@pytest.mark.parametrize("only_state_dict", ([True])) @pytest.mark.parametrize("only_state_dict", ([True]))
@pytest.mark.parametrize("fp16", ([True, False])) @pytest.mark.parametrize("fp16", ([True, False]))
def test_save_and_load_with_randomsampler(only_state_dict, fp16): def test_save_and_load_with_randomsampler(only_state_dict, fp16):

View File

@ -1,3 +1,4 @@
import os
import pytest import pytest
from fastNLP.core.drivers.paddle_driver.utils import ( from fastNLP.core.drivers.paddle_driver.utils import (
@ -23,12 +24,14 @@ from tests.helpers.datasets.paddle_data import PaddleNormalDataset
("3,6,7,8", "6,7,8", "gpu:2", str, "gpu:1"), ("3,6,7,8", "6,7,8", "gpu:2", str, "gpu:1"),
) )
) )
@pytest.mark.paddle
def test_get_device_from_visible_str(user_visible_devices, cuda_visible_devices, device, output_type, correct): def test_get_device_from_visible_str(user_visible_devices, cuda_visible_devices, device, output_type, correct):
os.environ["CUDA_VISIBLE_DEVICES"] = cuda_visible_devices os.environ["CUDA_VISIBLE_DEVICES"] = cuda_visible_devices
os.environ["USER_CUDA_VISIBLE_DEVICES"] = user_visible_devices os.environ["USER_CUDA_VISIBLE_DEVICES"] = user_visible_devices
res = get_device_from_visible(device, output_type) res = get_device_from_visible(device, output_type)
assert res == correct assert res == correct
@pytest.mark.paddle
def test_replace_batch_sampler(): def test_replace_batch_sampler():
dataset = PaddleNormalDataset(10) dataset = PaddleNormalDataset(10)
dataloader = DataLoader(dataset, batch_size=32) dataloader = DataLoader(dataset, batch_size=32)
@ -42,6 +45,7 @@ def test_replace_batch_sampler():
assert len(replaced_loader.dataset) == len(dataset) assert len(replaced_loader.dataset) == len(dataset)
assert replaced_loader.batch_sampler.batch_size == 16 assert replaced_loader.batch_sampler.batch_size == 16
@pytest.mark.paddle
def test_replace_sampler(): def test_replace_sampler():
dataset = PaddleNormalDataset(10) dataset = PaddleNormalDataset(10)
dataloader = DataLoader(dataset, batch_size=32) dataloader = DataLoader(dataset, batch_size=32)

View File

@ -1,31 +0,0 @@
import sys
sys.path.append("../../../../")
from fastNLP.core.drivers.torch_driver.ddp import TorchDDPDriver
from tests.helpers.models.torch_model import TorchNormalModel_Classification_1
import torch
device = [0, 1]
torch_model = TorchNormalModel_Classification_1(10, 10)
torch_opt = torch.optim.Adam(params=torch_model.parameters(), lr=0.01)
device = [torch.device(i) for i in device]
driver = TorchDDPDriver(
model=torch_model,
parallel_device=device,
fp16=False
)
driver.set_optimizers(torch_opt)
driver.setup()
print("-----------first--------------")
device = [0, 2]
torch_model = TorchNormalModel_Classification_1(10, 10)
torch_opt = torch.optim.Adam(params=torch_model.parameters(), lr=0.01)
device = [torch.device(i) for i in device]
driver = TorchDDPDriver(
model=torch_model,
parallel_device=device,
fp16=False
)
driver.set_optimizers(torch_opt)
driver.setup()

View File

@ -1,4 +1,5 @@
import os import os
import pytest
import torch import torch
import torch.distributed as dist import torch.distributed as dist

View File

@ -62,6 +62,7 @@ class TestTorchDriverFunctions:
model = TorchNormalModel_Classification_1(10, 32) model = TorchNormalModel_Classification_1(10, 32)
self.driver = TorchSingleDriver(model, device="cpu") self.driver = TorchSingleDriver(model, device="cpu")
@pytest.mark.torchpaddle
def test_check_single_optimizer_legality(self): def test_check_single_optimizer_legality(self):
""" """
测试传入单个 optimizer 时的表现 测试传入单个 optimizer 时的表现
@ -81,6 +82,7 @@ class TestTorchDriverFunctions:
with pytest.raises(ValueError): with pytest.raises(ValueError):
self.driver.set_optimizers(optimizer) self.driver.set_optimizers(optimizer)
@pytest.mark.torchpaddle
def test_check_optimizers_legality(self): def test_check_optimizers_legality(self):
""" """
测试传入 optimizer list 的表现 测试传入 optimizer list 的表现
@ -104,6 +106,7 @@ class TestTorchDriverFunctions:
with pytest.raises(ValueError): with pytest.raises(ValueError):
self.driver.set_optimizers(optimizers) self.driver.set_optimizers(optimizers)
@pytest.mark.torchpaddle
def test_check_dataloader_legality_in_train(self): def test_check_dataloader_legality_in_train(self):
""" """
测试 `is_train` 参数为 True _check_dataloader_legality 函数的表现 测试 `is_train` 参数为 True _check_dataloader_legality 函数的表现
@ -119,6 +122,7 @@ class TestTorchDriverFunctions:
with pytest.raises(ValueError): with pytest.raises(ValueError):
TorchSingleDriver.check_dataloader_legality(dataloader, "dataloader", True) TorchSingleDriver.check_dataloader_legality(dataloader, "dataloader", True)
@pytest.mark.torchpaddle
def test_check_dataloader_legality_in_test(self): def test_check_dataloader_legality_in_test(self):
""" """
测试 `is_train` 参数为 False _check_dataloader_legality 函数的表现 测试 `is_train` 参数为 False _check_dataloader_legality 函数的表现
@ -148,6 +152,7 @@ class TestTorchDriverFunctions:
with pytest.raises(ValueError): with pytest.raises(ValueError):
TorchSingleDriver.check_dataloader_legality(dataloader, "dataloader", False) TorchSingleDriver.check_dataloader_legality(dataloader, "dataloader", False)
@pytest.mark.torch
def test_tensor_to_numeric(self): def test_tensor_to_numeric(self):
""" """
测试 tensor_to_numeric 函数 测试 tensor_to_numeric 函数
@ -201,6 +206,7 @@ class TestTorchDriverFunctions:
assert r == d.tolist() assert r == d.tolist()
assert res["dict"]["tensor"] == tensor_dict["dict"]["tensor"].tolist() assert res["dict"]["tensor"] == tensor_dict["dict"]["tensor"].tolist()
@pytest.mark.torch
def test_set_model_mode(self): def test_set_model_mode(self):
""" """
测试set_model_mode函数 测试set_model_mode函数
@ -213,6 +219,7 @@ class TestTorchDriverFunctions:
with pytest.raises(AssertionError): with pytest.raises(AssertionError):
self.driver.set_model_mode("test") self.driver.set_model_mode("test")
@pytest.mark.torch
def test_move_model_to_device_cpu(self): def test_move_model_to_device_cpu(self):
""" """
测试move_model_to_device函数 测试move_model_to_device函数
@ -220,6 +227,7 @@ class TestTorchDriverFunctions:
TorchSingleDriver.move_model_to_device(self.driver.model, "cpu") TorchSingleDriver.move_model_to_device(self.driver.model, "cpu")
assert self.driver.model.linear1.weight.device.type == "cpu" assert self.driver.model.linear1.weight.device.type == "cpu"
@pytest.mark.torch
def test_move_model_to_device_gpu(self): def test_move_model_to_device_gpu(self):
""" """
测试move_model_to_device函数 测试move_model_to_device函数
@ -228,6 +236,7 @@ class TestTorchDriverFunctions:
assert self.driver.model.linear1.weight.device.type == "cuda" assert self.driver.model.linear1.weight.device.type == "cuda"
assert self.driver.model.linear1.weight.device.index == 0 assert self.driver.model.linear1.weight.device.index == 0
@pytest.mark.torch
def test_worker_init_function(self): def test_worker_init_function(self):
""" """
测试worker_init_function 测试worker_init_function
@ -236,6 +245,7 @@ class TestTorchDriverFunctions:
# TODO正确性 # TODO正确性
TorchSingleDriver.worker_init_function(0) TorchSingleDriver.worker_init_function(0)
@pytest.mark.torch
def test_set_deterministic_dataloader(self): def test_set_deterministic_dataloader(self):
""" """
测试set_deterministic_dataloader 测试set_deterministic_dataloader
@ -245,6 +255,7 @@ class TestTorchDriverFunctions:
dataloader = DataLoader(TorchNormalDataset()) dataloader = DataLoader(TorchNormalDataset())
self.driver.set_deterministic_dataloader(dataloader) self.driver.set_deterministic_dataloader(dataloader)
@pytest.mark.torch
def test_set_sampler_epoch(self): def test_set_sampler_epoch(self):
""" """
测试set_sampler_epoch 测试set_sampler_epoch
@ -254,6 +265,7 @@ class TestTorchDriverFunctions:
dataloader = DataLoader(TorchNormalDataset()) dataloader = DataLoader(TorchNormalDataset())
self.driver.set_sampler_epoch(dataloader, 0) self.driver.set_sampler_epoch(dataloader, 0)
@pytest.mark.torch
@pytest.mark.parametrize("batch_size", [16]) @pytest.mark.parametrize("batch_size", [16])
@pytest.mark.parametrize("shuffle", [True, False]) @pytest.mark.parametrize("shuffle", [True, False])
@pytest.mark.parametrize("drop_last", [True, False]) @pytest.mark.parametrize("drop_last", [True, False])
@ -279,6 +291,7 @@ class TestTorchDriverFunctions:
assert res.batch_size == batch_size assert res.batch_size == batch_size
assert res.drop_last == drop_last assert res.drop_last == drop_last
@pytest.mark.torch
@pytest.mark.parametrize("batch_size", [16]) @pytest.mark.parametrize("batch_size", [16])
@pytest.mark.parametrize("shuffle", [True, False]) @pytest.mark.parametrize("shuffle", [True, False])
@pytest.mark.parametrize("drop_last", [True, False]) @pytest.mark.parametrize("drop_last", [True, False])
@ -300,6 +313,7 @@ class TestTorchDriverFunctions:
assert res.batch_size == batch_size assert res.batch_size == batch_size
assert res.drop_last == drop_last assert res.drop_last == drop_last
@pytest.mark.torch
@pytest.mark.parametrize("batch_size", [16]) @pytest.mark.parametrize("batch_size", [16])
@pytest.mark.parametrize("shuffle", [True, False]) @pytest.mark.parametrize("shuffle", [True, False])
@pytest.mark.parametrize("drop_last", [True, False]) @pytest.mark.parametrize("drop_last", [True, False])
@ -325,6 +339,7 @@ class TestTorchDriverFunctions:
# #
############################################################################ ############################################################################
@pytest.mark.torch
class TestSingleDeviceFunction: class TestSingleDeviceFunction:
""" """
测试其它函数的测试例 测试其它函数的测试例
@ -359,6 +374,7 @@ class TestSingleDeviceFunction:
# #
############################################################################ ############################################################################
@pytest.mark.torch
class TestSetDistReproDataloader: class TestSetDistReproDataloader:
""" """
专门测试 set_dist_repro_dataloader 函数的类 专门测试 set_dist_repro_dataloader 函数的类
@ -534,6 +550,7 @@ def prepare_test_save_load():
driver1, driver2 = generate_random_driver(10, 10), generate_random_driver(10, 10) driver1, driver2 = generate_random_driver(10, 10), generate_random_driver(10, 10)
return driver1, driver2, dataloader return driver1, driver2, dataloader
@pytest.mark.torch
@pytest.mark.parametrize("only_state_dict", ([True, False])) @pytest.mark.parametrize("only_state_dict", ([True, False]))
def test_save_and_load_model(prepare_test_save_load, only_state_dict): def test_save_and_load_model(prepare_test_save_load, only_state_dict):
""" """
@ -555,6 +572,7 @@ def test_save_and_load_model(prepare_test_save_load, only_state_dict):
finally: finally:
rank_zero_rm(path) rank_zero_rm(path)
@pytest.mark.torch
@pytest.mark.parametrize("only_state_dict", ([True, False])) @pytest.mark.parametrize("only_state_dict", ([True, False]))
@pytest.mark.parametrize("fp16", ([True, False])) @pytest.mark.parametrize("fp16", ([True, False]))
def test_save_and_load_with_randombatchsampler(only_state_dict, fp16): def test_save_and_load_with_randombatchsampler(only_state_dict, fp16):
@ -623,6 +641,7 @@ def test_save_and_load_with_randombatchsampler(only_state_dict, fp16):
finally: finally:
rank_zero_rm(path) rank_zero_rm(path)
@pytest.mark.torch
@pytest.mark.parametrize("only_state_dict", ([True, False])) @pytest.mark.parametrize("only_state_dict", ([True, False]))
@pytest.mark.parametrize("fp16", ([True, False])) @pytest.mark.parametrize("fp16", ([True, False]))
def test_save_and_load_with_randomsampler(only_state_dict, fp16): def test_save_and_load_with_randomsampler(only_state_dict, fp16):

View File

@ -1,4 +1,4 @@
import unittest import pytest
from fastNLP.modules.mix_modules.mix_module import MixModule from fastNLP.modules.mix_modules.mix_module import MixModule
from fastNLP.core.drivers.torch_paddle_driver.torch_paddle_driver import TorchPaddleDriver from fastNLP.core.drivers.torch_paddle_driver.torch_paddle_driver import TorchPaddleDriver
@ -56,10 +56,11 @@ class MixMNISTModel(MixModule):
def test_step(self, x): def test_step(self, x):
return self.forward(x) return self.forward(x)
class TestMNIST(unittest.TestCase): @pytest.mark.torchpaddle
class TestMNIST:
@classmethod @classmethod
def setUpClass(self): def setup_class(self):
self.train_dataset = paddle.vision.datasets.MNIST(mode='train') self.train_dataset = paddle.vision.datasets.MNIST(mode='train')
self.test_dataset = paddle.vision.datasets.MNIST(mode='test') self.test_dataset = paddle.vision.datasets.MNIST(mode='test')
@ -70,7 +71,7 @@ class TestMNIST(unittest.TestCase):
self.dataloader = DataLoader(self.train_dataset, batch_size=100, shuffle=True) self.dataloader = DataLoader(self.train_dataset, batch_size=100, shuffle=True)
def setUp(self): def setup_method(self):
model = MixMNISTModel() model = MixMNISTModel()
self.torch_loss_func = torch.nn.CrossEntropyLoss() self.torch_loss_func = torch.nn.CrossEntropyLoss()
@ -118,4 +119,4 @@ class TestMNIST(unittest.TestCase):
correct += 1 correct += 1
acc = correct / len(self.test_dataset) acc = correct / len(self.test_dataset)
self.assertGreater(acc, 0.85) assert acc > 0.85

View File

@ -49,12 +49,12 @@ def test_accuracy_single():
# 测试 单机多卡情况下的Accuracy # 测试 单机多卡情况下的Accuracy
# #
############################################################################ ############################################################################
def test_accuracy_ddp(): # def test_accuracy_ddp():
launcher = FleetLauncher(devices=[0, 1]) # launcher = FleetLauncher(devices=[0, 1])
launcher.launch() # launcher.launch()
role = role_maker.PaddleCloudRoleMaker(is_collective=True) # role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role) # fleet.init(role)
if fleet.is_server(): # if fleet.is_server():
pass # pass
elif fleet.is_worker(): # elif fleet.is_worker():
print(os.getenv("PADDLE_TRAINER_ID")) # print(os.getenv("PADDLE_TRAINER_ID"))

View File

@ -1,26 +0,0 @@
from fastNLP.core.metrics.metric import Metric
from collections import defaultdict
from functools import partial
import unittest
class MyMetric(Metric):
def __init__(self, backend='auto',
aggregate_when_get_metric: bool = False):
super(MyMetric, self).__init__(backend=backend, aggregate_when_get_metric=aggregate_when_get_metric)
self.tp = defaultdict(partial(self.register_element, aggregate_method='sum'))
def update(self, item):
self.tp['1'] += item
class TestMetric(unittest.TestCase):
def test_va1(self):
my = MyMetric()
my.update(1)
print(my.tp['1'])

View File

@ -29,6 +29,8 @@ class TestUnrepeatedSampler:
@pytest.mark.parametrize('num_of_data', [2, 3, 4, 100]) @pytest.mark.parametrize('num_of_data', [2, 3, 4, 100])
@pytest.mark.parametrize('shuffle', [False, True]) @pytest.mark.parametrize('shuffle', [False, True])
def test_multi(self, num_replicas, num_of_data, shuffle): def test_multi(self, num_replicas, num_of_data, shuffle):
if num_replicas > num_of_data:
pytest.skip("num_replicas > num_of_data")
data = DatasetWithVaryLength(num_of_data=num_of_data) data = DatasetWithVaryLength(num_of_data=num_of_data)
samplers = [] samplers = []
for i in range(num_replicas): for i in range(num_replicas):
@ -53,6 +55,8 @@ class TestUnrepeatedSortedSampler:
@pytest.mark.parametrize('num_replicas', [2, 3]) @pytest.mark.parametrize('num_replicas', [2, 3])
@pytest.mark.parametrize('num_of_data', [2, 3, 4, 100]) @pytest.mark.parametrize('num_of_data', [2, 3, 4, 100])
def test_multi(self, num_replicas, num_of_data): def test_multi(self, num_replicas, num_of_data):
if num_replicas > num_of_data:
pytest.skip("num_replicas > num_of_data")
data = DatasetWithVaryLength(num_of_data=num_of_data) data = DatasetWithVaryLength(num_of_data=num_of_data)
samplers = [] samplers = []
for i in range(num_replicas): for i in range(num_replicas):
@ -84,6 +88,8 @@ class TestUnrepeatedSequentialSampler:
@pytest.mark.parametrize('num_replicas', [2, 3]) @pytest.mark.parametrize('num_replicas', [2, 3])
@pytest.mark.parametrize('num_of_data', [2, 3, 4, 100]) @pytest.mark.parametrize('num_of_data', [2, 3, 4, 100])
def test_multi(self, num_replicas, num_of_data): def test_multi(self, num_replicas, num_of_data):
if num_replicas > num_of_data:
pytest.skip("num_replicas > num_of_data")
data = DatasetWithVaryLength(num_of_data=num_of_data) data = DatasetWithVaryLength(num_of_data=num_of_data)
samplers = [] samplers = []
for i in range(num_replicas): for i in range(num_replicas):

View File

@ -1,4 +1,3 @@
import unittest
import pytest import pytest
import paddle import paddle
@ -12,21 +11,21 @@ from fastNLP.core.utils.paddle_utils import paddle_to, paddle_move_data_to_devic
############################################################################ ############################################################################
@pytest.mark.paddle @pytest.mark.paddle
class PaddleToDeviceTestCase(unittest.TestCase): class TestPaddleToDevice:
def test_case(self): def test_case(self):
tensor = paddle.rand((4, 5)) tensor = paddle.rand((4, 5))
res = paddle_to(tensor, "gpu") res = paddle_to(tensor, "gpu")
self.assertTrue(res.place.is_gpu_place()) assert res.place.is_gpu_place()
self.assertEqual(res.place.gpu_device_id(), 0) assert res.place.gpu_device_id() == 0
res = paddle_to(tensor, "cpu") res = paddle_to(tensor, "cpu")
self.assertTrue(res.place.is_cpu_place()) assert res.place.is_cpu_place()
res = paddle_to(tensor, "gpu:2") res = paddle_to(tensor, "gpu:2")
self.assertTrue(res.place.is_gpu_place()) assert res.place.is_gpu_place()
self.assertEqual(res.place.gpu_device_id(), 2) assert res.place.gpu_device_id() == 2
res = paddle_to(tensor, "gpu:1") res = paddle_to(tensor, "gpu:1")
self.assertTrue(res.place.is_gpu_place()) assert res.place.is_gpu_place()
self.assertEqual(res.place.gpu_device_id(), 1) assert res.place.gpu_device_id() == 1
############################################################################ ############################################################################
# #
@ -34,22 +33,22 @@ class PaddleToDeviceTestCase(unittest.TestCase):
# #
############################################################################ ############################################################################
class PaddleMoveDataToDeviceTestCase(unittest.TestCase): class TestPaddleMoveDataToDevice:
def check_gpu(self, tensor, idx): def check_gpu(self, tensor, idx):
""" """
检查张量是否在指定的设备上的工具函数 检查张量是否在指定的设备上的工具函数
""" """
self.assertTrue(tensor.place.is_gpu_place()) assert tensor.place.is_gpu_place()
self.assertEqual(tensor.place.gpu_device_id(), idx) assert tensor.place.gpu_device_id() == idx
def check_cpu(self, tensor): def check_cpu(self, tensor):
""" """
检查张量是否在cpu上的工具函数 检查张量是否在cpu上的工具函数
""" """
self.assertTrue(tensor.place.is_cpu_place()) assert tensor.place.is_cpu_place()
def test_tensor_transfer(self): def test_tensor_transfer(self):
""" """
@ -82,22 +81,22 @@ class PaddleMoveDataToDeviceTestCase(unittest.TestCase):
paddle_list = [paddle.rand((6, 4, 2)) for i in range(10)] paddle_list = [paddle.rand((6, 4, 2)) for i in range(10)]
res = paddle_move_data_to_device(paddle_list, device=None, data_device="gpu:1") res = paddle_move_data_to_device(paddle_list, device=None, data_device="gpu:1")
self.assertIsInstance(res, list) assert isinstance(res, list)
for r in res: for r in res:
self.check_gpu(r, 1) self.check_gpu(r, 1)
res = paddle_move_data_to_device(paddle_list, device="cpu", data_device="gpu:1") res = paddle_move_data_to_device(paddle_list, device="cpu", data_device="gpu:1")
self.assertIsInstance(res, list) assert isinstance(res, list)
for r in res: for r in res:
self.check_cpu(r) self.check_cpu(r)
res = paddle_move_data_to_device(paddle_list, device="gpu:0", data_device=None) res = paddle_move_data_to_device(paddle_list, device="gpu:0", data_device=None)
self.assertIsInstance(res, list) assert isinstance(res, list)
for r in res: for r in res:
self.check_gpu(r, 0) self.check_gpu(r, 0)
res = paddle_move_data_to_device(paddle_list, device="gpu:1", data_device="cpu") res = paddle_move_data_to_device(paddle_list, device="gpu:1", data_device="cpu")
self.assertIsInstance(res, list) assert isinstance(res, list)
for r in res: for r in res:
self.check_gpu(r, 1) self.check_gpu(r, 1)
@ -109,22 +108,22 @@ class PaddleMoveDataToDeviceTestCase(unittest.TestCase):
paddle_list = [paddle.rand((6, 4, 2)) for i in range(10)] paddle_list = [paddle.rand((6, 4, 2)) for i in range(10)]
paddle_tuple = tuple(paddle_list) paddle_tuple = tuple(paddle_list)
res = paddle_move_data_to_device(paddle_tuple, device=None, data_device="gpu:1") res = paddle_move_data_to_device(paddle_tuple, device=None, data_device="gpu:1")
self.assertIsInstance(res, tuple) assert isinstance(res, tuple)
for r in res: for r in res:
self.check_gpu(r, 1) self.check_gpu(r, 1)
res = paddle_move_data_to_device(paddle_tuple, device="cpu", data_device="gpu:1") res = paddle_move_data_to_device(paddle_tuple, device="cpu", data_device="gpu:1")
self.assertIsInstance(res, tuple) assert isinstance(res, tuple)
for r in res: for r in res:
self.check_cpu(r) self.check_cpu(r)
res = paddle_move_data_to_device(paddle_tuple, device="gpu:0", data_device=None) res = paddle_move_data_to_device(paddle_tuple, device="gpu:0", data_device=None)
self.assertIsInstance(res, tuple) assert isinstance(res, tuple)
for r in res: for r in res:
self.check_gpu(r, 0) self.check_gpu(r, 0)
res = paddle_move_data_to_device(paddle_tuple, device="gpu:1", data_device="cpu") res = paddle_move_data_to_device(paddle_tuple, device="gpu:1", data_device="cpu")
self.assertIsInstance(res, tuple) assert isinstance(res, tuple)
for r in res: for r in res:
self.check_gpu(r, 1) self.check_gpu(r, 1)
@ -145,57 +144,57 @@ class PaddleMoveDataToDeviceTestCase(unittest.TestCase):
} }
res = paddle_move_data_to_device(paddle_dict, device="gpu:0", data_device=None) res = paddle_move_data_to_device(paddle_dict, device="gpu:0", data_device=None)
self.assertIsInstance(res, dict) assert isinstance(res, dict)
self.check_gpu(res["tensor"], 0) self.check_gpu(res["tensor"], 0)
self.assertIsInstance(res["list"], list) assert isinstance(res["list"], list)
for t in res["list"]: for t in res["list"]:
self.check_gpu(t, 0) self.check_gpu(t, 0)
self.assertIsInstance(res["int"], int) assert isinstance(res["int"], int)
self.assertIsInstance(res["string"], str) assert isinstance(res["string"], str)
self.assertIsInstance(res["dict"], dict) assert isinstance(res["dict"], dict)
self.assertIsInstance(res["dict"]["list"], list) assert isinstance(res["dict"]["list"], list)
for t in res["dict"]["list"]: for t in res["dict"]["list"]:
self.check_gpu(t, 0) self.check_gpu(t, 0)
self.check_gpu(res["dict"]["tensor"], 0) self.check_gpu(res["dict"]["tensor"], 0)
res = paddle_move_data_to_device(paddle_dict, device="gpu:0", data_device="cpu") res = paddle_move_data_to_device(paddle_dict, device="gpu:0", data_device="cpu")
self.assertIsInstance(res, dict) assert isinstance(res, dict)
self.check_gpu(res["tensor"], 0) self.check_gpu(res["tensor"], 0)
self.assertIsInstance(res["list"], list) assert isinstance(res["list"], list)
for t in res["list"]: for t in res["list"]:
self.check_gpu(t, 0) self.check_gpu(t, 0)
self.assertIsInstance(res["int"], int) assert isinstance(res["int"], int)
self.assertIsInstance(res["string"], str) assert isinstance(res["string"], str)
self.assertIsInstance(res["dict"], dict) assert isinstance(res["dict"], dict)
self.assertIsInstance(res["dict"]["list"], list) assert isinstance(res["dict"]["list"], list)
for t in res["dict"]["list"]: for t in res["dict"]["list"]:
self.check_gpu(t, 0) self.check_gpu(t, 0)
self.check_gpu(res["dict"]["tensor"], 0) self.check_gpu(res["dict"]["tensor"], 0)
res = paddle_move_data_to_device(paddle_dict, device=None, data_device="gpu:1") res = paddle_move_data_to_device(paddle_dict, device=None, data_device="gpu:1")
self.assertIsInstance(res, dict) assert isinstance(res, dict)
self.check_gpu(res["tensor"], 1) self.check_gpu(res["tensor"], 1)
self.assertIsInstance(res["list"], list) assert isinstance(res["list"], list)
for t in res["list"]: for t in res["list"]:
self.check_gpu(t, 1) self.check_gpu(t, 1)
self.assertIsInstance(res["int"], int) assert isinstance(res["int"], int)
self.assertIsInstance(res["string"], str) assert isinstance(res["string"], str)
self.assertIsInstance(res["dict"], dict) assert isinstance(res["dict"], dict)
self.assertIsInstance(res["dict"]["list"], list) assert isinstance(res["dict"]["list"], list)
for t in res["dict"]["list"]: for t in res["dict"]["list"]:
self.check_gpu(t, 1) self.check_gpu(t, 1)
self.check_gpu(res["dict"]["tensor"], 1) self.check_gpu(res["dict"]["tensor"], 1)
res = paddle_move_data_to_device(paddle_dict, device="cpu", data_device="gpu:0") res = paddle_move_data_to_device(paddle_dict, device="cpu", data_device="gpu:0")
self.assertIsInstance(res, dict) assert isinstance(res, dict)
self.check_cpu(res["tensor"]) self.check_cpu(res["tensor"])
self.assertIsInstance(res["list"], list) assert isinstance(res["list"], list)
for t in res["list"]: for t in res["list"]:
self.check_cpu(t) self.check_cpu(t)
self.assertIsInstance(res["int"], int) assert isinstance(res["int"], int)
self.assertIsInstance(res["string"], str) assert isinstance(res["string"], str)
self.assertIsInstance(res["dict"], dict) assert isinstance(res["dict"], dict)
self.assertIsInstance(res["dict"]["list"], list) assert isinstance(res["dict"]["list"], list)
for t in res["dict"]["list"]: for t in res["dict"]["list"]:
self.check_cpu(t) self.check_cpu(t)
self.check_cpu(res["dict"]["tensor"]) self.check_cpu(res["dict"]["tensor"])

View File

@ -1,5 +1,3 @@
import unittest
import paddle import paddle
import pytest import pytest
import torch import torch
@ -12,9 +10,8 @@ from fastNLP.core.utils.torch_paddle_utils import torch_paddle_move_data_to_devi
# #
############################################################################ ############################################################################
# @pytest.mark.paddle @pytest.mark.torchpaddle
# @pytest.mark.torch class TestTorchPaddleMoveDataToDevice:
class TorchPaddleMoveDataToDeviceTestCase(unittest.TestCase):
def check_gpu(self, tensor, idx): def check_gpu(self, tensor, idx):
""" """
@ -22,17 +19,17 @@ class TorchPaddleMoveDataToDeviceTestCase(unittest.TestCase):
""" """
if isinstance(tensor, paddle.Tensor): if isinstance(tensor, paddle.Tensor):
self.assertTrue(tensor.place.is_gpu_place()) assert tensor.place.is_gpu_place()
self.assertEqual(tensor.place.gpu_device_id(), idx) assert tensor.place.gpu_device_id() == idx
elif isinstance(tensor, torch.Tensor): elif isinstance(tensor, torch.Tensor):
self.assertTrue(tensor.is_cuda) assert tensor.is_cuda
self.assertEqual(tensor.device.index, idx) assert tensor.device.index == idx
def check_cpu(self, tensor): def check_cpu(self, tensor):
if isinstance(tensor, paddle.Tensor): if isinstance(tensor, paddle.Tensor):
self.assertTrue(tensor.place.is_cpu_place()) assert tensor.place.is_cpu_place()
elif isinstance(tensor, torch.Tensor): elif isinstance(tensor, torch.Tensor):
self.assertFalse(tensor.is_cuda) assert not tensor.is_cuda
def test_tensor_transfer(self): def test_tensor_transfer(self):
""" """
@ -63,7 +60,6 @@ class TorchPaddleMoveDataToDeviceTestCase(unittest.TestCase):
self.check_cpu(res) self.check_cpu(res)
res = torch_paddle_move_data_to_device(torch_tensor, device="gpu:0", data_device=None) res = torch_paddle_move_data_to_device(torch_tensor, device="gpu:0", data_device=None)
print(res.device)
self.check_gpu(res, 0) self.check_gpu(res, 0)
res = torch_paddle_move_data_to_device(torch_tensor, device="gpu:1", data_device=None) res = torch_paddle_move_data_to_device(torch_tensor, device="gpu:1", data_device=None)
@ -85,22 +81,22 @@ class TorchPaddleMoveDataToDeviceTestCase(unittest.TestCase):
paddle_list = [paddle.rand((6, 4, 2)) for i in range(5)] + [torch.rand((6, 4, 2)) for i in range(5)] paddle_list = [paddle.rand((6, 4, 2)) for i in range(5)] + [torch.rand((6, 4, 2)) for i in range(5)]
res = torch_paddle_move_data_to_device(paddle_list, device=None, data_device="gpu:1") res = torch_paddle_move_data_to_device(paddle_list, device=None, data_device="gpu:1")
self.assertIsInstance(res, list) assert isinstance(res, list)
for r in res: for r in res:
self.check_gpu(r, 1) self.check_gpu(r, 1)
res = torch_paddle_move_data_to_device(paddle_list, device="cpu", data_device="gpu:1") res = torch_paddle_move_data_to_device(paddle_list, device="cpu", data_device="gpu:1")
self.assertIsInstance(res, list) assert isinstance(res, list)
for r in res: for r in res:
self.check_cpu(r) self.check_cpu(r)
res = torch_paddle_move_data_to_device(paddle_list, device="gpu:0", data_device=None) res = torch_paddle_move_data_to_device(paddle_list, device="gpu:0", data_device=None)
self.assertIsInstance(res, list) assert isinstance(res, list)
for r in res: for r in res:
self.check_gpu(r, 0) self.check_gpu(r, 0)
res = torch_paddle_move_data_to_device(paddle_list, device="gpu:1", data_device="cpu") res = torch_paddle_move_data_to_device(paddle_list, device="gpu:1", data_device="cpu")
self.assertIsInstance(res, list) assert isinstance(res, list)
for r in res: for r in res:
self.check_gpu(r, 1) self.check_gpu(r, 1)
@ -112,22 +108,22 @@ class TorchPaddleMoveDataToDeviceTestCase(unittest.TestCase):
paddle_list = [paddle.rand((6, 4, 2)) for i in range(10)] + [torch.rand((6, 4, 2)) for i in range(5)] paddle_list = [paddle.rand((6, 4, 2)) for i in range(10)] + [torch.rand((6, 4, 2)) for i in range(5)]
paddle_tuple = tuple(paddle_list) paddle_tuple = tuple(paddle_list)
res = torch_paddle_move_data_to_device(paddle_tuple, device=None, data_device="gpu:1") res = torch_paddle_move_data_to_device(paddle_tuple, device=None, data_device="gpu:1")
self.assertIsInstance(res, tuple) assert isinstance(res, tuple)
for r in res: for r in res:
self.check_gpu(r, 1) self.check_gpu(r, 1)
res = torch_paddle_move_data_to_device(paddle_tuple, device="cpu", data_device="gpu:1") res = torch_paddle_move_data_to_device(paddle_tuple, device="cpu", data_device="gpu:1")
self.assertIsInstance(res, tuple) assert isinstance(res, tuple)
for r in res: for r in res:
self.check_cpu(r) self.check_cpu(r)
res = torch_paddle_move_data_to_device(paddle_tuple, device="gpu:0", data_device=None) res = torch_paddle_move_data_to_device(paddle_tuple, device="gpu:0", data_device=None)
self.assertIsInstance(res, tuple) assert isinstance(res, tuple)
for r in res: for r in res:
self.check_gpu(r, 0) self.check_gpu(r, 0)
res = torch_paddle_move_data_to_device(paddle_tuple, device="gpu:1", data_device="cpu") res = torch_paddle_move_data_to_device(paddle_tuple, device="gpu:1", data_device="cpu")
self.assertIsInstance(res, tuple) assert isinstance(res, tuple)
for r in res: for r in res:
self.check_gpu(r, 1) self.check_gpu(r, 1)
@ -151,57 +147,57 @@ class TorchPaddleMoveDataToDeviceTestCase(unittest.TestCase):
} }
res = torch_paddle_move_data_to_device(paddle_dict, device="gpu:0", data_device=None) res = torch_paddle_move_data_to_device(paddle_dict, device="gpu:0", data_device=None)
self.assertIsInstance(res, dict) assert isinstance(res, dict)
self.check_gpu(res["torch_tensor"], 0) self.check_gpu(res["torch_tensor"], 0)
self.check_gpu(res["paddle_tensor"], 0) self.check_gpu(res["paddle_tensor"], 0)
self.assertIsInstance(res["torch_list"], list) assert isinstance(res["torch_list"], list)
for t in res["torch_list"]: for t in res["torch_list"]:
self.check_gpu(t, 0) self.check_gpu(t, 0)
self.assertIsInstance(res["list"], list) assert isinstance(res["list"], list)
for t in res["list"]: for t in res["list"]:
self.check_gpu(t, 0) self.check_gpu(t, 0)
self.assertIsInstance(res["int"], int) assert isinstance(res["int"], int)
self.assertIsInstance(res["string"], str) assert isinstance(res["string"], str)
self.assertIsInstance(res["dict"], dict) assert isinstance(res["dict"], dict)
self.assertIsInstance(res["dict"]["list"], list) assert isinstance(res["dict"]["list"], list)
for t in res["dict"]["list"]: for t in res["dict"]["list"]:
self.check_gpu(t, 0) self.check_gpu(t, 0)
self.check_gpu(res["dict"]["torch_tensor"], 0) self.check_gpu(res["dict"]["torch_tensor"], 0)
self.check_gpu(res["dict"]["paddle_tensor"], 0) self.check_gpu(res["dict"]["paddle_tensor"], 0)
res = torch_paddle_move_data_to_device(paddle_dict, device=None, data_device="gpu:1") res = torch_paddle_move_data_to_device(paddle_dict, device=None, data_device="gpu:1")
self.assertIsInstance(res, dict) assert isinstance(res, dict)
self.check_gpu(res["torch_tensor"], 1) self.check_gpu(res["torch_tensor"], 1)
self.check_gpu(res["paddle_tensor"], 1) self.check_gpu(res["paddle_tensor"], 1)
self.assertIsInstance(res["torch_list"], list) assert isinstance(res["torch_list"], list)
for t in res["torch_list"]: for t in res["torch_list"]:
self.check_gpu(t, 1) self.check_gpu(t, 1)
self.assertIsInstance(res["list"], list) assert isinstance(res["list"], list)
for t in res["list"]: for t in res["list"]:
self.check_gpu(t, 1) self.check_gpu(t, 1)
self.assertIsInstance(res["int"], int) assert isinstance(res["int"], int)
self.assertIsInstance(res["string"], str) assert isinstance(res["string"], str)
self.assertIsInstance(res["dict"], dict) assert isinstance(res["dict"], dict)
self.assertIsInstance(res["dict"]["list"], list) assert isinstance(res["dict"]["list"], list)
for t in res["dict"]["list"]: for t in res["dict"]["list"]:
self.check_gpu(t, 1) self.check_gpu(t, 1)
self.check_gpu(res["dict"]["torch_tensor"], 1) self.check_gpu(res["dict"]["torch_tensor"], 1)
self.check_gpu(res["dict"]["paddle_tensor"], 1) self.check_gpu(res["dict"]["paddle_tensor"], 1)
res = torch_paddle_move_data_to_device(paddle_dict, device="cpu", data_device="gpu:0") res = torch_paddle_move_data_to_device(paddle_dict, device="cpu", data_device="gpu:0")
self.assertIsInstance(res, dict) assert isinstance(res, dict)
self.check_cpu(res["torch_tensor"]) self.check_cpu(res["torch_tensor"])
self.check_cpu(res["paddle_tensor"]) self.check_cpu(res["paddle_tensor"])
self.assertIsInstance(res["torch_list"], list) assert isinstance(res["torch_list"], list)
for t in res["torch_list"]: for t in res["torch_list"]:
self.check_cpu(t) self.check_cpu(t)
self.assertIsInstance(res["list"], list) assert isinstance(res["list"], list)
for t in res["list"]: for t in res["list"]:
self.check_cpu(t) self.check_cpu(t)
self.assertIsInstance(res["int"], int) assert isinstance(res["int"], int)
self.assertIsInstance(res["string"], str) assert isinstance(res["string"], str)
self.assertIsInstance(res["dict"], dict) assert isinstance(res["dict"], dict)
self.assertIsInstance(res["dict"]["list"], list) assert isinstance(res["dict"]["list"], list)
for t in res["dict"]["list"]: for t in res["dict"]["list"]:
self.check_cpu(t) self.check_cpu(t)
self.check_cpu(res["dict"]["torch_tensor"]) self.check_cpu(res["dict"]["torch_tensor"])

View File

@ -26,9 +26,9 @@ class Paddle2TorchTestCase(unittest.TestCase):
检查张量设备和梯度情况的工具函数 检查张量设备和梯度情况的工具函数
""" """
self.assertIsInstance(tensor, torch.Tensor) assert isinstance(tensor, torch.Tensor)
self.assertEqual(tensor.device, torch.device(device)) assert tensor.device == torch.device(device)
self.assertEqual(tensor.requires_grad, requires_grad) assert tensor.requires_grad == requires_grad
def test_gradient(self): def test_gradient(self):
""" """
@ -39,7 +39,7 @@ class Paddle2TorchTestCase(unittest.TestCase):
y = paddle2torch(x) y = paddle2torch(x)
z = 3 * (y ** 2) z = 3 * (y ** 2)
z.sum().backward() z.sum().backward()
self.assertListEqual(y.grad.tolist(), [6, 12, 18, 24, 30]) assert y.grad.tolist() == [6, 12, 18, 24, 30]
def test_tensor_transfer(self): def test_tensor_transfer(self):
""" """
@ -66,12 +66,12 @@ class Paddle2TorchTestCase(unittest.TestCase):
paddle_list = [paddle.rand((6, 4, 2)).cuda(1) for i in range(10)] paddle_list = [paddle.rand((6, 4, 2)).cuda(1) for i in range(10)]
res = paddle2torch(paddle_list) res = paddle2torch(paddle_list)
self.assertIsInstance(res, list) assert isinstance(res, list)
for t in res: for t in res:
self.check_torch_tensor(t, "cuda:1", False) self.check_torch_tensor(t, "cuda:1", False)
res = paddle2torch(paddle_list, target_device="cpu", no_gradient=False) res = paddle2torch(paddle_list, target_device="cpu", no_gradient=False)
self.assertIsInstance(res, list) assert isinstance(res, list)
for t in res: for t in res:
self.check_torch_tensor(t, "cpu", True) self.check_torch_tensor(t, "cpu", True)
@ -83,7 +83,7 @@ class Paddle2TorchTestCase(unittest.TestCase):
paddle_list = [paddle.rand((6, 4, 2)).cuda(1) for i in range(10)] paddle_list = [paddle.rand((6, 4, 2)).cuda(1) for i in range(10)]
paddle_tuple = tuple(paddle_list) paddle_tuple = tuple(paddle_list)
res = paddle2torch(paddle_tuple) res = paddle2torch(paddle_tuple)
self.assertIsInstance(res, tuple) assert isinstance(res, tuple)
for t in res: for t in res:
self.check_torch_tensor(t, "cuda:1", False) self.check_torch_tensor(t, "cuda:1", False)
@ -103,15 +103,15 @@ class Paddle2TorchTestCase(unittest.TestCase):
"string": "test string" "string": "test string"
} }
res = paddle2torch(paddle_dict) res = paddle2torch(paddle_dict)
self.assertIsInstance(res, dict) assert isinstance(res, dict)
self.check_torch_tensor(res["tensor"], "cuda:0", False) self.check_torch_tensor(res["tensor"], "cuda:0", False)
self.assertIsInstance(res["list"], list) assert isinstance(res["list"], list)
for t in res["list"]: for t in res["list"]:
self.check_torch_tensor(t, "cuda:0", False) self.check_torch_tensor(t, "cuda:0", False)
self.assertIsInstance(res["int"], int) assert isinstance(res["int"], int)
self.assertIsInstance(res["string"], str) assert isinstance(res["string"], str)
self.assertIsInstance(res["dict"], dict) assert isinstance(res["dict"], dict)
self.assertIsInstance(res["dict"]["list"], list) assert isinstance(res["dict"]["list"], list)
for t in res["dict"]["list"]: for t in res["dict"]["list"]:
self.check_torch_tensor(t, "cuda:0", False) self.check_torch_tensor(t, "cuda:0", False)
self.check_torch_tensor(res["dict"]["tensor"], "cuda:0", False) self.check_torch_tensor(res["dict"]["tensor"], "cuda:0", False)
@ -130,24 +130,24 @@ class Torch2PaddleTestCase(unittest.TestCase):
检查得到的paddle张量设备和梯度情况的工具函数 检查得到的paddle张量设备和梯度情况的工具函数
""" """
self.assertIsInstance(tensor, paddle.Tensor) assert isinstance(tensor, paddle.Tensor)
if device == "cpu": if device == "cpu":
self.assertTrue(tensor.place.is_cpu_place()) assert tensor.place.is_cpu_place()
elif device.startswith("gpu"): elif device.startswith("gpu"):
paddle_device = paddle.device._convert_to_place(device) paddle_device = paddle.device._convert_to_place(device)
self.assertTrue(tensor.place.is_gpu_place()) assert tensor.place.is_gpu_place()
if hasattr(tensor.place, "gpu_device_id"): if hasattr(tensor.place, "gpu_device_id"):
# paddle中有两种Place # paddle中有两种Place
# paddle.fluid.core.Place是创建Tensor时使用的类型 # paddle.fluid.core.Place是创建Tensor时使用的类型
# 有函数gpu_device_id获取设备 # 有函数gpu_device_id获取设备
self.assertEqual(tensor.place.gpu_device_id(), paddle_device.get_device_id()) assert tensor.place.gpu_device_id() == paddle_device.get_device_id()
else: else:
# 通过_convert_to_place得到的是paddle.CUDAPlace # 通过_convert_to_place得到的是paddle.CUDAPlace
# 通过get_device_id获取设备 # 通过get_device_id获取设备
self.assertEqual(tensor.place.get_device_id(), paddle_device.get_device_id()) assert tensor.place.get_device_id() == paddle_device.get_device_id()
else: else:
raise NotImplementedError raise NotImplementedError
self.assertEqual(tensor.stop_gradient, stop_gradient) assert tensor.stop_gradient == stop_gradient
def test_gradient(self): def test_gradient(self):
""" """
@ -158,7 +158,7 @@ class Torch2PaddleTestCase(unittest.TestCase):
y = torch2paddle(x) y = torch2paddle(x)
z = 3 * (y ** 2) z = 3 * (y ** 2)
z.sum().backward() z.sum().backward()
self.assertListEqual(y.grad.tolist(), [6, 12, 18, 24, 30]) assert y.grad.tolist() == [6, 12, 18, 24, 30]
def test_tensor_transfer(self): def test_tensor_transfer(self):
""" """
@ -185,12 +185,12 @@ class Torch2PaddleTestCase(unittest.TestCase):
torch_list = [torch.rand(6, 4, 2) for i in range(10)] torch_list = [torch.rand(6, 4, 2) for i in range(10)]
res = torch2paddle(torch_list) res = torch2paddle(torch_list)
self.assertIsInstance(res, list) assert isinstance(res, list)
for t in res: for t in res:
self.check_paddle_tensor(t, "cpu", True) self.check_paddle_tensor(t, "cpu", True)
res = torch2paddle(torch_list, target_device="gpu:1", no_gradient=False) res = torch2paddle(torch_list, target_device="gpu:1", no_gradient=False)
self.assertIsInstance(res, list) assert isinstance(res, list)
for t in res: for t in res:
self.check_paddle_tensor(t, "gpu:1", False) self.check_paddle_tensor(t, "gpu:1", False)
@ -202,7 +202,7 @@ class Torch2PaddleTestCase(unittest.TestCase):
torch_list = [torch.rand(6, 4, 2) for i in range(10)] torch_list = [torch.rand(6, 4, 2) for i in range(10)]
torch_tuple = tuple(torch_list) torch_tuple = tuple(torch_list)
res = torch2paddle(torch_tuple, target_device="cpu") res = torch2paddle(torch_tuple, target_device="cpu")
self.assertIsInstance(res, tuple) assert isinstance(res, tuple)
for t in res: for t in res:
self.check_paddle_tensor(t, "cpu", True) self.check_paddle_tensor(t, "cpu", True)
@ -222,15 +222,15 @@ class Torch2PaddleTestCase(unittest.TestCase):
"string": "test string" "string": "test string"
} }
res = torch2paddle(torch_dict) res = torch2paddle(torch_dict)
self.assertIsInstance(res, dict) assert isinstance(res, dict)
self.check_paddle_tensor(res["tensor"], "cpu", True) self.check_paddle_tensor(res["tensor"], "cpu", True)
self.assertIsInstance(res["list"], list) assert isinstance(res["list"], list)
for t in res["list"]: for t in res["list"]:
self.check_paddle_tensor(t, "cpu", True) self.check_paddle_tensor(t, "cpu", True)
self.assertIsInstance(res["int"], int) assert isinstance(res["int"], int)
self.assertIsInstance(res["string"], str) assert isinstance(res["string"], str)
self.assertIsInstance(res["dict"], dict) assert isinstance(res["dict"], dict)
self.assertIsInstance(res["dict"]["list"], list) assert isinstance(res["dict"]["list"], list)
for t in res["dict"]["list"]: for t in res["dict"]["list"]:
self.check_paddle_tensor(t, "cpu", True) self.check_paddle_tensor(t, "cpu", True)
self.check_paddle_tensor(res["dict"]["tensor"], "cpu", True) self.check_paddle_tensor(res["dict"]["tensor"], "cpu", True)
@ -249,12 +249,12 @@ class Jittor2TorchTestCase(unittest.TestCase):
检查得到的torch张量的工具函数 检查得到的torch张量的工具函数
""" """
self.assertIsInstance(tensor, torch.Tensor) assert isinstance(tensor, torch.Tensor)
if device == "cpu": if device == "cpu":
self.assertFalse(tensor.is_cuda) assert not tensor.is_cuda
else: else:
self.assertEqual(tensor.device, torch.device(device)) assert tensor.device == torch.device(device)
self.assertEqual(tensor.requires_grad, requires_grad) assert tensor.requires_grad == requires_grad
def test_var_transfer(self): def test_var_transfer(self):
""" """
@ -281,12 +281,12 @@ class Jittor2TorchTestCase(unittest.TestCase):
jittor_list = [jittor.rand((6, 4, 2)) for i in range(10)] jittor_list = [jittor.rand((6, 4, 2)) for i in range(10)]
res = jittor2torch(jittor_list) res = jittor2torch(jittor_list)
self.assertIsInstance(res, list) assert isinstance(res, list)
for t in res: for t in res:
self.check_torch_tensor(t, "cpu", True) self.check_torch_tensor(t, "cpu", True)
res = jittor2torch(jittor_list, target_device="cuda:1", no_gradient=False) res = jittor2torch(jittor_list, target_device="cuda:1", no_gradient=False)
self.assertIsInstance(res, list) assert isinstance(res, list)
for t in res: for t in res:
self.check_torch_tensor(t, "cuda:1", True) self.check_torch_tensor(t, "cuda:1", True)
@ -298,7 +298,7 @@ class Jittor2TorchTestCase(unittest.TestCase):
jittor_list = [jittor.rand((6, 4, 2)) for i in range(10)] jittor_list = [jittor.rand((6, 4, 2)) for i in range(10)]
jittor_tuple = tuple(jittor_list) jittor_tuple = tuple(jittor_list)
res = jittor2torch(jittor_tuple, target_device="cpu") res = jittor2torch(jittor_tuple, target_device="cpu")
self.assertIsInstance(res, tuple) assert isinstance(res, tuple)
for t in res: for t in res:
self.check_torch_tensor(t, "cpu", True) self.check_torch_tensor(t, "cpu", True)
@ -318,15 +318,15 @@ class Jittor2TorchTestCase(unittest.TestCase):
"string": "test string" "string": "test string"
} }
res = jittor2torch(jittor_dict) res = jittor2torch(jittor_dict)
self.assertIsInstance(res, dict) assert isinstance(res, dict)
self.check_torch_tensor(res["tensor"], "cpu", True) self.check_torch_tensor(res["tensor"], "cpu", True)
self.assertIsInstance(res["list"], list) assert isinstance(res["list"], list)
for t in res["list"]: for t in res["list"]:
self.check_torch_tensor(t, "cpu", True) self.check_torch_tensor(t, "cpu", True)
self.assertIsInstance(res["int"], int) assert isinstance(res["int"], int)
self.assertIsInstance(res["string"], str) assert isinstance(res["string"], str)
self.assertIsInstance(res["dict"], dict) assert isinstance(res["dict"], dict)
self.assertIsInstance(res["dict"]["list"], list) assert isinstance(res["dict"]["list"], list)
for t in res["dict"]["list"]: for t in res["dict"]["list"]:
self.check_torch_tensor(t, "cpu", True) self.check_torch_tensor(t, "cpu", True)
self.check_torch_tensor(res["dict"]["tensor"], "cpu", True) self.check_torch_tensor(res["dict"]["tensor"], "cpu", True)
@ -345,8 +345,8 @@ class Torch2JittorTestCase(unittest.TestCase):
检查得到的Jittor Var梯度情况的工具函数 检查得到的Jittor Var梯度情况的工具函数
""" """
self.assertIsInstance(var, jittor.Var) assert isinstance(var, jittor.Var)
self.assertEqual(var.requires_grad, requires_grad) assert var.requires_grad == requires_grad
def test_gradient(self): def test_gradient(self):
""" """
@ -357,7 +357,7 @@ class Torch2JittorTestCase(unittest.TestCase):
y = torch2jittor(x) y = torch2jittor(x)
z = 3 * (y ** 2) z = 3 * (y ** 2)
grad = jittor.grad(z, y) grad = jittor.grad(z, y)
self.assertListEqual(grad.tolist(), [6.0, 12.0, 18.0, 24.0, 30.0]) assert grad.tolist() == [6.0, 12.0, 18.0, 24.0, 30.0]
def test_tensor_transfer(self): def test_tensor_transfer(self):
""" """
@ -384,12 +384,12 @@ class Torch2JittorTestCase(unittest.TestCase):
torch_list = [torch.rand((6, 4, 2)) for i in range(10)] torch_list = [torch.rand((6, 4, 2)) for i in range(10)]
res = torch2jittor(torch_list) res = torch2jittor(torch_list)
self.assertIsInstance(res, list) assert isinstance(res, list)
for t in res: for t in res:
self.check_jittor_var(t, False) self.check_jittor_var(t, False)
res = torch2jittor(torch_list, no_gradient=False) res = torch2jittor(torch_list, no_gradient=False)
self.assertIsInstance(res, list) assert isinstance(res, list)
for t in res: for t in res:
self.check_jittor_var(t, True) self.check_jittor_var(t, True)
@ -401,7 +401,7 @@ class Torch2JittorTestCase(unittest.TestCase):
torch_list = [torch.rand((6, 4, 2)) for i in range(10)] torch_list = [torch.rand((6, 4, 2)) for i in range(10)]
torch_tuple = tuple(torch_list) torch_tuple = tuple(torch_list)
res = torch2jittor(torch_tuple) res = torch2jittor(torch_tuple)
self.assertIsInstance(res, tuple) assert isinstance(res, tuple)
for t in res: for t in res:
self.check_jittor_var(t, False) self.check_jittor_var(t, False)
@ -421,15 +421,15 @@ class Torch2JittorTestCase(unittest.TestCase):
"string": "test string" "string": "test string"
} }
res = torch2jittor(torch_dict) res = torch2jittor(torch_dict)
self.assertIsInstance(res, dict) assert isinstance(res, dict)
self.check_jittor_var(res["tensor"], False) self.check_jittor_var(res["tensor"], False)
self.assertIsInstance(res["list"], list) assert isinstance(res["list"], list)
for t in res["list"]: for t in res["list"]:
self.check_jittor_var(t, False) self.check_jittor_var(t, False)
self.assertIsInstance(res["int"], int) assert isinstance(res["int"], int)
self.assertIsInstance(res["string"], str) assert isinstance(res["string"], str)
self.assertIsInstance(res["dict"], dict) assert isinstance(res["dict"], dict)
self.assertIsInstance(res["dict"]["list"], list) assert isinstance(res["dict"]["list"], list)
for t in res["dict"]["list"]: for t in res["dict"]["list"]:
self.check_jittor_var(t, False) self.check_jittor_var(t, False)
self.check_jittor_var(res["dict"]["tensor"], False) self.check_jittor_var(res["dict"]["tensor"], False)

View File

@ -1,4 +1,4 @@
import unittest import pytest
import os import os
from itertools import chain from itertools import chain
@ -18,9 +18,9 @@ from fastNLP.core import rank_zero_rm
# #
############################################################################ ############################################################################
class TestMixModule(MixModule): class MixModuleForTest(MixModule):
def __init__(self): def __init__(self):
super(TestMixModule, self).__init__() super(MixModuleForTest, self).__init__()
self.torch_fc1 = torch.nn.Linear(10, 10) self.torch_fc1 = torch.nn.Linear(10, 10)
self.torch_softmax = torch.nn.Softmax(0) self.torch_softmax = torch.nn.Softmax(0)
@ -33,9 +33,9 @@ class TestMixModule(MixModule):
self.paddle_conv2d1 = paddle.nn.Conv2D(10, 10, 3) self.paddle_conv2d1 = paddle.nn.Conv2D(10, 10, 3)
self.paddle_tensor = paddle.ones((4, 4)) self.paddle_tensor = paddle.ones((4, 4))
class TestTorchModule(torch.nn.Module): class TorchModuleForTest(torch.nn.Module):
def __init__(self): def __init__(self):
super(TestTorchModule, self).__init__() super(TorchModuleForTest, self).__init__()
self.torch_fc1 = torch.nn.Linear(10, 10) self.torch_fc1 = torch.nn.Linear(10, 10)
self.torch_softmax = torch.nn.Softmax(0) self.torch_softmax = torch.nn.Softmax(0)
@ -43,9 +43,9 @@ class TestTorchModule(torch.nn.Module):
self.torch_tensor = torch.ones(3, 3) self.torch_tensor = torch.ones(3, 3)
self.torch_param = torch.nn.Parameter(torch.ones(4, 4)) self.torch_param = torch.nn.Parameter(torch.ones(4, 4))
class TestPaddleModule(paddle.nn.Layer): class PaddleModuleForTest(paddle.nn.Layer):
def __init__(self): def __init__(self):
super(TestPaddleModule, self).__init__() super(PaddleModuleForTest, self).__init__()
self.paddle_fc1 = paddle.nn.Linear(10, 10) self.paddle_fc1 = paddle.nn.Linear(10, 10)
self.paddle_softmax = paddle.nn.Softmax(0) self.paddle_softmax = paddle.nn.Softmax(0)
@ -53,13 +53,14 @@ class TestPaddleModule(paddle.nn.Layer):
self.paddle_tensor = paddle.ones((4, 4)) self.paddle_tensor = paddle.ones((4, 4))
class TorchPaddleMixModuleTestCase(unittest.TestCase): @pytest.mark.torchpaddle
class TestTorchPaddleMixModule:
def setUp(self): def setup_method(self):
self.model = TestMixModule() self.model = MixModuleForTest()
self.torch_model = TestTorchModule() self.torch_model = TorchModuleForTest()
self.paddle_model = TestPaddleModule() self.paddle_model = PaddleModuleForTest()
def test_to(self): def test_to(self):
""" """
@ -110,7 +111,7 @@ class TorchPaddleMixModuleTestCase(unittest.TestCase):
for value in chain(self.torch_model.named_parameters(), self.paddle_model.named_parameters()): for value in chain(self.torch_model.named_parameters(), self.paddle_model.named_parameters()):
params.append(value) params.append(value)
self.assertEqual(len(params), len(mix_params)) assert len(params) == len(mix_params)
def test_named_parameters(self): def test_named_parameters(self):
""" """
@ -126,7 +127,7 @@ class TorchPaddleMixModuleTestCase(unittest.TestCase):
for name, value in chain(self.torch_model.named_parameters(), self.paddle_model.named_parameters()): for name, value in chain(self.torch_model.named_parameters(), self.paddle_model.named_parameters()):
param_names.append(name) param_names.append(name)
self.assertListEqual(sorted(param_names), sorted(mix_param_names)) assert sorted(param_names) == sorted(mix_param_names)
def test_torch_named_parameters(self): def test_torch_named_parameters(self):
""" """
@ -142,7 +143,7 @@ class TorchPaddleMixModuleTestCase(unittest.TestCase):
for name, value in self.torch_model.named_parameters(): for name, value in self.torch_model.named_parameters():
param_names.append(name) param_names.append(name)
self.assertListEqual(sorted(param_names), sorted(mix_param_names)) assert sorted(param_names) == sorted(mix_param_names)
def test_paddle_named_parameters(self): def test_paddle_named_parameters(self):
""" """
@ -158,7 +159,7 @@ class TorchPaddleMixModuleTestCase(unittest.TestCase):
for name, value in self.paddle_model.named_parameters(): for name, value in self.paddle_model.named_parameters():
param_names.append(name) param_names.append(name)
self.assertListEqual(sorted(param_names), sorted(mix_param_names)) assert sorted(param_names) == sorted(mix_param_names)
def test_torch_state_dict(self): def test_torch_state_dict(self):
""" """
@ -167,7 +168,7 @@ class TorchPaddleMixModuleTestCase(unittest.TestCase):
torch_dict = self.torch_model.state_dict() torch_dict = self.torch_model.state_dict()
mix_dict = self.model.state_dict(backend="torch") mix_dict = self.model.state_dict(backend="torch")
self.assertListEqual(sorted(torch_dict.keys()), sorted(mix_dict.keys())) assert sorted(torch_dict.keys()) == sorted(mix_dict.keys())
def test_paddle_state_dict(self): def test_paddle_state_dict(self):
""" """
@ -177,7 +178,7 @@ class TorchPaddleMixModuleTestCase(unittest.TestCase):
mix_dict = self.model.state_dict(backend="paddle") mix_dict = self.model.state_dict(backend="paddle")
# TODO 测试程序会显示passed后显示paddle的异常退出信息 # TODO 测试程序会显示passed后显示paddle的异常退出信息
self.assertListEqual(sorted(paddle_dict.keys()), sorted(mix_dict.keys())) assert sorted(paddle_dict.keys()) == sorted(mix_dict.keys())
def test_state_dict(self): def test_state_dict(self):
""" """
@ -188,7 +189,7 @@ class TorchPaddleMixModuleTestCase(unittest.TestCase):
mix_dict = self.model.state_dict() mix_dict = self.model.state_dict()
# TODO 测试程序会显示passed后显示paddle的异常退出信息 # TODO 测试程序会显示passed后显示paddle的异常退出信息
self.assertListEqual(sorted(all_dict.keys()), sorted(mix_dict.keys())) assert sorted(all_dict.keys()) == sorted(mix_dict.keys())
def test_load_state_dict(self): def test_load_state_dict(self):
""" """
@ -196,7 +197,7 @@ class TorchPaddleMixModuleTestCase(unittest.TestCase):
""" """
state_dict = self.model.state_dict() state_dict = self.model.state_dict()
new_model = TestMixModule() new_model = MixModuleForTest()
new_model.load_state_dict(state_dict) new_model.load_state_dict(state_dict)
new_state_dict = new_model.state_dict() new_state_dict = new_model.state_dict()
@ -205,7 +206,7 @@ class TorchPaddleMixModuleTestCase(unittest.TestCase):
for name, value in new_state_dict.items(): for name, value in new_state_dict.items():
new_state_dict[name] = value.tolist() new_state_dict[name] = value.tolist()
self.assertDictEqual(state_dict, new_state_dict) # self.assertDictEqual(state_dict, new_state_dict)
def test_save_and_load_state_dict(self): def test_save_and_load_state_dict(self):
""" """
@ -214,7 +215,7 @@ class TorchPaddleMixModuleTestCase(unittest.TestCase):
path = "model" path = "model"
try: try:
self.model.save_state_dict_to_file(path) self.model.save_state_dict_to_file(path)
new_model = TestMixModule() new_model = MixModuleForTest()
new_model.load_state_dict_from_file(path) new_model.load_state_dict_from_file(path)
state_dict = self.model.state_dict() state_dict = self.model.state_dict()
@ -225,49 +226,49 @@ class TorchPaddleMixModuleTestCase(unittest.TestCase):
for name, value in new_state_dict.items(): for name, value in new_state_dict.items():
new_state_dict[name] = value.tolist() new_state_dict[name] = value.tolist()
self.assertDictEqual(state_dict, new_state_dict) # self.assertDictEqual(state_dict, new_state_dict)
finally: finally:
rank_zero_rm(path) rank_zero_rm(path)
def if_device_correct(self, device): def if_device_correct(self, device):
self.assertEqual(self.model.torch_fc1.weight.device, self.torch_model.torch_fc1.weight.device) assert self.model.torch_fc1.weight.device == self.torch_model.torch_fc1.weight.device
self.assertEqual(self.model.torch_conv2d1.weight.device, self.torch_model.torch_fc1.bias.device) assert self.model.torch_conv2d1.weight.device == self.torch_model.torch_fc1.bias.device
self.assertEqual(self.model.torch_conv2d1.bias.device, self.torch_model.torch_conv2d1.bias.device) assert self.model.torch_conv2d1.bias.device == self.torch_model.torch_conv2d1.bias.device
self.assertEqual(self.model.torch_tensor.device, self.torch_model.torch_tensor.device) assert self.model.torch_tensor.device == self.torch_model.torch_tensor.device
self.assertEqual(self.model.torch_param.device, self.torch_model.torch_param.device) assert self.model.torch_param.device == self.torch_model.torch_param.device
if device == "cpu": if device == "cpu":
self.assertTrue(self.model.paddle_fc1.weight.place.is_cpu_place()) assert self.model.paddle_fc1.weight.place.is_cpu_place()
self.assertTrue(self.model.paddle_fc1.bias.place.is_cpu_place()) assert self.model.paddle_fc1.bias.place.is_cpu_place()
self.assertTrue(self.model.paddle_conv2d1.weight.place.is_cpu_place()) assert self.model.paddle_conv2d1.weight.place.is_cpu_place()
self.assertTrue(self.model.paddle_conv2d1.bias.place.is_cpu_place()) assert self.model.paddle_conv2d1.bias.place.is_cpu_place()
self.assertTrue(self.model.paddle_tensor.place.is_cpu_place()) assert self.model.paddle_tensor.place.is_cpu_place()
elif device.startswith("cuda"): elif device.startswith("cuda"):
self.assertTrue(self.model.paddle_fc1.weight.place.is_gpu_place()) assert self.model.paddle_fc1.weight.place.is_gpu_place()
self.assertTrue(self.model.paddle_fc1.bias.place.is_gpu_place()) assert self.model.paddle_fc1.bias.place.is_gpu_place()
self.assertTrue(self.model.paddle_conv2d1.weight.place.is_gpu_place()) assert self.model.paddle_conv2d1.weight.place.is_gpu_place()
self.assertTrue(self.model.paddle_conv2d1.bias.place.is_gpu_place()) assert self.model.paddle_conv2d1.bias.place.is_gpu_place()
self.assertTrue(self.model.paddle_tensor.place.is_gpu_place()) assert self.model.paddle_tensor.place.is_gpu_place()
self.assertEqual(self.model.paddle_fc1.weight.place.gpu_device_id(), self.paddle_model.paddle_fc1.weight.place.gpu_device_id()) assert self.model.paddle_fc1.weight.place.gpu_device_id() == self.paddle_model.paddle_fc1.weight.place.gpu_device_id()
self.assertEqual(self.model.paddle_fc1.bias.place.gpu_device_id(), self.paddle_model.paddle_fc1.bias.place.gpu_device_id()) assert self.model.paddle_fc1.bias.place.gpu_device_id() == self.paddle_model.paddle_fc1.bias.place.gpu_device_id()
self.assertEqual(self.model.paddle_conv2d1.weight.place.gpu_device_id(), self.paddle_model.paddle_conv2d1.weight.place.gpu_device_id()) assert self.model.paddle_conv2d1.weight.place.gpu_device_id() == self.paddle_model.paddle_conv2d1.weight.place.gpu_device_id()
self.assertEqual(self.model.paddle_conv2d1.bias.place.gpu_device_id(), self.paddle_model.paddle_conv2d1.bias.place.gpu_device_id()) assert self.model.paddle_conv2d1.bias.place.gpu_device_id() == self.paddle_model.paddle_conv2d1.bias.place.gpu_device_id()
self.assertEqual(self.model.paddle_tensor.place.gpu_device_id(), self.paddle_model.paddle_tensor.place.gpu_device_id()) assert self.model.paddle_tensor.place.gpu_device_id() == self.paddle_model.paddle_tensor.place.gpu_device_id()
else: else:
raise NotImplementedError raise NotImplementedError
def if_training_correct(self, training): def if_training_correct(self, training):
self.assertEqual(self.model.torch_fc1.training, training) assert self.model.torch_fc1.training == training
self.assertEqual(self.model.torch_softmax.training, training) assert self.model.torch_softmax.training == training
self.assertEqual(self.model.torch_conv2d1.training, training) assert self.model.torch_conv2d1.training == training
self.assertEqual(self.model.paddle_fc1.training, training) assert self.model.paddle_fc1.training == training
self.assertEqual(self.model.paddle_softmax.training, training) assert self.model.paddle_softmax.training == training
self.assertEqual(self.model.paddle_conv2d1.training, training) assert self.model.paddle_conv2d1.training == training
############################################################################ ############################################################################
@ -311,10 +312,11 @@ class MixMNISTModel(MixModule):
return torch_out return torch_out
class TestMNIST(unittest.TestCase): @pytest.mark.torchpaddle
class TestMNIST:
@classmethod @classmethod
def setUpClass(self): def setup_class(self):
self.train_dataset = paddle.vision.datasets.MNIST(mode='train') self.train_dataset = paddle.vision.datasets.MNIST(mode='train')
self.test_dataset = paddle.vision.datasets.MNIST(mode='test') self.test_dataset = paddle.vision.datasets.MNIST(mode='test')
@ -325,7 +327,7 @@ class TestMNIST(unittest.TestCase):
self.dataloader = DataLoader(self.train_dataset, batch_size=100, shuffle=True) self.dataloader = DataLoader(self.train_dataset, batch_size=100, shuffle=True)
def setUp(self): def setup_method(self):
self.model = MixMNISTModel().to("cuda") self.model = MixMNISTModel().to("cuda")
self.torch_loss_func = torch.nn.CrossEntropyLoss() self.torch_loss_func = torch.nn.CrossEntropyLoss()
@ -353,7 +355,7 @@ class TestMNIST(unittest.TestCase):
self.paddle_opt.clear_grad() self.paddle_opt.clear_grad()
else: else:
self.assertLess(epoch_loss / (batch + 1), 0.3) assert epoch_loss / (batch + 1) < 0.3
# 开始测试 # 开始测试
correct = 0 correct = 0
@ -367,7 +369,7 @@ class TestMNIST(unittest.TestCase):
correct += 1 correct += 1
acc = correct / len(self.test_dataset) acc = correct / len(self.test_dataset)
self.assertGreater(acc, 0.85) assert acc > 0.85
############################################################################ ############################################################################
# #