Unverified Commit d223820e authored by NingMa's avatar NingMa Committed by GitHub

Update README.md

parent f0416ebf
......@@ -32,16 +32,16 @@ The other datasets follow the similar usages!
If you find the repo helpful, feel free to star it and cite us:
@article{MA2023110208,
  author   = {Ma, Ning and Wang, Haishuai and Zhang, Zhen and Zhou, Sheng and Chen, Hongyang and Bu, Jiajun},
  title    = {Source-free semi-supervised domain adaptation via progressive {Mixup}},
  journal  = {Knowledge-Based Systems},
  volume   = {262},
  pages    = {110208},
  year     = {2023},
  issn     = {0950-7051},
  doi      = {10.1016/j.knosys.2022.110208},
  url      = {https://www.sciencedirect.com/science/article/pii/S0950705122013041},
  keywords = {Domain adaptation, Semi-supervised learning, Data augmentation},
  abstract = {Existing domain adaptation methods usually perform explicit representation alignment by simultaneously accessing the source data and target data. However, the source data are not always available due to the privacy preserving consideration or bandwidth limitations. To address this issue, source-free domain adaptation is proposed to perform domain adaptation without accessing the source data. Recently, the adaptation paradigm is attracting increasing attention, and multiple works have been proposed for unsupervised source-free domain adaptation. However, without utilizing any supervised signal and source data at the adaptation stage, the optimization of the target model is unstable and fragile. To alleviate the problem, we focus on utilizing a few labeled target data to guide the adaptation, which forms our method into semi-supervised domain adaptation under a source-free setting. We propose a progressive data interpolation strategy including progressive anchor selection and dynamic interpolation rate to reduce the intra-domain discrepancy and inter-domain representation gap. Extensive experiments on three public datasets demonstrate the effectiveness as well as the better scalability of our method.},
}
Duplicate of entry MA2023110208 above; the leading "@" is removed so BibTeX ignores
it and no "Repeated entry" error is raised. Restore the "@" to re-enable.
article{MA2023110208,
  author   = {Ma, Ning and Wang, Haishuai and Zhang, Zhen and Zhou, Sheng and Chen, Hongyang and Bu, Jiajun},
  title    = {Source-free semi-supervised domain adaptation via progressive {Mixup}},
  journal  = {Knowledge-Based Systems},
  volume   = {262},
  pages    = {110208},
  year     = {2023},
  issn     = {0950-7051},
  doi      = {10.1016/j.knosys.2022.110208},
  url      = {https://www.sciencedirect.com/science/article/pii/S0950705122013041},
  keywords = {Domain adaptation, Semi-supervised learning, Data augmentation},
  abstract = {Existing domain adaptation methods usually perform explicit representation alignment by simultaneously accessing the source data and target data. However, the source data are not always available due to the privacy preserving consideration or bandwidth limitations. To address this issue, source-free domain adaptation is proposed to perform domain adaptation without accessing the source data. Recently, the adaptation paradigm is attracting increasing attention, and multiple works have been proposed for unsupervised source-free domain adaptation. However, without utilizing any supervised signal and source data at the adaptation stage, the optimization of the target model is unstable and fragile. To alleviate the problem, we focus on utilizing a few labeled target data to guide the adaptation, which forms our method into semi-supervised domain adaptation under a source-free setting. We propose a progressive data interpolation strategy including progressive anchor selection and dynamic interpolation rate to reduce the intra-domain discrepancy and inter-domain representation gap. Extensive experiments on three public datasets demonstrate the effectiveness as well as the better scalability of our method.},
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment