add ICML 24 paper
lexingxie authored Sep 4, 2024
1 parent 9e5b347 commit 5dc41b8
Showing 1 changed file with 11 additions and 2 deletions.
static/documents/publications.bib
@@ -1,10 +1,19 @@
@inproceedings{zhu2024online,
title = {Online Learning in Betting Markets: Profit versus Prediction},
author = {Zhu, Haiqing and Soen, Alexander and Cheung, Yun Kuen and Xie, Lexing},
booktitle = {Forty-first International Conference on Machine Learning (ICML '24)},
year = {2024},
abstract = {We examine two types of binary betting markets, whose primary goal is for profit (such as sports gambling) or to gain information (such as prediction markets). We articulate the interplay between belief and price-setting to analyse both types of markets, and show that the goals of maximising bookmaker profit and eliciting information are fundamentally incompatible. A key insight is that profit hinges on the deviation between (the distribution of) bettor and true beliefs, and that heavier tails in the bettor belief distribution imply higher profit. Our algorithmic contribution is to introduce online learning methods for price-setting. Traditionally, bookmakers update their prices rather infrequently; we present two algorithms that guide price updates upon seeing each bet, assuming very little about bettor belief distributions. The online pricing algorithm achieves stochastic regret of $O(\sqrt{T})$ against the worst local maximum, or $O(\sqrt{T \log T})$ with high probability against the global maximum under fair odds. More broadly, the inherent trade-off between profit and information-seeking in binary betting may inspire new understandings of large-scale multi-agent behaviour.},
url_abstract = {https://arxiv.org/abs/2406.04062},
url_paper = {https://arxiv.org/pdf/2406.04062}
}

@inproceedings{nguyen2024icwsm,
author = {Nguyen, Tuan Dung and Chen, Ziyu and Carroll, Nicholas George and Tran, Alasdair and Klein, Colin and Xie, Lexing},
title = {Measuring Moral Dimensions in Social Media with {Mformer}},
booktitle = {International AAAI Conference on Web and Social Media (ICWSM '24)},
year = {2024},
abstract = {The ever-growing textual records of contemporary social issues, often discussed online with moral rhetoric, present both an opportunity and a challenge for studying how moral concerns are debated in real life. Moral foundations theory is a taxonomy of intuitions widely used in data-driven analyses of online content, but current computational tools to detect moral foundations suffer from the incompleteness and fragility of their lexicons and from poor generalization across data domains. In this paper, we fine-tune a large language model to measure moral foundations in text based on datasets covering news media and long- and short-form online discussions. The resulting model, called Mformer, outperforms existing approaches on the same domains by 4--12% in AUC and further generalizes well to four commonly used moral text datasets, improving by up to 17% in AUC. We present case studies using Mformer to analyze everyday moral dilemmas on Reddit and controversies on Twitter, showing that moral foundations can meaningfully describe people's stance on social issues and such variations are topic-dependent. Pre-trained model and datasets are released publicly. We posit that Mformer will help the research community quantify moral dimensions for a range of tasks and data domains, and eventually contribute to the understanding of moral situations faced by humans and machines.},
url_abstract = {https://arxiv.org/abs/2311.10219},
url_paper = {https://arxiv.org/pdf/2311.10219}
}
