diff --git a/cms/assets/img/sponsors/oaworks.png b/cms/assets/img/sponsors/oaworks.png
new file mode 100644
index 0000000000..6909779866
Binary files /dev/null and b/cms/assets/img/sponsors/oaworks.png differ
diff --git a/cms/assets/img/volunteers/Kadri Kiran.jpg b/cms/assets/img/volunteers/Kadri Kiran.jpg
new file mode 100644
index 0000000000..a3cd64eaa5
Binary files /dev/null and b/cms/assets/img/volunteers/Kadri Kiran.jpg differ
diff --git a/cms/assets/img/volunteers/Pablo.jpg b/cms/assets/img/volunteers/Pablo.jpg
new file mode 100644
index 0000000000..020e5e0a9a
Binary files /dev/null and b/cms/assets/img/volunteers/Pablo.jpg differ
diff --git a/cms/data/notifications.yml b/cms/data/notifications.yml
index 6aca840e49..1a67c6c7c2 100644
--- a/cms/data/notifications.yml
+++ b/cms/data/notifications.yml
@@ -142,3 +142,8 @@ update_request:publisher:rejected:notify:
short:
Your update request was rejected
+journal:assed:discontinuing_soon:notify:
+ long: |
+ Journal "{title}" (id: {id}) will discontinue in {days} days.
+ short:
+ Journal discontinuing
\ No newline at end of file
diff --git a/cms/data/sponsors.yml b/cms/data/sponsors.yml
index 2c78f9e2d6..157b0defe4 100644
--- a/cms/data/sponsors.yml
+++ b/cms/data/sponsors.yml
@@ -39,6 +39,10 @@ silver:
- name: MDPI
url: https://www.mdpi.com/
logo: mdpi.svg
+
+- name: OA.Works
+ url: https://oa.works/
+ logo: oaworks.png
- name: SAGE Publishing
url: https://www.sagepublications.com/
@@ -102,6 +106,10 @@ bronze:
url: https://www.keaipublishing.com/
logo: keai.svg
+- name: OASPA
+ url: https://oaspa.org/
+ logo: oaspa.png
+
- name: OCLC
url: https://www.oclc.org/
logo: oclc.svg
diff --git a/cms/data/team.yml b/cms/data/team.yml
index 638b9db3d6..ce175c0667 100644
--- a/cms/data/team.yml
+++ b/cms/data/team.yml
@@ -50,22 +50,11 @@
- name: Iemima Ploscariu
role: Managing Editor
photo: iemima.jpeg
- bio: 'Iemima holds a Master of Arts in Comparative History from Central European University, Hungary, and a Master of Letters in Central and East European Studies from the University of St Andrews, Scotland. She is a PhD student and researcher for the Irish Research Council at Dublin City University, Ireland. She volunteered as an Editor for DOAJ from 2014 until 2020 when she became a Managing Editor.'
+ bio: 'Iemima holds a PhD in History from Dublin City University, Ireland, as an Irish Research Council fellow. She also has an MA in Comparative History and an MLitt in Central and East European Studies. She is originally from Romania, grew up in California, and now lives in Barcelona, Spain. She volunteered as an Editor for DOAJ from 2014 until 2020 when she became a Managing Editor.'
coi:
2020: https://drive.google.com/file/d/1-4wTgvwCu_tvDv5NIoJhZi0QpQl-fpdB/view?usp=sharing
2022: https://drive.google.com/file/d/1xEUUxhqSE0OnKd_x8z1oLLRRrJrKAwXA/view?usp=sharing
-- name: Ilaria Fava
- role: Managing Editor
- photo: Ilaria.jpg
- bio: 'Ilaria is a librarian with several years of experience within the Open Access community in her home country of Italy, where she has dealt with Open Access issues at both national and international level. She also serves the Göttingen State and University Library working on Open Science projects.
-Based in Rome and Göttingen, Ilaria loves baking cakes; she speaks Italian, English, some Spanish and a little German.'
- coi:
- 2016: https://drive.google.com/file/d/0ByRf6PVViI-mY2dRZTR5eTFjQkk/view?usp=sharing&resourcekey=0-fPa6ce_HjfoVQqKGqWxLNw
- 2018: https://drive.google.com/file/d/1AMi0uIWHgEiaqmJLM7f_SFsiLEJENfjF/view?usp=sharing
- 2020: https://drive.google.com/file/d/1jWZKc6xjp3yfo6qjQWp6Yp3y-71ZHLth/view?usp=sharing
- 2022: https://drive.google.com/file/d/1_au6llN2ALPnkNTgUrzLrSTw8gundldJ/view?usp=sharing
-
- name: Joanna Ball
role: Managing Director
photo: joba.jpg
@@ -120,14 +109,6 @@ Based in Rome and Göttingen, Ilaria loves baking cakes; she speaks Italian, Eng
2020: https://drive.google.com/file/d/1zU-lLB5W54E_QUm5uto5tqB6cZl83TAJ/view?usp=sharing
2022: https://drive.google.com/file/d/19rw-naMJqHkI5T7aDIDPUkwPutBdDpDm/view?usp=sharing
-- name: Louise Stoddard
- role: Communications Manager
- photo: louise.jpg
- bio: "Louise has over 15 years experience in communications and public relations for non-profit and international organisations. She holds a Masters in Journalism and a Bachelor of Science and Economics in International Development from The University of Wales, Swansea. Louise has worked for the United Nations for 8 years, also with academic and non-profit organisations promoting access to knowledge and information. In 2021 Louise joined DOAJ as the focal point for public relations and communications. Outside of work Louise can usually be found in her vegetable garden."
- coi:
- 2021: https://drive.google.com/file/d/1DmDsIkv-orjF7QGEVwqFDAl0EQx3qRXg/view?usp=sharing
- 2022: https://drive.google.com/file/d/1wOeX97BZGEX50orKo6TwlWmGsdbLFfBD/view?usp=sharing
-
- name: Luis Montilla
role: Managing Editor
photo: luis.jpeg
@@ -193,7 +174,7 @@ Based in Rome and Göttingen, Ilaria loves baking cakes; she speaks Italian, Eng
2022: https://drive.google.com/file/d/19J5ELHNaV_pby7ZpQMii8_Ts4wiERu8K/view?usp=sharing
- name: Tom Olijhoek
- role: Editor-in-Chief
+ role: Head of Outreach
photo: tom.jpg
bio: 'Tom has a PhD in molecular microbiology and spent several years in Africa doing research on malaria, sleeping sickness and meningococcal epidemics. He has been actively advocating open access and open science since 2012 when he joined the Open Knowledge community and became a member of the DOAJ advisory board. His current research interests are development of quality systems for the assessment of scholarly journals and articles, and research in the area of soil microbiology in relation to soil health and human health.'
coi:
diff --git a/cms/data/volunteers.yml b/cms/data/volunteers.yml
index 3b82092c1b..9e09159ad7 100644
--- a/cms/data/volunteers.yml
+++ b/cms/data/volunteers.yml
@@ -68,6 +68,14 @@ ed:
featured: true
photo: "napa.jpg"
+- name: Pablo Hernandez
+ area: Medical Sciences
+ year_since:
+ city: Caracas
+ country: Venezuela
+ language: Spanish, English
+ photo: "Pablo.jpg"
+
- name: Paola Galimberti
area: Research Evaluation
year_since:
@@ -456,6 +464,14 @@ ass_ed:
country:
language: Korean, English
+- name: Kadri Kıran
+ area: Systematic Entomology
+ year_since:
+ city: Edirne
+ country: Türkiye
+ language: English, Turkish, German
+ photo: "Kadri Kiran.jpg"
+
- name: Kâmil B. Varınca
area: Science
year_since:
@@ -687,14 +703,6 @@ ass_ed:
country: Spain
language: Euskera, Spanish, English
-- name: Pablo Hernandez
- area: Science
- year_since:
- city: Caracas
- country: Venezuela
- language: Spanish, English
- photo: "pablohernandez.jpg"
-
- name: Paula Carina de Araújo
area: Library and Information Science
year_since:
diff --git a/cms/pages/about/at-20.md b/cms/pages/about/at-20.md
index b01af20771..b364aa00da 100644
--- a/cms/pages/about/at-20.md
+++ b/cms/pages/about/at-20.md
@@ -23,16 +23,17 @@ Further down the page is a historical timeline to give you a full overview of DO
[//]: # (NB. adding whitespace around the titles will break styling)
{.events .unstyled-list}
- {% include "includes/svg/at-20/theme_open.svg" %}
- - **[Registration is open](https://us02web.zoom.us/webinar/register/WN_-b000to3RZKexuFsJGJw1g#/registration)**
+ - **[Recording is available](https://www.youtube.com/watch?v=qnpSdX3eusk)**
- Name: _DOAJ at 20: Open_
- Date: 15th June 2023
- - Event Time: 13:00 UTC ([Check the event time](https://www.timeanddate.com/worldclock/fixedtime.html?iso=20230615T13&ah=1&am=30) where you are.)
+ - Event Time: 13:00 UTC
- Duration: 90 mins
- {% include "includes/svg/at-20/theme_global.svg" %}
+ - **[Registration is open](https://us02web.zoom.us/webinar/register/WN_fu42oi59S7GZ366rjyAUGg#/registration)**
- Name: _DOAJ at 20: Global_
- Date: _28th September 2023_
- - Event Time: to be confirmed
- - Duration: 90 mins
+ - Event Time: 13:00 UTC ([Check the event time](https://www.timeanddate.com/worldclock/fixedtime.html?iso=20230928T13&ah=1&am=30) where you are.)
+ - Duration: 2 hours
- {% include "includes/svg/at-20/theme_trusted.svg" %}
- Name: _DOAJ at 20: Trusted_
- Date: _7th December 2023_
@@ -41,27 +42,15 @@ Further down the page is a historical timeline to give you a full overview of DO
## Open
-Join us for the first of three events marking our 20th anniversary as a key open infrastructure. 'DOAJ at 20: Open' is free and open to researchers, librarians, research support staff, publishers, and anyone interested in open access!
-
-The event will build around the theme ‘open’, where our moderator (Abeni Wickham) will be chatting with our three guests: Lars Bjørnshauge, Mikael Laakso, and Nadine Buckland. The discussion will focus on their thoughts and aspirations on open scholarship. They will also explore the obstacles and challenges in adopting immediate open access.
-
-The event will last 90 minutes.
-
-**Abeni Wickham**
-
-Abeni was born in Guyana, South America and holds a PhD in Molecular Physics from Linkoping University. She left academia in 2018 to create SciFree, a software company with a mission to make research open to the public for free. SciFree currently serves 45 University Library customers in Sweden, Denmark, the United Kingdom and the USA. Besides building new tech platforms for university infrastructure, Abeni volunteers on the NASIG Digital Preservation committee, helps PhDs transition in their careers and enjoys surfing both actual waves and the Open Access wave worldwide.
+Our first of three events marking our 20th anniversary took place on 15th June 2023. The event was built around the theme ‘open’, where our moderator (Abeni Wickham) had a conversation with our three guests: Lars Bjørnshauge, Mikael Laakso, and Nadine Buckland. The discussion focused on developments and changes over the last 20 years, with reflections from all speakers on what the next years will bring. A recording of the event is [available on YouTube](https://www.youtube.com/watch?v=qnpSdX3eusk).
-**Lars Bjørnshauge**
+## Global
-Lars Bjørnshauge is the Director of Infrastructure Services for Open Access C.I.C (www.is4oa.org). A true open access champion, Lars is DOAJ’s founder and worked as the Managing Director until 2022. Previously, he has been the Deputy Director and Acting Director for the Technical Information Center of Denmark at the Technical University of Denmark. Lars has also been the Director of Libraries at Lund University in Sweden, and the Director of SPARC Europe. In addition to founding DOAJ, he has also co-founded OpenDOAR (the Directory of Open Access Repositories, DOAB (Directory of Open Access Books), and Think.Check.Submit. Lars was on the OASPA Board from 2012-2019.
+Our second event will be built around the theme Global, where we will have eight lightning talks from speakers around the world. Our moderator and DOAJ Ambassador, Ivonne Lujano, will introduce the speakers and manage two Q&A sessions, where the audience can ask our speakers questions. More information about the event and all the speakers can be found on the [registration page](https://us02web.zoom.us/webinar/register/WN_fu42oi59S7GZ366rjyAUGg#/registration).
-**Mikael Laakso**
+## Trusted
-Mikael Laakso is an Associate Professor in Information Systems Science at Hanken School of Economics in Helsinki. He has been researching the changing landscape towards openness in scholarly publishing by studying combinations of bibliometrics, web metrics, business models, science policy, and author behaviour. Since the start of his research in this domain around 2009, DOAJ data has been instrumental to most of his research projects. In addition to research, Mikael has also been active in national and international working groups furthering various dimensions of open science.
-
-**Nadine D. Tulloch-Buckland**
-
-Nadine D. Tulloch-Buckland is the former General Manager of the UWI Press, Senior Lecturer of the University of the West Indies and Director of Spoizer Content Agency Limited. She has over twenty years’ experience in scholarly publishing with specific emphasis on finance and business model development geared towards sustainability in scholarly publishing in the Caribbean. Nadine is the current Treasurer of ALPSP and former Treasurer/Director of AUPresses. She is an advocate for Sustainable Open Access Publishing.
+Our third and last DOAJ at 20 event will be built around the theme Trusted. More information about this event will be available later in the year.
## Timeline
@@ -124,6 +113,9 @@ Find out more by [registering with us](https://forms.reform.app/S49aj6/DOAJat20/
You can also follow and join our celebration on social media: #DOAJat20 ([Mastodon](https://masto.ai/tags/DOAJat20) & [Twitter](https://twitter.com/search?q=%23DOAJat20)).
## Support our anniversary campaign
+
+
+
As a crucial open infrastructure, DOAJ relies on donations from supporting organisations to help keep it running to provide the services the community relies on and trusts.
We're running [a fundraising campaign](https://www.paypal.com/giving/campaigns?campaign_id=4VXR4TJ69MDJJ) for our 20th year.
diff --git a/cms/pages/about/index.md b/cms/pages/about/index.md
index dd2d13e588..ba714de17c 100644
--- a/cms/pages/about/index.md
+++ b/cms/pages/about/index.md
@@ -14,7 +14,7 @@ OPEN - DOAJ is a vital part of the global open access infrastructure.
GLOBAL – DOAJ is a global community, with [team members](/about/team/), [ambassadors](/about/ambassadors/) and [volunteers](/about/volunteers/) based in 45 countries around the world, speaking 36 languages.
-TRUSTED – Globally DOAJ's [standards](/apply/guide/#basic-criteria-for-inclusion) have become a gold standard for open access publishing.
+TRUSTED – Globally, DOAJ's [criteria](/apply/guide/#basic-criteria-for-inclusion) have become a gold standard for open access publishing.
## Our mission
@@ -30,7 +30,7 @@ DOAJ works to build an equitable and diverse scholarly ecosystem where trusted r
DOAJ is an independent, non-profit organisation managed by [Infrastructure Services for Open Access C.I.C.](https://is4oa.org/) (IS4OA), a [community interest company](https://en.wikipedia.org/wiki/Community_interest_company) registered in the United Kingdom and with a branch in Denmark.
-DOAJ relies entirely on the voluntary donations of its supporters. Neither DOAJ or IS4OA receive grants or funding from any other source.
+DOAJ relies entirely on the voluntary donations of its supporters. Neither DOAJ nor IS4OA receive grants or funding from any other source.
DOAJ has an [Advisory Board and Council](/about/advisory-board-council), the members of which carry out their duties voluntarily.
@@ -51,19 +51,19 @@ We expect the members of our Team, our volunteers and our Ambassadors to always
## Partnerships and collaborations
-DOAJ partners with many organisations. The nature of the partnerships vary and may include membership, contracts for work, exchanges of information or services, initiative signatories, or access to information resources that assist DOAJ with our application review process.
+DOAJ partners with many organisations. The nature of the partnerships varies and may include membership, contracts for work, exchanges of information or services, initiative signatories, or access to information resources that assist DOAJ with our application review process.
{:.stretch-list}
+ [Creative Commons Global Network](https://network.creativecommons.org/)
- {:.stretch-list__item}
+ {:.stretch-list__item}
++ [Crossref](https://crossref.org/)
+ {:.stretch-list__item}
+ [COPE](https://publicationethics.org/)
{:.stretch-list__item}
+ [Cottage Labs LLP](https://cottagelabs.com/)
{:.stretch-list__item}
+ [DOAB (Directory of Open Access Books)](https://www.doabooks.org/)
{:.stretch-list__item}
-+ [Federation of Finnish Learned Societies](https://tsv.fi/en)
- {:.stretch-list__item}
+ [Helsinki Initiative on Multilingualism](https://www.helsinki-initiative.org/)
{:.stretch-list__item}
+ [ISSN](https://www.issn.org/)
@@ -73,7 +73,9 @@ DOAJ partners with many organisations. The nature of the partnerships vary and m
+ [OASPA](https://oaspa.org/)
{:.stretch-list__item}
+ [OCLC](https://www.oclc.org/en/home.html)
- {:.stretch-list__item}
+ {:.stretch-list__item}
++ [OpenAIRE](https://www.openaire.eu/)
+ {:.stretch-list__item}
+ [Redalyc](https://www.redalyc.org/)
{:.stretch-list__item}
+ [Research4Life](https://www.research4life.org/)
diff --git a/cms/pages/about/team.md b/cms/pages/about/team.md
index 1a5c81ee40..68a3200a48 100644
--- a/cms/pages/about/team.md
+++ b/cms/pages/about/team.md
@@ -8,4 +8,3 @@ featuremap: ~~Team:Fragment->TeamData:Template~~
---
-
diff --git a/cms/pages/apply/guide.md b/cms/pages/apply/guide.md
index 88fa80d849..14bc161758 100644
--- a/cms/pages/apply/guide.md
+++ b/cms/pages/apply/guide.md
@@ -7,7 +7,7 @@ sticky_sidenav: true
featuremap: ~~GuideToApplying:Fragment~~
---
-Before you start the application process, you will be asked to log in or register. You will be able to save your progress and review all your answers before you submit them. A [PDF version of the application form](/static/doaj/docs/2022-09-27-DOAJQuestions.pdf) is available for reference only.
+Before you start the application process, you will be asked to log in or register. You will be able to save your progress and review all your answers before you submit them. A [PDF version of the application form](/static/doaj/docs/2023-07-04-DOAJQuestions.pdf) is available for reference only.
## Basic criteria for inclusion
diff --git a/cms/pages/legal/terms.md b/cms/pages/legal/terms.md
index 8470ed83c5..ad872453eb 100644
--- a/cms/pages/legal/terms.md
+++ b/cms/pages/legal/terms.md
@@ -62,8 +62,16 @@ DOAJ uses a variety of licenses for the different parts of its website and the c
---
+### Licensing terms for content published on DOAJ News Service
+15. DOAJ News Service is the DOAJ blog, hosted on WordPress: https://blog.doaj.org
+
+16. All content posted on the blog is licensed under the CC BY-NC Creative Commons license. See the blog footer for full details.
+
+---
+
### Version history
-1. **Version 1.3** (20 April 2023) - address and contact details added to Copyright (1). 'Other than as permitted in law...' added to Copyright (2). Notice period added to Conditions of using this website (3). Conditions of using this website (4) rewritten completely to make it clearer that the name and website are protected.
-2. **Version 1.2** (20 January 2022) - corrected ownership from DOAJ to IS4OA in Copyright (1). Simplified the language of Copyright (3). Corrected ownership from DOAJ to IS4OA in Conditions of using this website (4)
-3. **Version 1** (March 2021)
+1. **Version 1.4** (03 July 2023) - added details about the way we license content on our blog, DOAJ News Service.
+2. **Version 1.3** (20 April 2023) - address and contact details added to Copyright (1). 'Other than as permitted in law...' added to Copyright (2). Notice period added to Conditions of using this website (3). Conditions of using this website (4) rewritten completely to make it clearer that the name and website are protected.
+3. **Version 1.2** (20 January 2022) - corrected ownership from DOAJ to IS4OA in Copyright (1). Simplified the language of Copyright (3). Corrected ownership from DOAJ to IS4OA in Conditions of using this website (4)
+4. **Version 1** (March 2021)
diff --git a/cms/sass/base/_general.scss b/cms/sass/base/_general.scss
index 29d2445e24..dc4454d977 100644
--- a/cms/sass/base/_general.scss
+++ b/cms/sass/base/_general.scss
@@ -261,7 +261,20 @@ select {
input[type="checkbox"],
input[type="radio"] {
- display: none;
+ opacity: 0; // hide the native control but keep it focusable (unlike display: none)
+ width: 0.8em;
+ height: 0.8em;
+ margin-left: -0.8rem;
+
+ &:focus + label {
+ outline: dashed 2px lightgrey;
+ outline-offset: 1px;
+ }
+
+ &:focus:not(:focus-visible){
+ outline: none;
+ }
+
+ label {
margin: 0 0 $spacing-03 0;
diff --git a/cms/sass/components/_accordion.scss b/cms/sass/components/_accordion.scss
new file mode 100644
index 0000000000..e066ee02f6
--- /dev/null
+++ b/cms/sass/components/_accordion.scss
@@ -0,0 +1,3 @@
+.accordion:focus-within {
+ border: $grapefruit solid;
+}
\ No newline at end of file
diff --git a/cms/sass/components/_buttons.scss b/cms/sass/components/_buttons.scss
index 1e71d3aceb..061c75c454 100644
--- a/cms/sass/components/_buttons.scss
+++ b/cms/sass/components/_buttons.scss
@@ -117,3 +117,10 @@ button[type="submit"].button--secondary {
color: currentColor;
}
}
+
+button.aria-button {
+ all: inherit;
+ -webkit-appearance: none;
+ -moz-appearance: none;
+ appearance: none;
+}
diff --git a/cms/sass/components/_filters.scss b/cms/sass/components/_filters.scss
index e82883841b..026403d67d 100644
--- a/cms/sass/components/_filters.scss
+++ b/cms/sass/components/_filters.scss
@@ -50,6 +50,8 @@
max-height: $spacing-07;
height: auto;
overflow-y: auto;
+ // add minimal padding to ensure the focus outline remains visible
+ padding-top: $spacing-01;
@include unstyled-list;
li {
diff --git a/cms/sass/main.scss b/cms/sass/main.scss
index cdd22133b8..9b30911f48 100644
--- a/cms/sass/main.scss
+++ b/cms/sass/main.scss
@@ -28,6 +28,7 @@
"layout/sidenav",
"components/alert",
+ "components/accordion",
"components/back-to-top",
"components/buttons",
"components/card",
diff --git a/doajtest/fixtures/urls.py b/doajtest/fixtures/urls.py
new file mode 100644
index 0000000000..0fa1f98302
--- /dev/null
+++ b/doajtest/fixtures/urls.py
@@ -0,0 +1,18 @@
+VALID_URL_LISTS = [
+ "https://www.sunshine.com",
+ "http://www.moonlight.com",
+ "https://www.cosmos.com#galaxy",
+ "https://www.cosmos.com/galaxy",
+ "https://www.cosmos.com/galaxy#peanut",
+ "http://ftp.example.com/file%20name.txt"
+]
+
+INVALID_URL_LISTS = [
+ "ht:www",
+ "nonexistent.com",
+ "https://www.doaj.org and https://www.reddit.com",
+ "http://www.doaj.org and www.doaj.org",
+"http://www.doaj.org, www.doaj.org",
+"http://www.doaj.org, https://www.doaj.org",
+"http://ftp.example.com/file name.txt"
+]
\ No newline at end of file
diff --git a/doajtest/fixtures/v2/common.py b/doajtest/fixtures/v2/common.py
index c113219c58..efa5f7367b 100644
--- a/doajtest/fixtures/v2/common.py
+++ b/doajtest/fixtures/v2/common.py
@@ -1,11 +1,11 @@
NOTES_FORM_EXPANDED = {
'notes': [
{"note": "Second Note", "note_date": "2014-05-22T00:00:00Z", "note_id": "1234",
- 'note_author': '(fake_account_id__b)',
+ 'note_author': ' (fake_account_id__b)',
"note_author_id": "fake_account_id__b",
},
{"note": "First Note", "note_date": "2014-05-21T14:02:45Z", "note_id": "abcd",
- 'note_author': '(fake_account_id__a)',
+ 'note_author': ' (fake_account_id__a)',
"note_author_id": "fake_account_id__a",
}
]
diff --git a/doajtest/helpers.py b/doajtest/helpers.py
index f65c11f241..1f716bcf3b 100644
--- a/doajtest/helpers.py
+++ b/doajtest/helpers.py
@@ -120,6 +120,7 @@ class DoajTestCase(TestCase):
@classmethod
def setUpClass(cls) -> None:
+ import portality.app # noqa, needed to register routes
cls.originals = patch_config(app, {
"STORE_IMPL": "portality.store.StoreLocal",
"STORE_LOCAL_DIR": paths.rel2abs(__file__, "..", "tmp", "store", "main", cls.__name__.lower()),
diff --git a/doajtest/testbook/article_metadata_upload_form/article_metadata_upload_form.yml b/doajtest/testbook/article_metadata_upload_form/article_metadata_upload_form.yml
index a63435091d..2e6d391ca1 100644
--- a/doajtest/testbook/article_metadata_upload_form/article_metadata_upload_form.yml
+++ b/doajtest/testbook/article_metadata_upload_form/article_metadata_upload_form.yml
@@ -46,8 +46,7 @@ tests:
- step: Add author's ORCID iD in the wrong format eg "0000-0000-0000-000a"
- step: Click "Add Article" button
results:
- - 'Red error message: ''Invalid ORCID iD. Please enter your ORCID iD as a full
- URL of the form https://orcid.org/0000-0000-0000-0000'''
+ - 'Red error message: ''Invalid ORCID iD. Please enter your ORCID iD structured as: https://orcid.org/0000-0000-0000-0000. URLs must start with https.'''
- step: Enter valid Orcid iD for one of the authors
- step: 'Enter valid Full-Text URL, eg: https://pl.wikipedia.org/wiki/Torun'
- step: Select ISSN print and online version from dropdowns
diff --git a/doajtest/testbook/new_application_form/publishers_form.yml b/doajtest/testbook/new_application_form/publishers_form.yml
index d0eed2aba5..07f1c2e40a 100644
--- a/doajtest/testbook/new_application_form/publishers_form.yml
+++ b/doajtest/testbook/new_application_form/publishers_form.yml
@@ -89,6 +89,7 @@ tests:
- step: Change the value of one ISSN
results:
- All error messages clear.
+ - Under both ISSN fields the link to 'https://portal.issn.org/resource/ISSN/' is displayed
- step: Add keywords
results:
- As you type, you see suggestions in a list below the box
diff --git a/doajtest/testbook/public_site/ToC.yml b/doajtest/testbook/public_site/ToC.yml
new file mode 100644
index 0000000000..b481085cf9
--- /dev/null
+++ b/doajtest/testbook/public_site/ToC.yml
@@ -0,0 +1,15 @@
+suite: Public Site
+testset: ToC
+tests:
+- title: Test Correctly Displayed Discontinued Date
+ context:
+ role: anonymous
+ steps:
+ - step: To prepare to do this test make sure there are 3 journals publicly available in DOAJ,
+     one with a discontinued date in the past,
+     one with a discontinued date in the future,
+     and one with a discontinued date today
+ - step: Search for every journal from the list above
+ results:
+ - On the ToC of the journal with discontinued date in the past or today - the discontinued date is displayed
+ - On the ToC of the journal with discontinued date in the future - the discontinued date is not displayed
diff --git a/doajtest/testbook/public_site/public_search.yml b/doajtest/testbook/public_site/public_search.yml
index 1bce101f8c..6b47834e85 100644
--- a/doajtest/testbook/public_site/public_search.yml
+++ b/doajtest/testbook/public_site/public_search.yml
@@ -166,3 +166,25 @@ tests:
results:
- You are taken to the full text of this article on the Web. It opens in a new
tab
+- title: 'Test Public Search Results Display: Accessibility'
+ context:
+ role: anonymous
+ steps:
+ - step: Go to the DOAJ search page at /search/articles
+ - step: Turn on a screen reader
+ results:
+ - Extendable facets are focusable and focus is marked with a solid orange border
+ - The screenreader gives the header role ("button")
+ - The screenreader gives the state of the facet ("extended" or "folded")
+ - step: Click spacebar to fold/unfold the facet
+ results:
+ - The screenreader gives the correct state of the facet ("extended" or "folded")
+ - step: Click tab
+ results:
+ - Focus is on the list of checkboxes
+ - Focus is clearly marked by the outline
+ - step: Click spacebar to check the filter
+ results:
+ - The filter is applied
+
diff --git a/doajtest/testdrive/__init__.py b/doajtest/testdrive/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/doajtest/testdrive/factory.py b/doajtest/testdrive/factory.py
new file mode 100644
index 0000000000..cd94e90ffb
--- /dev/null
+++ b/doajtest/testdrive/factory.py
@@ -0,0 +1,25 @@
+from portality.lib import plugin
+import random
+import string
+
+
+class TestDrive():
+ def create_random_str(self, n_char=10):
+ s = string.ascii_letters + string.digits
+ return ''.join(random.choices(s, k=n_char))
+
+ def setup(self) -> dict:
+ return {"status": "not implemented"}
+
+ def teardown(self, setup_params) -> dict:
+ return {"status": "not implemented"}
+
+
+class TestFactory():
+ @classmethod
+ def get(cls, test_id):
+ modname = test_id
+ classname = test_id.replace("_", " ").title().replace(" ", "")  # e.g. "todo_associate" -> "TodoAssociate"
+ classpath = "doajtest.testdrive." + modname + "." + classname
+ klazz = plugin.load_class(classpath)
+ return klazz()
\ No newline at end of file
diff --git a/doajtest/testdrive/todo_associate.py b/doajtest/testdrive/todo_associate.py
new file mode 100644
index 0000000000..be877c3a2f
--- /dev/null
+++ b/doajtest/testdrive/todo_associate.py
@@ -0,0 +1,104 @@
+from portality import constants
+from doajtest.testdrive.factory import TestDrive
+from doajtest.fixtures.v2.applications import ApplicationFixtureFactory
+from portality.lib import dates
+from portality import models
+from datetime import datetime
+
+
+class TodoAssociate(TestDrive):
+
+ def setup(self) -> dict:
+ un = self.create_random_str()
+ pw = self.create_random_str()
+ acc = models.Account.make_account(un + "@example.com", un, "TodoAssociate " + un, [constants.ROLE_ASSOCIATE_EDITOR])
+ acc.set_password(pw)
+ acc.save()
+
+ gn = "TodoAssociate Group " + un
+ eg = models.EditorGroup(**{
+ "name": gn
+ })
+ eg.add_associate(acc.id)
+ eg.save()
+
+ apps = build_applications(un)
+
+ return {
+ "account": {
+ "username": acc.id,
+ "password": pw
+ },
+ "editor_group": {
+ "id": eg.id,
+ "name": eg.name
+ },
+ "applications": apps
+ }
+
+ def teardown(self, params) -> dict:
+ models.Account.remove_by_id(params["account"]["username"])
+ models.EditorGroup.remove_by_id(params["editor_group"]["id"])
+ for nature, details in params["applications"].items():
+ for detail in details:
+ models.Application.remove_by_id(detail["id"])
+ return {"status": "success"}
+
+
+def build_applications(un):
+ w = 7 * 24 * 60 * 60  # one week in seconds
+
+ apps = {}
+
+ app = build_application(un + " Stalled Application", 3 * w, 3 * w,
+ constants.APPLICATION_STATUS_IN_PROGRESS, editor=un)
+ app.save()
+ apps["stalled"] = [{
+ "id": app.id,
+ "title": un + " Stalled Application"
+ }]
+
+ app = build_application(un + " Old Application", 6 * w, 6 * w, constants.APPLICATION_STATUS_IN_PROGRESS,
+ editor=un)
+ app.save()
+ apps["old"] = [{
+ "id": app.id,
+ "title": un + " Old Application"
+ }]
+
+ app = build_application(un + " Pending Application", 1 * w, 1 * w, constants.APPLICATION_STATUS_PENDING,
+ editor=un)
+ app.save()
+ apps["pending"] = [{
+ "id": app.id,
+ "title": un + " Pending Application"
+ }]
+
+ app = build_application(un + " All Other Applications", 2 * w, 2 * w,
+ constants.APPLICATION_STATUS_IN_PROGRESS, editor=un)
+ app.save()
+ apps["all"] = [{
+ "id": app.id,
+ "title": un + " All Other Applications"
+ }]
+
+ return apps
+
+
+def build_application(title, lmu_diff, cd_diff, status, editor=None):
+ source = ApplicationFixtureFactory.make_application_source()
+ ap = models.Application(**source)
+ ap.bibjson().title = title
+ ap.remove_current_journal()
+ ap.remove_related_journal()
+ ap.application_type = constants.APPLICATION_TYPE_NEW_APPLICATION
+ ap.set_id(ap.makeid())
+ ap.set_last_manual_update(dates.before(datetime.utcnow(), lmu_diff))
+ ap.set_created(dates.before(datetime.utcnow(), cd_diff))
+ ap.set_application_status(status)
+
+ if editor is not None:
+ ap.set_editor(editor)
+
+ ap.save()
+ return ap
diff --git a/doajtest/unit/api_tests/test_apiv3_bulk_application.py b/doajtest/unit/api_tests/test_apiv3_bulk_application.py
index 44c0425324..cc43a6bdac 100644
--- a/doajtest/unit/api_tests/test_apiv3_bulk_application.py
+++ b/doajtest/unit/api_tests/test_apiv3_bulk_application.py
@@ -31,7 +31,7 @@ def test_01_create_applications_success(self):
assert len(ids) == 10
# let the index catch up
- time.sleep(2)
+ time.sleep(1)
# check that each id was actually created
for _id in ids:
@@ -82,14 +82,14 @@ def test_03_delete_application_success(self):
ids = ApplicationsBulkApi.create(dataset, account)
# let the index catch up
- time.sleep(2)
+ time.sleep(1)
# now delete half of them
dels = ids[:5]
ApplicationsBulkApi.delete(dels, account)
# let the index catch up
- time.sleep(2)
+ time.sleep(1)
for _id in dels:
ap = models.Suggestion.pull(_id)
@@ -115,7 +115,7 @@ def test_04_delete_applications_fail(self):
ids = ApplicationsBulkApi.create(dataset, account)
# let the index catch up
- time.sleep(2)
+ time.sleep(1)
# call delete on the object in various context that will fail
@@ -138,7 +138,7 @@ def test_04_delete_applications_fail(self):
created = models.Suggestion.pull(ids[3])
created.set_application_status(constants.APPLICATION_STATUS_ACCEPTED)
created.save()
- time.sleep(2)
+ time.sleep(1)
with self.assertRaises(Api400Error):
ApplicationsBulkApi.delete(ids, account)
diff --git a/doajtest/unit/api_tests/test_apiv3_crud_application.py b/doajtest/unit/api_tests/test_apiv3_crud_application.py
index 00f91864e5..8478cacce9 100644
--- a/doajtest/unit/api_tests/test_apiv3_crud_application.py
+++ b/doajtest/unit/api_tests/test_apiv3_crud_application.py
@@ -146,7 +146,7 @@ def test_02_create_application_success(self):
assert "LOCKSS" in preservation.get("service")
assert "A safe place" in preservation.get("service")
- time.sleep(2)
+ time.sleep(1)
s = models.Application.pull(a.id)
assert s is not None
@@ -169,7 +169,7 @@ def test_02a_create_application_success_variations(self):
# check that it got created successfully
assert isinstance(a, models.Application)
- time.sleep(2)
+ time.sleep(1)
s = models.Application.pull(a.id)
assert s is not None
@@ -307,7 +307,7 @@ def test_03a_create_application_dryrun(self):
# call create on the object, with the dry_run flag set
a = ApplicationsCrudApi.create(data, account, dry_run=True)
- time.sleep(2)
+ time.sleep(1)
# now check that the application index remains empty
ss = [x for x in models.Application.iterall()]
@@ -428,7 +428,7 @@ def test_07_retrieve_application_fail(self):
data = ApplicationFixtureFactory.make_update_request_source()
ap = models.Application(**data)
ap.save()
- time.sleep(2)
+ time.sleep(1)
# no user
with self.assertRaises(Api401Error):
@@ -460,7 +460,7 @@ def test_08_update_application_success(self):
a = ApplicationsCrudApi.create(data, account)
# let the index catch up
- time.sleep(2)
+ time.sleep(1)
# get a copy of the newly created version for use in assertions later
created = models.Application.pull(a.id)
@@ -475,7 +475,7 @@ def test_08_update_application_success(self):
assert a2 != a
# let the index catch up
- time.sleep(2)
+ time.sleep(1)
# get a copy of the updated version
updated = models.Application.pull(a.id)
@@ -497,7 +497,7 @@ def test_09_update_application_fail(self):
a = ApplicationsCrudApi.create(data, account)
# let the index catch up
- time.sleep(2)
+ time.sleep(1)
# get a copy of the newly created version for use in assertions later
created = models.Application.pull(a.id)
@@ -526,7 +526,7 @@ def test_09_update_application_fail(self):
# on one with a disallowed workflow status
created.set_application_status(constants.APPLICATION_STATUS_ACCEPTED)
created.save()
- time.sleep(2)
+ time.sleep(1)
account.add_role("publisher")
with self.assertRaises(Api403Error):
@@ -546,13 +546,13 @@ def test_10_delete_application_success(self):
a = ApplicationsCrudApi.create(data, account)
# let the index catch up
- time.sleep(2)
+ time.sleep(1)
# now delete it
ApplicationsCrudApi.delete(a.id, account)
# let the index catch up
- time.sleep(2)
+ time.sleep(1)
ap = models.Application.pull(a.id)
assert ap is None
@@ -571,7 +571,7 @@ def test_11_delete_application_fail(self):
a = ApplicationsCrudApi.create(data, account)
# let the index catch up
- time.sleep(2)
+ time.sleep(1)
# get a copy of the newly created version for use in test later
created = models.Application.pull(a.id)
@@ -595,7 +595,7 @@ def test_11_delete_application_fail(self):
# on one with a disallowed workflow status
created.set_application_status(constants.APPLICATION_STATUS_ACCEPTED)
created.save()
- time.sleep(2)
+ time.sleep(1)
with self.assertRaises(Api403Error):
ApplicationsCrudApi.delete(a.id, account)
@@ -614,13 +614,13 @@ def test_12_delete_application_dryrun(self):
a = ApplicationsCrudApi.create(data, account)
# let the index catch up
- time.sleep(2)
+ time.sleep(1)
# now delete it with the dry run flag
ApplicationsCrudApi.delete(a.id, account, dry_run=True)
# let the index catch up
- time.sleep(2)
+ time.sleep(1)
ap = models.Application.pull(a.id)
assert ap is not None
@@ -663,7 +663,7 @@ def test_13_create_application_update_request_success(self):
assert "LOCKSS" in preservation_services
assert "A safe place" in preservation_services, "Expected: 'A safe place', found: {}".format(preservation_services)
- time.sleep(2)
+ time.sleep(1)
s = models.Application.pull(a.id)
assert s is not None
@@ -712,7 +712,7 @@ def test_15_create_application_update_request_dryrun(self):
# call create on the object, with the dry_run flag set
a = ApplicationsCrudApi.create(data, account, dry_run=True)
- time.sleep(2)
+ time.sleep(1)
# now check that the application index remains empty
ss = [x for x in models.Application.iterall()]
@@ -739,7 +739,7 @@ def test_16_update_application_update_request_success(self):
a = ApplicationsCrudApi.create(data, account)
# let the index catch up
- time.sleep(2)
+ time.sleep(1)
# get a copy of the newly created version for use in assertions later
created = models.Application.pull(a.id)
@@ -754,7 +754,7 @@ def test_16_update_application_update_request_success(self):
assert a2 != a
# let the index catch up
- time.sleep(2)
+ time.sleep(1)
# get a copy of the updated version
updated = models.Application.pull(a.id)
@@ -785,7 +785,7 @@ def test_17_update_application_update_request_fail(self):
a = ApplicationsCrudApi.create(data, account)
# let the index catch up
- time.sleep(2)
+ time.sleep(1)
# get a copy of the newly created version for use in assertions later
created = models.Application.pull(a.id)
@@ -915,3 +915,70 @@ def test_18_applications_currency_validator(self):
data=json.dumps(data))
assert resp.status_code == 400, resp.status_code
assert resp.json['error'].startswith("Field 'title' is required but not present at '[root]bibjson.'")
+
+ def test_19_applications_language_validator(self):
+ """ Ensure we get the correct validation messages via the API """
+ account = models.Account()
+ account.set_id("test")
+ account.set_name("Tester")
+ account.set_email("test@test.com")
+ account.set_role(["publisher", "api"])
+ api_key = account.generate_api_key()
+ account.save(blocking=True)
+
+ data = ApplicationFixtureFactory.incoming_application()
+
+ # Invalid language error comes from the model
+ data['bibjson']['language'][0] = 'mumbling quietly'
+ with self.assertRaises(Api400Error) as e2:
+ ApplicationsCrudApi.create(data, account=account)
+ assert str(e2.exception).startswith("Coerce with")
diff --git a/doajtest/unit/test_scripts_link_checker_report.py b/doajtest/unit/test_scripts_link_checker_report.py
--- a/doajtest/unit/test_scripts_link_checker_report.py
+++ b/doajtest/unit/test_scripts_link_checker_report.py
@@ -1,94 +1,94 @@
-import os
-import pandas as pd
-from doajtest.helpers import DoajTestCase
-from portality.scripts import journal_urls, link_checker_report as report
-
-
-class TestCSVtoHTML(DoajTestCase):
- def setUp(self):
- self.df = pd.DataFrame({
- 'Journal ID': [1, 2, 3],
- 'Journal URL': ['http://example1.com', 'http://example2.com', None],
- 'URL in DOAJ': ['http://example1.com', 'http://example2.com', None]
- })
- self.df.to_csv('test_data.csv', index=False)
- self.file_name_base = 'test_file'
- self.rows_count = 2
-
- def test_get_csv_file_name(self):
- csv_file_name = journal_urls.get_csv_file_name()
- self.assertEqual(csv_file_name, 'doaj_journals_links.csv')
-
- def test_add_link(self):
- df_test = journal_urls.add_link(self.df.copy(), 'Journal URL')
- df_expected = self.df.copy()
- df_expected['Journal URL'] = ['http://example1.com', 'http://example2.com', ""]
- pd.testing.assert_frame_equal(df_test, df_expected)
-
- def test_select_columns(self):
- columns = ['Journal ID', 'Journal URL']
- df_test = journal_urls.select_columns(self.df, columns)
- df_expected = self.df.loc[:, columns]
- pd.testing.assert_frame_equal(df_test, df_expected)
-
- def test_read_csv(self):
- self.df.to_csv('test_data.csv', index=False)
- df_test = journal_urls.read_csv('test_data.csv')
- pd.testing.assert_frame_equal(df_test, self.df)
-
- def test_generate_html_files(self):
- journal_urls.generate_html_files(self.df, self.file_name_base, self.rows_count)
- for i in range(len(self.df) // self.rows_count):
- file_name = self.file_name_base + f'{i + 1}.html'
- self.assertTrue(os.path.exists(file_name))
-
- def tearDown(self):
- if os.path.exists('test_data.csv'):
- os.remove('test_data.csv')
- for i in range(len(self.df) // self.rows_count):
- file_name = self.file_name_base + f'{i + 1}.html'
- if os.path.exists(file_name):
- os.remove(file_name)
-
-
-class TestLinkCheck(DoajTestCase):
- def setUp(self):
- self.journal_df = pd.DataFrame({
- 'Journal title': ['Journal1', 'Journal2', 'Journal3'],
- 'Added on Date': ['01-01-2021', '02-02-2021', '03-03-2021'],
- 'Last updated Date': ['01-02-2021', '02-03-2021', '03-04-2021'],
- 'Url': ['http://example1.com', 'http://example2.com', 'http://example3.com']
- })
- self.report_df = pd.DataFrame({
- 'url': ['http://example1.com', 'http://example2.com'],
- 'broken_check': ['OK', 'Broken'],
- 'redirect_url': ['http://example1.com', 'http://example2.com'],
- 'redirect_type': ['301', '302']
- })
- self.report_values = pd.DataFrame({
- 'Url': ['http://example1.com', 'http://example2.com'],
- 'BrokenCheck': ['OK', 'Broken'],
- 'RedirectUrl': ['http://example1.com', 'http://example2.com'],
- 'RedirectType': ['301', '302']
- })
-
- def test_fetch_matching_rows(self):
- result = report.fetch_matching_rows(self.journal_df, self.report_df.loc[0].to_dict())
- expected_result = pd.DataFrame({
- 'Journal title': ['Journal1'],
- 'Added on Date': ['01-01-2021'],
- 'Last updated Date': ['01-02-2021'],
- 'Url': ['http://example1.com'],
- 'BrokenCheck': ['OK'],
- 'RedirectUrl': ['http://example1.com'],
- 'RedirectType': ['301']
- })
- pd.testing.assert_frame_equal(result, expected_result)
-
- def test_check_links(self):
- result = report.check_links(self.report_values, self.journal_df)
- expected_result = pd.concat([
- report.fetch_matching_rows(self.journal_df, self.report_df.loc[0].to_dict()),
- report.fetch_matching_rows(self.journal_df, self.report_df.loc[1].to_dict())
- ])
- pd.testing.assert_frame_equal(result, expected_result)
+# import os
+# import pandas as pd
+# from doajtest.helpers import DoajTestCase
+# from portality.scripts import journal_urls, link_checker_report as report
+#
+#
+# class TestCSVtoHTML(DoajTestCase):
+# def setUp(self):
+# self.df = pd.DataFrame({
+# 'Journal ID': [1, 2, 3],
+# 'Journal URL': ['http://example1.com', 'http://example2.com', None],
+# 'URL in DOAJ': ['http://example1.com', 'http://example2.com', None]
+# })
+# self.df.to_csv('test_data.csv', index=False)
+# self.file_name_base = 'test_file'
+# self.rows_count = 2
+#
+# def test_get_csv_file_name(self):
+# csv_file_name = journal_urls.get_csv_file_name()
+# self.assertEqual(csv_file_name, 'doaj_journals_links.csv')
+#
+# def test_add_link(self):
+# df_test = journal_urls.add_link(self.df.copy(), 'Journal URL')
+# df_expected = self.df.copy()
+# df_expected['Journal URL'] = ['http://example1.com', 'http://example2.com', ""]
+# pd.testing.assert_frame_equal(df_test, df_expected)
+#
+# def test_select_columns(self):
+# columns = ['Journal ID', 'Journal URL']
+# df_test = journal_urls.select_columns(self.df, columns)
+# df_expected = self.df.loc[:, columns]
+# pd.testing.assert_frame_equal(df_test, df_expected)
+#
+# def test_read_csv(self):
+# self.df.to_csv('test_data.csv', index=False)
+# df_test = journal_urls.read_csv('test_data.csv')
+# pd.testing.assert_frame_equal(df_test, self.df)
+#
+# def test_generate_html_files(self):
+# journal_urls.generate_html_files(self.df, self.file_name_base, self.rows_count)
+# for i in range(len(self.df) // self.rows_count):
+# file_name = self.file_name_base + f'{i + 1}.html'
+# self.assertTrue(os.path.exists(file_name))
+#
+# def tearDown(self):
+# if os.path.exists('test_data.csv'):
+# os.remove('test_data.csv')
+# for i in range(len(self.df) // self.rows_count):
+# file_name = self.file_name_base + f'{i + 1}.html'
+# if os.path.exists(file_name):
+# os.remove(file_name)
+#
+#
+# class TestLinkCheck(DoajTestCase):
+# def setUp(self):
+# self.journal_df = pd.DataFrame({
+# 'Journal title': ['Journal1', 'Journal2', 'Journal3'],
+# 'Added on Date': ['01-01-2021', '02-02-2021', '03-03-2021'],
+# 'Last updated Date': ['01-02-2021', '02-03-2021', '03-04-2021'],
+# 'Url': ['http://example1.com', 'http://example2.com', 'http://example3.com']
+# })
+# self.report_df = pd.DataFrame({
+# 'url': ['http://example1.com', 'http://example2.com'],
+# 'broken_check': ['OK', 'Broken'],
+# 'redirect_url': ['http://example1.com', 'http://example2.com'],
+# 'redirect_type': ['301', '302']
+# })
+# self.report_values = pd.DataFrame({
+# 'Url': ['http://example1.com', 'http://example2.com'],
+# 'BrokenCheck': ['OK', 'Broken'],
+# 'RedirectUrl': ['http://example1.com', 'http://example2.com'],
+# 'RedirectType': ['301', '302']
+# })
+#
+# def test_fetch_matching_rows(self):
+# result = report.fetch_matching_rows(self.journal_df, self.report_df.loc[0].to_dict())
+# expected_result = pd.DataFrame({
+# 'Journal title': ['Journal1'],
+# 'Added on Date': ['01-01-2021'],
+# 'Last updated Date': ['01-02-2021'],
+# 'Url': ['http://example1.com'],
+# 'BrokenCheck': ['OK'],
+# 'RedirectUrl': ['http://example1.com'],
+# 'RedirectType': ['301']
+# })
+# pd.testing.assert_frame_equal(result, expected_result)
+#
+# def test_check_links(self):
+# result = report.check_links(self.report_values, self.journal_df)
+# expected_result = pd.concat([
+# report.fetch_matching_rows(self.journal_df, self.report_df.loc[0].to_dict()),
+# report.fetch_matching_rows(self.journal_df, self.report_df.loc[1].to_dict())
+# ])
+# pd.testing.assert_frame_equal(result, expected_result)
diff --git a/doajtest/unit/test_task_discontinued_soon.py b/doajtest/unit/test_task_discontinued_soon.py
new file mode 100644
index 0000000000..3716151294
--- /dev/null
+++ b/doajtest/unit/test_task_discontinued_soon.py
@@ -0,0 +1,93 @@
+import unittest
+import datetime
+
+from doajtest.helpers import DoajTestCase, patch_config
+
+from portality import models
+from portality.tasks import find_discontinued_soon
+from portality.ui.messages import Messages
+from doajtest.fixtures import JournalFixtureFactory
+
+# Expect a notification for journals discontinuing in 1 day's time (tomorrow)
+DELTA = 1
+
+
+class TestDiscontinuedSoon(DoajTestCase):
+
+ @classmethod
+ def setUpClass(cls) -> None:
+ super().setUpClass()
+ cls.orig_config = patch_config(cls.app_test, {
+ 'DISCONTINUED_DATE_DELTA': DELTA
+ })
+
+ @classmethod
+ def tearDownClass(cls) -> None:
+ super().tearDownClass()
+ patch_config(cls.app_test, cls.orig_config)
+
+ @staticmethod
+ def _date_to_find():
+ return (datetime.datetime.today() + datetime.timedelta(days=DELTA)).strftime('%Y-%m-%d')
+
+ @staticmethod
+ def _date_too_late():
+ return (datetime.datetime.today() + datetime.timedelta(days=DELTA+1)).strftime('%Y-%m-%d')
+
+ def test_discontinued_soon_found(self):
+
+ # Both these should be found
+ journal_discontinued_to_found_1 = models.Journal(**JournalFixtureFactory.make_journal_source(in_doaj=True))
+ journal_discontinued_to_found_1.set_id("1")
+ jbib = journal_discontinued_to_found_1.bibjson()
+ jbib.title = "Discontinued Tomorrow 1"
+ jbib.discontinued_date = self._date_to_find()
+ journal_discontinued_to_found_1.save(blocking=True)
+
+ journal_discontinued_to_found_2 = models.Journal(**JournalFixtureFactory.make_journal_source(in_doaj=True))
+ journal_discontinued_to_found_2.set_id("2")
+ jbib = journal_discontinued_to_found_2.bibjson()
+ jbib.title = "Discontinued Tomorrow 2"
+ jbib.discontinued_date = self._date_to_find()
+ journal_discontinued_to_found_2.save(blocking=True)
+
+ # this one shouldn't be found
+ journal_discontinued_too_late = models.Journal(**JournalFixtureFactory.make_journal_source(in_doaj=True))
+ journal_discontinued_too_late.set_id("3")
+ jbib = journal_discontinued_too_late.bibjson()
+ jbib.title = "Discontinued In 2 days"
+ jbib.discontinued_date = self._date_too_late()
+ journal_discontinued_too_late.save(blocking=True)
+
+ job = find_discontinued_soon.FindDiscontinuedSoonBackgroundTask.prepare("system")
+ task = find_discontinued_soon.FindDiscontinuedSoonBackgroundTask(job)
+ task.run()
+
+ assert len(job.audit) == 3 # Journals 1 & 2, and a message to say notification is sent
+ assert job.audit[0]["message"] == Messages.DISCONTINUED_JOURNAL_FOUND_LOG.format(id="1")
+ assert job.audit[1]["message"] == Messages.DISCONTINUED_JOURNAL_FOUND_LOG.format(id="2")
+ assert job.audit[2]["message"] == Messages.DISCONTINUED_JOURNALS_FOUND_NOTIFICATION_SENT_LOG
+
+ def test_discontinued_soon_not_found(self):
+
+ # None of these should be found - this one discontinues in 2 days
+ journal_discontinued_too_late = models.Journal(**JournalFixtureFactory.make_journal_source(in_doaj=True))
+ journal_discontinued_too_late.set_id("1")
+ jbib = journal_discontinued_too_late.bibjson()
+ jbib.title = "Discontinued In 2 days"
+ jbib.discontinued_date = self._date_too_late()
+ journal_discontinued_too_late.save(blocking=True)
+
+ # this one is not in doaj
+ journal_not_in_doaj = models.Journal(**JournalFixtureFactory.make_journal_source(in_doaj=False))
+ journal_not_in_doaj.set_id("2")
+ jbib = journal_not_in_doaj.bibjson()
+ jbib.discontinued_date = self._date_to_find()
+ journal_not_in_doaj.save(blocking=True)
+
+ job = find_discontinued_soon.FindDiscontinuedSoonBackgroundTask.prepare("system")
+ task = find_discontinued_soon.FindDiscontinuedSoonBackgroundTask(job)
+ task.run()
+
+ assert len(job.audit) == 1
+ assert job.audit[0]["message"] == Messages.NO_DISCONTINUED_JOURNALS_FOUND_LOG
diff --git a/doajtest/unit/test_tasks_harvest.py b/doajtest/unit/test_tasks_harvest.py
index 2b9986fd93..7d0110fc8b 100644
--- a/doajtest/unit/test_tasks_harvest.py
+++ b/doajtest/unit/test_tasks_harvest.py
@@ -72,7 +72,7 @@ def test_harvest(self, mock_query):
task = HarvesterBackgroundTask(job)
BackgroundApi.execute(task)
- time.sleep(2)
+ time.sleep(1)
print(job.pretty_audit)
articles_saved = [a for a in self.journal.all_articles()]
@@ -124,7 +124,7 @@ def test_start_multiple(self, mock_query):
assert not mock_query.called, "mock_query was called when it shouldn't have been"
- time.sleep(2)
+ time.sleep(1)
job3 = models.BackgroundJob.pull(job2.id)
assert job3.status == "error", "expected 'error', got '{x}'".format(x=job3.status)
diff --git a/doajtest/unit/test_tasks_ingestCrossref442Articles.py b/doajtest/unit/test_tasks_ingestCrossref442Articles.py
index 8586c437f9..2714b33644 100644
--- a/doajtest/unit/test_tasks_ingestCrossref442Articles.py
+++ b/doajtest/unit/test_tasks_ingestCrossref442Articles.py
@@ -752,13 +752,13 @@ def test_26_run_validated(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None
@@ -798,13 +798,13 @@ def test_27_run_exists(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None
@@ -842,7 +842,7 @@ def test_29_submit_success(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
# scheduling does not result in immidiate execution for huey version > 2
# always eager mode is replaced by immediate mode
@@ -877,13 +877,13 @@ def test_31_crossref_run_fail_unmatched_issn(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None
@@ -928,13 +928,13 @@ def test_32_run_crossref_fail_shared_issn(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None
@@ -980,13 +980,13 @@ def test_33_run_fail_unowned_issn(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None
@@ -1028,13 +1028,13 @@ def test_34_crossref_journal_2_article_2_success(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None
@@ -1082,13 +1082,13 @@ def test_35_crossref_journal_2_article_1_success(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None
@@ -1134,13 +1134,13 @@ def test_37_crossref_journal_1_article_1_success(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None
@@ -1180,13 +1180,13 @@ def test_38_crossref_journal_2_article_2_1_different_success(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None
@@ -1234,13 +1234,13 @@ def test_39_crossref_2_journals_different_owners_both_issns_fail(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None
@@ -1291,13 +1291,13 @@ def test_40_crossref_2_journals_different_owners_issn_each_fail(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None
@@ -1355,13 +1355,13 @@ def test_41_crossref_2_journals_same_owner_issn_each_success(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None
@@ -1413,13 +1413,13 @@ def test_42_crossref_2_journals_different_owners_different_issns_mixed_article_f
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None
@@ -1475,7 +1475,7 @@ def test_43_duplication(self):
self.cleanup_ids.append(id2)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task1 = ingestarticles.IngestArticlesBackgroundTask(job1)
task2 = ingestarticles.IngestArticlesBackgroundTask(job2)
@@ -1484,7 +1484,7 @@ def test_43_duplication(self):
task2.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu1 = models.FileUpload.pull(id1)
fu2 = models.FileUpload.pull(id2)
@@ -1529,13 +1529,13 @@ def test_44_journal_1_article_1_superlong_noclip(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
@@ -1588,13 +1588,13 @@ def test_45_crossref_journal_1_article_1_superlong_clip(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None
@@ -1641,13 +1641,13 @@ def test_46_one_journal_one_article_2_issns_one_unknown(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None
@@ -1698,13 +1698,13 @@ def test_47_crossref_lcc_spelling_error(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None
@@ -1749,13 +1749,13 @@ def test_48_crossref_unknown_journal_issn(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None
@@ -1860,21 +1860,16 @@ def test_49_1_determine_issns_types(self):
id = job.params.get("ingest_articles__file_upload_id")
self.cleanup_ids.append(id)
models.FileUpload.block(id)
- # because file upload gets created and saved by prepare
- # time.sleep(2)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
- # because file upload needs to be re-saved
- # time.sleep(2)
-
fu = models.FileUpload.pull(id)
assert fu.status == "processed", "expected 'processed', received: {}, error code: {}".format(fu.status, fu.error)
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
found = [a for a in models.Article.find_by_issns(["9876-5432", "1234-5678"])]
@@ -2050,20 +2045,20 @@ def test_52_html_tags_in_title_text(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None
assert fu.status == "processed", "fu.status expected processed, received: {}".format(fu.status)
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
found = [a for a in models.Article.find_by_issns(["9876-5432", "1234-5678"])]
diff --git a/doajtest/unit/test_tasks_ingestCrossref531Articles.py b/doajtest/unit/test_tasks_ingestCrossref531Articles.py
index be64d22c8e..27308a3d22 100644
--- a/doajtest/unit/test_tasks_ingestCrossref531Articles.py
+++ b/doajtest/unit/test_tasks_ingestCrossref531Articles.py
@@ -764,13 +764,13 @@ def test_26_run_validated(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None
@@ -810,13 +810,13 @@ def test_27_run_exists(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None
@@ -855,7 +855,7 @@ def test_29_submit_success(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
# scheduling does not result in immediate execution for huey version > 2
# always eager mode is replaced by immediate mode
@@ -896,13 +896,13 @@ def test_29_submit_multiple_affs(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
found = [a for a in models.Article.find_by_issns(["1234-5678"])]
assert len(found) == 1
diff --git a/doajtest/unit/test_tasks_ingestDOAJarticles.py b/doajtest/unit/test_tasks_ingestDOAJarticles.py
index cc8e0efeba..2872124a47 100644
--- a/doajtest/unit/test_tasks_ingestDOAJarticles.py
+++ b/doajtest/unit/test_tasks_ingestDOAJarticles.py
@@ -712,13 +712,13 @@ def test_26_run_validated(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None
@@ -749,13 +749,13 @@ def test_27_run_exists(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None
@@ -801,7 +801,7 @@ def test_29_submit_success(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
# this assumes that huey is in always eager mode, and thus this immediately calls the async task,
# which in turn calls execute, which ultimately calls run
@@ -838,13 +838,13 @@ def test_31_doaj_run_fail_unmatched_issn(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None
@@ -889,13 +889,13 @@ def test_32_run_doaj_fail_shared_issn(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None
@@ -941,13 +941,13 @@ def test_33_run_fail_unowned_issn(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None
@@ -983,13 +983,13 @@ def test_34_doaj_journal_2_article_2_success(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None
@@ -1031,13 +1031,13 @@ def test_35_doaj_journal_2_article_1_success(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None
@@ -1077,13 +1077,13 @@ def test_37_doaj_journal_1_article_1_success(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None
@@ -1125,13 +1125,13 @@ def test_38_doaj_journal_2_article_2_1_different_success(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None
@@ -1180,13 +1180,13 @@ def test_39_doaj_2_journals_different_owners_both_issns_fail(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None
@@ -1236,13 +1236,13 @@ def test_40_doaj_2_journals_different_owners_issn_each_fail(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None
@@ -1291,13 +1291,13 @@ def test_41_doaj_2_journals_same_owner_issn_each_success(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None
@@ -1347,13 +1347,13 @@ def test_42_doaj_2_journals_different_owners_different_issns_mixed_article_fail(
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None
@@ -1401,7 +1401,7 @@ def test_43_doaj_duplication(self):
self.cleanup_ids.append(id2)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task1 = ingestarticles.IngestArticlesBackgroundTask(job1)
task2 = ingestarticles.IngestArticlesBackgroundTask(job2)
@@ -1410,7 +1410,7 @@ def test_43_doaj_duplication(self):
task2.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu1 = models.FileUpload.pull(id1)
fu2 = models.FileUpload.pull(id2)
@@ -1446,13 +1446,13 @@ def test_44_doaj_journal_1_article_1_superlong_noclip(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None
@@ -1494,13 +1494,13 @@ def test_doaj_45_journal_1_article_1_superlong_clip(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None
@@ -1543,13 +1543,13 @@ def test_46_doaj_one_journal_one_article_2_issns_one_unknown(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None
@@ -1592,13 +1592,13 @@ def test_47_doaj_lcc_spelling_error(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None, 'expected FileUpload is not None, received: {}'.format(fu)
@@ -1642,13 +1642,13 @@ def test_48_doaj_unknown_journal_issn(self):
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
- time.sleep(2)
+ time.sleep(1)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
- time.sleep(2)
+ time.sleep(1)
fu = models.FileUpload.pull(id)
assert fu is not None
diff --git a/doajtest/unit/test_tasks_sitemap.py b/doajtest/unit/test_tasks_sitemap.py
index 64c846f848..9a2b2476bf 100644
--- a/doajtest/unit/test_tasks_sitemap.py
+++ b/doajtest/unit/test_tasks_sitemap.py
@@ -27,7 +27,7 @@ def test_01_sitemap(self):
job = sitemap.SitemapBackgroundTask.prepare(user)
task = sitemap.SitemapBackgroundTask(job)
BackgroundApi.execute(task)
- time.sleep(2)
+ time.sleep(1)
assert len(self.mainStore.list(self.container_id)) == 1
def test_prepare__queue_id(self):
diff --git a/doajtest/unit/test_tick.py b/doajtest/unit/test_tick.py
index fb67b15528..11a813ff31 100644
--- a/doajtest/unit/test_tick.py
+++ b/doajtest/unit/test_tick.py
@@ -47,7 +47,7 @@ def setUp(self):
# Refresh the type to force changes in the index, then wait for it to be done
models.Journal.refresh()
models.Suggestion.refresh()
- time.sleep(2)
+ time.sleep(1)
def tearDown(self):
super(TestTick, self).tearDown()
diff --git a/doajtest/unit/test_withdraw_reinstate.py b/doajtest/unit/test_withdraw_reinstate.py
index b1928927d6..9c4464ceb0 100644
--- a/doajtest/unit/test_withdraw_reinstate.py
+++ b/doajtest/unit/test_withdraw_reinstate.py
@@ -45,12 +45,12 @@ def test_01_withdraw_task(self):
UPDATE_REQUEST_SOURCE_TEST_1 = ApplicationFixtureFactory.make_update_request_source()
application = models.Application(**UPDATE_REQUEST_SOURCE_TEST_1)
- time.sleep(2)
+ time.sleep(1)
job = SetInDOAJBackgroundTask.prepare(account.id, journal_ids=ids, in_doaj=False)
SetInDOAJBackgroundTask.submit(job)
- time.sleep(2)
+ time.sleep(1)
for id in ids:
j = models.Journal.pull(id)
@@ -76,12 +76,12 @@ def test_02_reinstate_task(self):
a.save()
articles.append(a.id)
- time.sleep(2)
+ time.sleep(1)
job = SetInDOAJBackgroundTask.prepare("testuser", journal_ids=ids, in_doaj=True)
SetInDOAJBackgroundTask.submit(job)
- time.sleep(2)
+ time.sleep(1)
for id in ids:
j = models.Journal.pull(id)
@@ -113,7 +113,7 @@ def test_03_withdraw(self):
a.save()
articles.append(a.id)
- time.sleep(2)
+ time.sleep(1)
change_in_doaj(ids, False)
@@ -149,7 +149,7 @@ def test_04_reinstate(self):
a.save()
articles.append(a.id)
- time.sleep(2)
+ time.sleep(1)
change_in_doaj(ids, True)
@@ -176,12 +176,12 @@ def test_05_withdraw_with_ur(self):
application = models.Application(**UPDATE_REQUEST_SOURCE)
application.save()
- time.sleep(2)
+ time.sleep(1)
job = SetInDOAJBackgroundTask.prepare(account.id, journal_ids=[j.id], in_doaj=False)
SetInDOAJBackgroundTask.submit(job)
- time.sleep(2)
+ time.sleep(1)
j = models.Journal.pull(j.id)
assert j.is_in_doaj() is False
diff --git a/portality/api/current/data_objects/application.py b/portality/api/current/data_objects/application.py
index ff9319f179..a88e9ff32c 100644
--- a/portality/api/current/data_objects/application.py
+++ b/portality/api/current/data_objects/application.py
@@ -66,6 +66,10 @@
"structs": {
"bibjson": {
+ "lists": {
+ # override for lax language enforcement in the core, making it strict for incoming applications
+ "language": {"contains": "field", "coerce": "isolang_2letter_strict"}
+ },
"required": [
"copyright",
"deposit_policy",
diff --git a/portality/api/current/data_objects/article.py b/portality/api/current/data_objects/article.py
index d3fca1493d..77bf6a7d7d 100644
--- a/portality/api/current/data_objects/article.py
+++ b/portality/api/current/data_objects/article.py
@@ -242,7 +242,7 @@ def custom_validate(self):
# check if orcid id is valid
for author in self.bibjson.author:
if author.orcid_id is not None and regex.ORCID_COMPILED.match(author.orcid_id) is None:
- raise dataobj.DataStructureException("Invalid ORCID iD format. Please use url format, eg: https://orcid.org/0001-1111-1111-1111")
+ raise dataobj.DataStructureException("Invalid ORCID iD. Please enter your ORCID iD structured as: https://orcid.org/0000-0000-0000-0000. URLs must start with https.")
for x in self.bibjson.identifier:
if x.type == "doi":
diff --git a/portality/app.py b/portality/app.py
index 5e226220b7..9f6b4d2466 100644
--- a/portality/app.py
+++ b/portality/app.py
@@ -47,6 +47,9 @@
from portality.lib.normalise import normalise_doi
from portality.view.dashboard import blueprint as dashboard
+if app.config.get("DEBUG", False) and app.config.get("TESTDRIVE_ENABLED", False):
+ from portality.view.testdrive import blueprint as testdrive
+
app.register_blueprint(account, url_prefix='/account') #~~->Account:Blueprint~~
app.register_blueprint(admin, url_prefix='/admin') #~~-> Admin:Blueprint~~
app.register_blueprint(publisher, url_prefix='/publisher') #~~-> Publisher:Blueprint~~
@@ -76,6 +79,10 @@
app.register_blueprint(atom) # ~~-> Atom:Blueprint~~
app.register_blueprint(doaj) # ~~-> DOAJ:Blueprint~~
+if app.config.get("DEBUG", False) and app.config.get("TESTDRIVE_ENABLED", False):
+ app.logger.warning('Enabling TESTDRIVE at /testdrive')
+ app.register_blueprint(testdrive, url_prefix="/testdrive") # ~~-> Testdrive:Feature ~~
+
# initialise the index - don't put into if __name__ == '__main__' block,
# because that does not run if gunicorn is loading the app, as opposed
# to the app being run directly by python portality/app.py
@@ -278,6 +285,10 @@ def form_diff_table_subject_expand(val):
return ", ".join(results)
+@app.template_filter("is_in_the_past")
+def is_in_the_past(dttm):
+ return dates.is_before(dttm, dates.today())
+
#######################################################
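The `is_in_the_past` filter above simply compares a date against `dates.today()` via `dates.is_before` (added later in this diff). A quick sketch of its behaviour; the Jinja usage and the `discontinued_date` field name are illustrative assumptions:

```python
# Illustrative sketch of the template filter's behaviour.
from portality.lib import dates

def is_in_the_past(dttm):
    return dates.is_before(dttm, dates.today())

print(is_in_the_past("2020-01-01"))  # True: before today
print(is_in_the_past("2999-12-31"))  # False: in the future

# In a template (hypothetical field name):
#   {% if journal.discontinued_date | is_in_the_past %} discontinued {% endif %}
```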
diff --git a/portality/bll/services/background_task_status.py b/portality/bll/services/background_task_status.py
index eb60ddeaa8..486fdb1d84 100644
--- a/portality/bll/services/background_task_status.py
+++ b/portality/bll/services/background_task_status.py
@@ -61,7 +61,8 @@ def create_queued_status(self, action, total=2, oldest=1200, **_) -> dict:
err_msgs = []
limited_oldest_date = dates.before_now(oldest)
if oldest_job and oldest_job.created_timestamp < limited_oldest_date:
- err_msgs.append('outdated job found. created_timestamp[{} < {}]'.format(
+ err_msgs.append('outdated queued job found[{}]. created_timestamp[{} < {}]'.format(
+ oldest_job.id,
oldest_job.created_timestamp,
limited_oldest_date
))
diff --git a/portality/bll/services/events.py b/portality/bll/services/events.py
index 2b27b85beb..3c5e96473c 100644
--- a/portality/bll/services/events.py
+++ b/portality/bll/services/events.py
@@ -21,6 +21,7 @@
from portality.events.consumers.journal_editor_group_assigned_notify import JournalEditorGroupAssignedNotify
from portality.events.consumers.application_publisher_inprogress_notify import ApplicationPublisherInprogressNotify
from portality.events.consumers.update_request_publisher_rejected_notify import UpdateRequestPublisherRejectedNotify
+from portality.events.consumers.journal_discontinuing_soon_notify import JournalDiscontinuingSoonNotify
class EventsService(object):
@@ -44,7 +45,8 @@ class EventsService(object):
JournalEditorGroupAssignedNotify,
UpdateRequestPublisherAcceptedNotify,
UpdateRequestPublisherAssignedNotify,
- UpdateRequestPublisherRejectedNotify
+ UpdateRequestPublisherRejectedNotify,
+ JournalDiscontinuingSoonNotify
]
def __init__(self):
diff --git a/portality/constants.py b/portality/constants.py
index ec908a4e82..ce7ed5e406 100644
--- a/portality/constants.py
+++ b/portality/constants.py
@@ -53,7 +53,9 @@
EVENT_APPLICATION_EDITOR_GROUP_ASSIGNED = "application:editor_group:assigned"
EVENT_JOURNAL_ASSED_ASSIGNED = "journal:assed:assigned"
EVENT_JOURNAL_EDITOR_GROUP_ASSIGNED = "journal:editor_group:assigned"
+EVENT_JOURNAL_DISCONTINUING_SOON = "journal:discontinuing_soon"
+NOTIFICATION_CLASSIFICATION_STATUS = "alert"
NOTIFICATION_CLASSIFICATION_STATUS_CHANGE = "status_change"
NOTIFICATION_CLASSIFICATION_ASSIGN = "assign"
NOTIFICATION_CLASSIFICATION_CREATE = "create"
diff --git a/portality/events/combined.py b/portality/events/combined.py
new file mode 100644
index 0000000000..869d63ab88
--- /dev/null
+++ b/portality/events/combined.py
@@ -0,0 +1,11 @@
+from portality.events.shortcircuit import send_event as shortcircuit_send_event
+from portality.core import app
+
+
+def send_event(event):
+ try:
+ from portality.events.kafka_producer import send_event as kafka_send_event
+ kafka_send_event(event)
+ except Exception as e:
+ app.logger.exception("Failed to send event to Kafka. " + str(e))
+ shortcircuit_send_event(event)
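Worth noting the design here: Kafka delivery is best-effort (any exception is logged and swallowed) and the in-process short-circuit sender always runs afterwards, so events are still handled when the broker is unavailable. Presumably this module is selected via configuration; the setting name below is an assumption, not shown in this diff:

```python
# Hypothetical configuration hook (setting name assumed):
EVENT_SEND_FUNCTION = "portality.events.combined.send_event"
```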
diff --git a/portality/events/consumers/journal_discontinuing_soon_notify.py b/portality/events/consumers/journal_discontinuing_soon_notify.py
new file mode 100644
index 0000000000..11da31bb96
--- /dev/null
+++ b/portality/events/consumers/journal_discontinuing_soon_notify.py
@@ -0,0 +1,55 @@
+# ~~JournalDiscontinuingSoonNotify:Consumer~~
+from portality.util import url_for
+from portality.events.consumer import EventConsumer
+from portality.core import app
+from portality import constants
+from portality import models
+from portality.bll import DOAJ
+
+class JournalDiscontinuingSoonNotify(EventConsumer):
+ ID = "journal:assed:discontinuing_soon:notify"
+
+ @classmethod
+ def consumes(cls, event):
+ return event.id == constants.EVENT_JOURNAL_DISCONTINUING_SOON and \
+ event.context.get("journal") is not None and \
+ event.context.get("discontinue_date") is not None
+
+ @classmethod
+ def consume(cls, event):
+ journal_id = event.context.get("journal")
+ discontinued_date = event.context.get("discontinue_date")
+
+ journal = models.Journal.pull(journal_id)
+ if journal is None:
+ return
+
+ if not journal.editor_group:
+ return
+
+        eg = models.EditorGroup.pull_by_key("name", journal.editor_group)
+        if eg is None:
+            return
+        managing_editor = eg.maned
+ if not managing_editor:
+ return
+
+ # ~~-> Notifications:Service ~~
+ svc = DOAJ.notificationsService()
+
+ notification = models.Notification()
+ notification.who = managing_editor
+ notification.created_by = cls.ID
+ notification.classification = constants.NOTIFICATION_CLASSIFICATION_STATUS
+ notification.long = svc.long_notification(cls.ID).format(
+            days=app.config.get('DISCONTINUED_DATE_DELTA', 0),
+ title=journal.bibjson().title,
+ id=journal.id
+ )
+ notification.short = svc.short_notification(cls.ID)
+ notification.action = url_for("admin.journal_page", journal_id=journal.id)
+
+ svc.notify(notification)
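For reference, a hedged sketch of an event that would pass the `consumes()` checks above. The `Event(raw=...)` constructor mirrors its use in the Kafka consumer below; the import path and the journal id are assumptions:

```python
from portality import constants
from portality.bll import DOAJ
from portality.models import Event  # assumption: import path for the Event model

event = Event(raw={
    "id": constants.EVENT_JOURNAL_DISCONTINUING_SOON,
    "context": {
        "journal": "abcdef1234567890",   # hypothetical journal id
        "discontinue_date": "2023-09-01"
    }
})
DOAJ.eventsService().consume(event)  # routes to JournalDiscontinuingSoonNotify
```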
diff --git a/portality/events/kafka_consumer.py b/portality/events/kafka_consumer.py
index 77c812b6e2..0ce1e1120e 100644
--- a/portality/events/kafka_consumer.py
+++ b/portality/events/kafka_consumer.py
@@ -11,13 +11,19 @@
app = faust.App('events', broker=broker, value_serializer='json')
topic = app.topic(topic_name)
+event_counter = 0
+
@app.agent(topic)
async def handle_event(stream):
+ global event_counter
with doajapp.test_request_context("/"):
svc = DOAJ.eventsService()
async for event in stream:
- svc.consume(Event(raw=json.loads(event)))
+ event_counter += 1
+ doajapp.logger.info(f"Kafka event count {event_counter}")
+ # TODO uncomment the following line once the Event model is fixed to work with Kafka
+ # svc.consume(Event(raw=json.loads(event)))
if __name__ == '__main__':
diff --git a/portality/forms/application_forms.py b/portality/forms/application_forms.py
index c290533678..0065b765fc 100644
--- a/portality/forms/application_forms.py
+++ b/portality/forms/application_forms.py
@@ -37,7 +37,8 @@
OwnerExists,
NoScriptTag,
Year,
- CurrentISOCurrency
+ CurrentISOCurrency,
+ CurrentISOLanguage
)
from portality.lib import dates
from portality.lib.formulaic import Formulaic, WTFormsBuilder, FormulaicContext, FormulaicField
@@ -250,7 +251,8 @@ class FieldDefinitions:
],
"widgets" : [
"trim_whitespace", # ~~^-> TrimWhitespace:FormWidget~~
- "full_contents" # ~~^->FullContents:FormWidget~~
+ "full_contents", # ~~^->FullContents:FormWidget~~
+ "issn_link" # ~~^->IssnLink:FormWidget~~
],
"contexts": {
"public" : {
@@ -320,7 +322,8 @@ class FieldDefinitions:
],
"widgets" : [
"trim_whitespace", # ~~^-> TrimWhitespace:FormWidget~~
- "full_contents" # ~~^->FullContents:FormWidget~~
+ "full_contents", # ~~^->FullContents:FormWidget~~
+ "issn_link" # ~~^->IssnLink:FormWidget~~
],
"contexts": {
"public" : {
@@ -420,7 +423,8 @@ class FieldDefinitions:
"initial": 5
},
"validate": [
- {"required": {"message": "Enter at least one language"}}
+ {"required": {"message": "Enter at least one language"}},
+ "current_iso_language"
],
"widgets": [
{"select": {}},
@@ -1364,8 +1368,7 @@ class FieldDefinitions:
{"field": "deposit_policy", "value": "other"}],
"help": {
"doaj_criteria": "You must provide a URL",
- "short_help": "Link to the policy in a directory or on the "
- "publisher’s site",
+ "short_help": "Provide the link to the policy in the selected directory. Or select 'Other' and provide a link to the information on your website.",
"placeholder": "https://www.my-journal.com/about#repository_policy"
},
"validate": [
@@ -2853,6 +2856,16 @@ def render(settings, html_attrs):
def wtforms(field, settings):
return CurrentISOCurrency(settings.get("message"))
+
+class CurrentISOLanguageBuilder:
+ @staticmethod
+ def render(settings, html_attrs):
+ pass
+
+ @staticmethod
+ def wtforms(field, settings):
+ return CurrentISOLanguage(settings.get("message"))
+
#########################################################
# Crosswalks
#########################################################
@@ -2915,7 +2928,8 @@ def wtforms(field, settings):
"owner_exists" : OwnerExistsBuilder.wtforms,
"no_script_tag": NoScriptTagBuilder.wtforms,
"year": YearBuilder.wtforms,
- "current_iso_currency": CurrentISOCurrencyBuilder.wtforms
+ "current_iso_currency": CurrentISOCurrencyBuilder.wtforms,
+ "current_iso_language": CurrentISOLanguageBuilder.wtforms
}
}
}
@@ -2933,7 +2947,8 @@ def wtforms(field, settings):
"full_contents" : "formulaic.widgets.newFullContents", # ~~^->FullContents:FormWidget~~
"load_editors" : "formulaic.widgets.newLoadEditors", # ~~-> LoadEditors:FormWidget~~
"trim_whitespace" : "formulaic.widgets.newTrimWhitespace", # ~~-> TrimWhitespace:FormWidget~~
- "note_modal" : "formulaic.widgets.newNoteModal" # ~~-> NoteModal:FormWidget~~
+ "note_modal" : "formulaic.widgets.newNoteModal", # ~~-> NoteModal:FormWidget~~,
+ "issn_link" : "formulaic.widgets.newIssnLink" # ~~-> IssnLink:FormWidget~~,
}
diff --git a/portality/forms/article_forms.py b/portality/forms/article_forms.py
index abecdc55f8..e15f44f32e 100644
--- a/portality/forms/article_forms.py
+++ b/portality/forms/article_forms.py
@@ -516,7 +516,7 @@ def _render_checkbox(self, field, **kwargs):
EMAIL_CONFIRM_ERROR = 'Please double check the email addresses - they do not match.'
DATE_ERROR = "Date must be supplied in the form YYYY-MM-DD"
DOI_ERROR = 'Invalid DOI. A DOI can optionally start with a prefix (such as "doi:"), followed by "10." and the remainder of the identifier'
-ORCID_ERROR = "Invalid ORCID iD. Please enter your ORCID iD as a full URL of the form https://orcid.org/0000-0000-0000-0000"
+ORCID_ERROR = "Invalid ORCID iD. Please enter your ORCID iD structured as: https://orcid.org/0000-0000-0000-0000. URLs must start with https."
IDENTICAL_ISSNS_ERROR = "The Print and Online ISSNs supplied are identical. If you supply 2 ISSNs they must be different."
start_year = app.config.get("METADATA_START_YEAR", dates.now().year - 15)
diff --git a/portality/forms/validate.py b/portality/forms/validate.py
index d1d78ec890..c7dd02cda2 100644
--- a/portality/forms/validate.py
+++ b/portality/forms/validate.py
@@ -12,6 +12,7 @@
from datetime import datetime
from portality import regex
from portality.datasets import get_currency_code
+from portality.lib import isolang
class MultiFieldValidator(object):
@@ -642,3 +643,16 @@ def __call__(self, form, field):
check = get_currency_code(field.data, fail_if_not_found=True)
if check is None:
raise validators.ValidationError(self.message)
+
+
+class CurrentISOLanguage(object):
+ def __init__(self, message=None):
+ if not message:
+ message = "Language is not in the currently supported ISO list"
+ self.message = message
+
+ def __call__(self, form, field):
+ if field.data is not None and field.data != '':
+ check = isolang.find(field.data)
+ if check is None:
+ raise validators.ValidationError(self.message)
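A minimal sketch of the validator in isolation, assuming `isolang.find` resolves ISO codes; a stub stands in for a real WTForms field, and empty or missing values are skipped per the guard above:

```python
from wtforms import validators
from portality.forms.validate import CurrentISOLanguage

class StubField:
    def __init__(self, data):
        self.data = data

check = CurrentISOLanguage()
check(None, StubField("en"))   # passes: recognised language
check(None, StubField(""))     # passes: empty values are not validated
try:
    check(None, StubField("zz-unknown"))
except validators.ValidationError as e:
    print(e)  # Language is not in the currently supported ISO list
```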
diff --git a/portality/lib/coerce.py b/portality/lib/coerce.py
index b8067a24ec..a07976b9d6 100644
--- a/portality/lib/coerce.py
+++ b/portality/lib/coerce.py
@@ -24,7 +24,7 @@ def datify(val):
return datify
-def to_isolang(output_format=None):
+def to_isolang(output_format=None, fail_if_not_found=True):
"""
:param output_format: format from input source to output. Must be one of:
* alpha3
@@ -33,6 +33,7 @@ def to_isolang(output_format=None):
* name
* fr
Can be a list in order of preference, too
+    :param fail_if_not_found: if True, raise a ValueError when no match is found; if False, return the input unchanged
~~-> Languages:Data~~
:return:
"""
@@ -49,8 +50,15 @@ def isolang(val):
if val is None:
return None
l = dataset.find(val)
+
+ # If we didn't find the language, either raise an error or return the provided value
if l is None:
- raise ValueError("Unable to find iso code for language {x}".format(x=val))
+ if fail_if_not_found is True:
+ raise ValueError("Unable to find iso code for language {x}".format(x=val))
+ else:
+ return val
+
+ # Retrieve the correct output format from a successful match
for f in output_format:
v = l.get(f)
if v is None or v == "":
@@ -64,6 +72,7 @@ def to_currency_code(fail_if_not_found=True):
"""
~~-> Currencies:Data~~
:param val:
+ :param fail_if_not_found:
:return:
"""
def codify(val):
@@ -129,9 +138,10 @@ def to_issn(issn):
"utcdatetimemicros" : date_str(out_format=FMT_DATETIME_MS_STD),
"bigenddate" : date_str(out_format=FMT_DATE_STD),
"isolang": to_isolang(),
- "isolang_2letter": to_isolang(output_format="alpha2"),
+ "isolang_2letter_strict": to_isolang(output_format="alpha2", fail_if_not_found=True),
+ "isolang_2letter_lax": to_isolang(output_format="alpha2", fail_if_not_found=False),
"country_code": to_country_code,
"issn" : to_issn,
"currency_code_strict": to_currency_code(fail_if_not_found=True),
"currency_code_lax": to_currency_code(fail_if_not_found=False)
-}
\ No newline at end of file
+}
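The practical difference between the two coercions registered above, sketched by calling the factory directly (assuming the isolang dataset resolves plain-English names as well as codes). The core journal struct later in this diff uses the lax form so existing records with unrecognised languages still load, while the incoming-application struct overrides it with the strict form:

```python
from portality.lib.coerce import to_isolang

strict = to_isolang(output_format="alpha2", fail_if_not_found=True)
lax = to_isolang(output_format="alpha2", fail_if_not_found=False)

print(strict("English"))      # -> "en"
print(lax("not-a-language"))  # -> "not-a-language", input passed through
strict("not-a-language")      # raises ValueError
```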
diff --git a/portality/lib/csv_utils.py b/portality/lib/csv_utils.py
new file mode 100644
index 0000000000..c5a46f37fd
--- /dev/null
+++ b/portality/lib/csv_utils.py
@@ -0,0 +1,9 @@
+import csv
+from typing import Iterable, Union
+
+
+def read_all(csv_path, as_dict=False) -> Iterable[Union[list, dict]]:
+ reader = csv.DictReader if as_dict else csv.reader
+ with open(csv_path, 'r') as f:
+ for row in reader(f):
+ yield row
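`read_all` is a generator, so the file is only held open while iterating. Usage (the column name is a hypothetical example):

```python
from portality.lib import csv_utils

# rows as lists, including the header row
for row in csv_utils.read_all("journals.csv"):
    print(row[0])

# rows as dicts keyed by the header row
for row in csv_utils.read_all("journals.csv", as_dict=True):
    print(row.get("Journal title"))  # hypothetical column name
```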
diff --git a/portality/lib/dates.py b/portality/lib/dates.py
index a1af162b0b..52f6b0a809 100644
--- a/portality/lib/dates.py
+++ b/portality/lib/dates.py
@@ -119,15 +119,27 @@ def before_now(seconds: int) -> datetime:
return before(now(), seconds)
-def after(timestamp, seconds) -> datetime:
+def seconds_after(timestamp, seconds) -> datetime:
return timestamp + timedelta(seconds=seconds)
+def seconds_after_now(seconds: int) -> datetime:
+ return seconds_after(datetime.utcnow(), seconds)
+
+
+def days_after(timestamp, days) -> datetime:
+ return timestamp + timedelta(days=days)
+
+
+def days_after_now(days: int) -> datetime:
+ return days_after(datetime.utcnow(), days)
+
+
def eta(since, sofar, total) -> str:
td = (now() - since).total_seconds()
spr = float(td) / float(sofar)
alltime = int(math.ceil(total * spr))
- fin = after(since, alltime)
+ fin = seconds_after(since, alltime)
return format(fin)
@@ -163,3 +175,13 @@ def day_ranges(fro: datetime, to: datetime) -> 'list[str]':
def human_date(stamp, string_format=FMT_DATE_HUMAN) -> str:
return reformat(stamp, out_format=string_format)
+
+def is_before(mydate, comparison=None):
+ if comparison is None:
+ comparison = datetime.utcnow()
+ if isinstance(mydate, str):
+ mydate = parse(mydate)
+ if isinstance(comparison, str):
+ comparison = parse(comparison)
+ return mydate < comparison
+
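The renamed and added helpers in use, assuming `dates.parse` accepts YYYY-MM-DD strings (as `is_before` relies on above):

```python
from datetime import datetime
from portality.lib import dates

dates.seconds_after(datetime(2023, 1, 1), 90)  # 2023-01-01 00:01:30
dates.days_after(datetime(2023, 1, 1), 3)      # 2023-01-04 00:00:00
dates.days_after_now(1)                        # this time tomorrow (UTC)

dates.is_before("2020-01-01")                  # True: before now
dates.is_before("2030-01-01", "2029-01-01")    # False
```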
diff --git a/portality/migrate/20180106_1463_ongoing_updates/sync_journals_applications.py b/portality/migrate/20180106_1463_ongoing_updates/sync_journals_applications.py
index adb85410aa..4315618d51 100644
--- a/portality/migrate/20180106_1463_ongoing_updates/sync_journals_applications.py
+++ b/portality/migrate/20180106_1463_ongoing_updates/sync_journals_applications.py
@@ -52,7 +52,7 @@
app_created = application.created_timestamp
for journal in related_journals:
almu = application.last_manual_update_timestamp
- almu_adjusted = dates.after(almu, 3600)
+ almu_adjusted = dates.seconds_after(almu, 3600)
# do a load of reporting prep
jc_ac_diff = int((journal.created_timestamp - app_created).total_seconds())
diff --git a/portality/migrate/903_remove_blanks/README.md b/portality/migrate/903_remove_blanks/README.md
new file mode 100644
index 0000000000..833c2c102f
--- /dev/null
+++ b/portality/migrate/903_remove_blanks/README.md
@@ -0,0 +1,13 @@
+# Remove Blanks
+
+Removes leading and trailing whitespace from string fields in Journal and Application records.
+
+### Run
+```
+python portality/upgrade.py -u portality/migrate/903_remove_blanks/migrate.json
+```
+
+### Verify
+```
+python -m portality.scripts.blank_field_finder
+```
\ No newline at end of file
diff --git a/portality/migrate/903_remove_blanks/__init__.py b/portality/migrate/903_remove_blanks/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/portality/migrate/903_remove_blanks/functions.py b/portality/migrate/903_remove_blanks/functions.py
new file mode 100644
index 0000000000..f36c435314
--- /dev/null
+++ b/portality/migrate/903_remove_blanks/functions.py
@@ -0,0 +1,21 @@
+def remove_blanks(obj) -> dict:
+ if not isinstance(obj, dict):
+ return obj
+
+ for k, v in obj.items():
+ if isinstance(v, dict):
+ obj[k] = remove_blanks(v)
+
+ elif isinstance(v, list):
+ if not v:
+ continue
+ if isinstance(v[0], dict):
+ obj[k] = [remove_blanks(item) for item in v]
+ elif isinstance(v[0], str):
+ obj[k] = [item.strip() for item in v]
+
+ elif isinstance(v, str) and v != v.strip():
+ print(f'remove blanks: {k} = [{v}]')
+ obj[k] = v.strip()
+
+ return obj
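Behaviour sketch: nested dicts are cleaned recursively, lists of strings are stripped element-wise (lists are typed by their first element), and non-string values pass through untouched. The package name starts with a digit, hence importlib:

```python
import importlib

functions = importlib.import_module("portality.migrate.903_remove_blanks.functions")

doc = {
    "title": "  Journal of Examples ",
    "keywords": [" open access ", "publishing "],
    "bibjson": {"publisher": {"name": " Example Press "}},
    "year": 2023,
}
print(functions.remove_blanks(doc))
# {'title': 'Journal of Examples', 'keywords': ['open access', 'publishing'],
#  'bibjson': {'publisher': {'name': 'Example Press'}}, 'year': 2023}
```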
diff --git a/portality/migrate/903_remove_blanks/migrate.json b/portality/migrate/903_remove_blanks/migrate.json
new file mode 100644
index 0000000000..64d4a31842
--- /dev/null
+++ b/portality/migrate/903_remove_blanks/migrate.json
@@ -0,0 +1,13 @@
+{
+ "batch" : 10000,
+ "types": [
+ {
+ "type" : "journal",
+ "init_with_model" : false,
+ "keepalive" : "10m",
+ "functions" : [
+ "portality.migrate.903_remove_blanks.functions.remove_blanks"
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/portality/models/v2/journal.py b/portality/models/v2/journal.py
index 41c50a7ce9..ac1ce42585 100644
--- a/portality/models/v2/journal.py
+++ b/portality/models/v2/journal.py
@@ -1131,4 +1131,4 @@ def query(self):
"sort" : [
{"created_date" : {"order" : "desc"}}
]
- }
\ No newline at end of file
+ }
diff --git a/portality/models/v2/shared_structs.py b/portality/models/v2/shared_structs.py
index a3bbcb3d86..6c2c031af1 100644
--- a/portality/models/v2/shared_structs.py
+++ b/portality/models/v2/shared_structs.py
@@ -17,7 +17,7 @@
"lists" : {
"is_replaced_by" : {"coerce" : "issn", "contains" : "field", "set__allow_coerce_failure" : True},
"keywords" : {"contains" : "field", "coerce" : "unicode_lower"},
- "language" : {"contains" : "field", "coerce" : "isolang_2letter"},
+ "language" : {"contains" : "field", "coerce" : "isolang_2letter_lax"},
"license" : {"contains" : "object"},
"replaces" : {"contains" : "field", "coerce" : "issn", "set__allow_coerce_failure" : True},
"subject" : {"contains" : "object"}
diff --git a/portality/regex.py b/portality/regex.py
index c50f053f7f..a298a4731f 100644
--- a/portality/regex.py
+++ b/portality/regex.py
@@ -17,7 +17,15 @@
BIG_END_DATE_COMPILED = re.compile(BIG_END_DATE)
#~~URL:Regex~~
-HTTP_URL = r'^https?://([^/:]+\.[a-z]{2,63}|([0-9]{1,3}\.){3}[0-9]{1,3})(:[0-9]+)?(\/.*)?$'
+HTTP_URL = (
+ r'^(?:https?)://' # Scheme: http or https
+ r'(?:[\w-]+\.)*[\w-]+' # Domain name (optional subdomains)
+ r'(?:\.[a-z]{2,})' # Top-level domain (e.g., .com, .org)
+ r'(?:\/[^\/\s]*)*' # Path (optional)
+ r'(?:\?[^\/\s]*)?' # Query string (optional)
+ r'(?:#[^\/\s]*)?$' # Fragment (optional)
+)
+
HTTP_URL_COMPILED = re.compile(HTTP_URL, re.IGNORECASE)
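A few cases the rewritten pattern accepts and rejects. Note one behaviour change against the old pattern: an alphabetic TLD is now required, so bare IP addresses no longer match:

```python
from portality.regex import HTTP_URL_COMPILED

ok = [
    "https://www.my-journal.com/about#repository_policy",
    "http://example.org/path?query=1",
]
bad = [
    "ftp://example.org",   # scheme must be http or https
    "https://localhost",   # no alphabetic TLD
    "http://127.0.0.1",    # IPs matched the old pattern, not this one
]
for url in ok + bad:
    print(url, bool(HTTP_URL_COMPILED.match(url)))
```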
diff --git a/portality/scripts/application_status_report.py b/portality/scripts/application_status_report.py
new file mode 100644
index 0000000000..e94192c471
--- /dev/null
+++ b/portality/scripts/application_status_report.py
@@ -0,0 +1,85 @@
+from portality import models
+import csv
+
+"""This script generates a report of the status of applications in the DOAJ. The output is a CSV file with number
+ of applications in each status(new, accepted, rejected) for each year."""
+
+
+def date_applied_query(date_year):
+ return {
+ "query": {
+ "bool": {
+ "must": [
+ {
+ "term": {
+ "admin.application_type.exact": "new_application"
+ }
+ },
+ {
+ "range": {
+ "admin.date_applied": {
+ "gte": str(date_year) + "-01-01",
+ "lte": str(date_year) + "-12-31"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+
+
+def status_query(date_year, status):
+ return {
+ "query": {
+ "bool": {
+ "must": [
+ {
+ "term": {
+ "action": "status:" + status
+ }
+ },
+ {
+ "range": {
+ "created_date": {
+ "gte": str(date_year) + "-01-01",
+ "lte": str(date_year) + "-12-31"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+
+
+if __name__ == "__main__":
+
+ import argparse
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-o", "--out", help="output file path")
+ parser.add_argument("-y", "--year", help="year to filter by")
+ args = parser.parse_args()
+
+ if not args.out:
+ print("Please specify an output file path with the -o option")
+ parser.print_help()
+ exit()
+
+ if not args.year:
+ print("Please specify a year to filter the applications with the -y option")
+ parser.print_help()
+ exit()
+
+ with open(args.out, "w", encoding="utf-8") as f:
+ writer = csv.writer(f)
+
+ res = models.Application.query(q=date_applied_query(args.year), size=0)
+ writer.writerow(["Submitted", res.get("hits", {}).get("total", {}).get("value", 0)])
+
+ res = models.Provenance.query(q=status_query(args.year, "accepted"), size=0)
+ writer.writerow(["Accepted", res.get("hits", {}).get("total", {}).get("value", 0)])
+
+ res = models.Provenance.query(q=status_query(args.year, "rejected"), size=0)
+ writer.writerow(["Rejected", res.get("hits", {}).get("total", {}).get("value", 0)])
diff --git a/portality/scripts/blank_field_finder.py b/portality/scripts/blank_field_finder.py
new file mode 100644
index 0000000000..028332b1a6
--- /dev/null
+++ b/portality/scripts/blank_field_finder.py
@@ -0,0 +1,87 @@
+import argparse
+from pathlib import Path
+from typing import Any, Iterable
+
+from portality.bll.services.journal import JournalService
+from portality.lib import csv_utils
+from portality.models import Application, Journal
+
+
+def to_k_v(item: Any, prefix: list = None):
+ if prefix is None:
+ prefix = []
+
+ if isinstance(item, dict):
+ for k, v in item.items():
+ yield from to_k_v(v, prefix=prefix + [k])
+
+ elif isinstance(item, list):
+ for k, v in enumerate(item):
+ yield from to_k_v(v, prefix=prefix + [k])
+ else:
+ yield '.'.join(map(str, prefix)), str(item)
+
+
+def tee(txt: str, out_file):
+ print(txt)
+ out_file.write(txt + '\n')
+
+
+def write_bad_data_domain_object(domain_object_class: Any, out_path):
+ with open(out_path, 'w') as f:
+ items = iter(domain_object_class.iterall())
+ while True:
+ try:
+ j = next(items, None)
+            except Exception:
+                # skip records that fail to load from the index
+                continue
+
+ if j is None:
+ break
+
+ for k, v in filter_bad_only(to_k_v(j.data)):
+ tee(f'{j.id} {k} [{v}]', f)
+
+
+def main2():
+ with open('/tmp/journals.csv', 'w') as f:
+ JournalService._make_journals_csv(f)
+
+
+def is_bad_str(v: str):
+ return isinstance(v, str) and v != v.strip()
+
+
+def filter_bad_only(row: Iterable):
+ return (i for i in row if is_bad_str(i[1]))
+
+
+def write_bad_data_journals_csv(csv_path, out_path):
+ with open(out_path, 'w') as out_file:
+ for row in csv_utils.read_all(csv_path, as_dict=True):
+ for k, v in filter_bad_only(row.items()):
+ tee(f'{k} [{v}]', out_file)
+
+
+def write_results(journal_csv_path, out_dir):
+ # out_dir = Path('/tmp')
+ # journal_csv_path = '/home/kk/tmp/journals.csv'
+ out_dir = Path(out_dir)
+ write_bad_data_domain_object(Application, out_dir / 'bad_app.txt')
+ write_bad_data_domain_object(Journal, out_dir / 'bad_journals.txt')
+ if journal_csv_path:
+ write_bad_data_journals_csv(journal_csv_path, out_dir / 'bad_journals_csv.txt')
+
+
+def main():
+ parser = argparse.ArgumentParser(description='Output file with bad data')
+ parser.add_argument('-i', '--input', help='Path of input CSV file', type=str, default=None)
+ parser.add_argument('-o', '--output', help='Output directory', type=str, default='.')
+ args = parser.parse_args(
+ # ['-i', '/home/kk/tmp/journals.csv', '-o', '/tmp']
+ )
+ write_results(args.input, args.output)
+
+
+if __name__ == '__main__':
+ main()
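How the helpers compose: `to_k_v` flattens a document into dotted-path/value pairs, and `filter_bad_only` keeps the pairs whose value carries leading or trailing whitespace:

```python
from portality.scripts.blank_field_finder import to_k_v, filter_bad_only

record = {"bibjson": {"title": " Trailing ", "keywords": ["ok", " not ok "]}}
flat = list(to_k_v(record))
# [('bibjson.title', ' Trailing '), ('bibjson.keywords.0', 'ok'),
#  ('bibjson.keywords.1', ' not ok ')]
print(list(filter_bad_only(flat)))
# [('bibjson.title', ' Trailing '), ('bibjson.keywords.1', ' not ok ')]
```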
diff --git a/portality/scripts/priorities.csv b/portality/scripts/priorities.csv
index 08e56c10b5..75c95cf037 100644
--- a/portality/scripts/priorities.csv
+++ b/portality/scripts/priorities.csv
@@ -1,6 +1,7 @@
id,labels,columns
HP/DaR,"Priority: High, Type: Data at Risk",
HP/bug,"Priority: High, bug",
+Deadline,Priority: Deadline,
HP/PfL,"Prioroty: High, Workflow: Pending for Live",Review
HP/sup,"Priority: High, Origin: Support",
Test1,Workflow: On Test,Review
diff --git a/portality/settings.py b/portality/settings.py
index 43889bcf09..c01aa845ba 100644
--- a/portality/settings.py
+++ b/portality/settings.py
@@ -9,7 +9,7 @@
# Application Version information
# ~~->API:Feature~~
-DOAJ_VERSION = "6.3.7"
+DOAJ_VERSION = "6.3.13"
API_VERSION = "3.0.1"
######################################
@@ -26,6 +26,11 @@
SESSION_COOKIE_SECURE=True
REMEMBER_COOKIE_SECURE = True
+####################################
+# Testdrive for setting up the test environment.
+# CAUTION - this can modify the index so should NEVER be used in production!
+TESTDRIVE_ENABLED = False
+
####################################
# Debug Mode
@@ -434,6 +439,7 @@
"anon_export": {"month": "*", "day": "10", "day_of_week": "*", "hour": "6", "minute": "30"},
"old_data_cleanup": {"month": "*", "day": "12", "day_of_week": "*", "hour": "6", "minute": "30"},
"monitor_bgjobs": {"month": "*", "day": "*/6", "day_of_week": "*", "hour": "10", "minute": "0"},
+ "find_discontinued_soon": {"month": "*", "day": "*", "day_of_week": "*", "hour": "0", "minute": "3"}
}
HUEY_TASKS = {
@@ -526,7 +532,17 @@
}
}
},
- "isolang_2letter": {
+ "isolang_2letter_strict": {
+ "type": "text",
+ "fields": {
+ "exact": {
+ "type": "keyword",
+# "index": False,
+ "store": True
+ }
+ }
+ },
+ "isolang_2letter_lax": {
"type": "text",
"fields": {
"exact": {
@@ -546,7 +562,7 @@
}
}
},
- "currency_code": {
+ "currency_code_strict": {
"type": "text",
"fields": {
"exact": {
@@ -1156,6 +1172,11 @@
# ~~->OpenURL:Feature~~
GA_CATEGORY_OPENURL = 'OpenURL'
+# GA for PublicDataDump
+# ~~->PublicDataDump:Feature~~
+GA_CATEGORY_PUBLICDATADUMP = 'PublicDataDump'
+GA_ACTION_PUBLICDATADUMP = 'Download'
+
# GA for API
# ~~-> API:Feature~~
GA_CATEGORY_API = 'API Hit'
@@ -1365,3 +1386,6 @@
# Pages under maintenance
PRESERVATION_PAGE_UNDER_MAINTENANCE = False
+
+# report journals that discontinue in ... days (e.g. 1 = tomorrow)
+DISCONTINUED_DATE_DELTA = 0
\ No newline at end of file
diff --git a/portality/static/doaj/docs/2023-07-04-DOAJQuestions.pdf b/portality/static/doaj/docs/2023-07-04-DOAJQuestions.pdf
new file mode 100644
index 0000000000..a75079cd30
Binary files /dev/null and b/portality/static/doaj/docs/2023-07-04-DOAJQuestions.pdf differ
diff --git a/portality/static/js/doaj.fieldrender.edges.js b/portality/static/js/doaj.fieldrender.edges.js
index f33e58686b..49faf4b543 100644
--- a/portality/static/js/doaj.fieldrender.edges.js
+++ b/portality/static/js/doaj.fieldrender.edges.js
@@ -645,13 +645,13 @@ $.extend(true, doaj, {
toggle = '';
}
var placeholder = 'Search ' + this.component.nodeCount + ' subjects';
[garbled hunk: the old and new multi-line HTML templates assigned to "frag" (built from this.title, toggle, the placeholder variable and the {{FILTERS}} token) lost their markup in extraction and cannot be reconstructed]
// substitute in the component parts
frag = frag.replace(/{{FILTERS}}/g, treeFrag);
@@ -1551,13 +1551,12 @@ $.extend(true, doaj, {
var textIdSelector = edges.css_id_selector(this.namespace, "text", this);
var text = this.component.jq(textIdSelector).val();
- if (text === "") {
- return;
- }
-
// if there is search text, then proceed to run the search
var val = this.component.jq(element).val();
this.component.setSearchField(val, false);
+ if (text === "") {
+ return;
+ }
this.component.setSearchText(text);
};
@@ -1833,10 +1832,10 @@ $.extend(true, doaj, {
if (this.togglable) {
toggle = '';
}
[garbled hunk: the old and new multi-line HTML templates assigned to "frag" (built from this.component.display, toggle and the {{FILTERS}} token) lost their markup in extraction and cannot be reconstructed]
// substitute in the component parts
frag = frag.replace(/{{FILTERS}}/g, filterFrag + results);
@@ -2084,10 +2083,10 @@ $.extend(true, doaj, {
if (this.togglable) {
toggle = '';
}
[garbled hunk, truncated here: another multi-line "frag" HTML template rewrite; markup lost in extraction]