move source code from src to root
JR-1991 committed May 7, 2024
1 parent 8b011e7 commit 11ace57
Showing 33 changed files with 71 additions and 64 deletions.
5 changes: 1 addition & 4 deletions src/pyDataverse/__init__.py → pyDataverse/__init__.py
@@ -7,15 +7,12 @@

from __future__ import absolute_import

-import urllib3
-
-urllib3.disable_warnings()  # noqa

__author__ = "Stefan Kasberger"
__email__ = "[email protected]"
__copyright__ = "Copyright (c) 2019 Stefan Kasberger"
__license__ = "MIT License"
-__version__ = "0.3.1"
+__version__ = "0.3.2"
__url__ = "https://github.com/GDCC/pyDataverse"
__download_url__ = "https://pypi.python.org/pypi/pyDataverse"
__description__ = "A Python module for Dataverse."
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
@@ -42,7 +42,7 @@ Prepare

**Additional Resources**

-- CSV templates from ``src/pyDataverse/templates/`` are used (see :ref:`CSV templates <user_csv-templates>`)
+- CSV templates from ``pyDataverse/templates/`` are used (see :ref:`CSV templates <user_csv-templates>`)
- Data from ``tests/data/user-guide/`` is used (`GitHub repo <https://github.com/gdcc/pyDataverse/tree/master/tests/data/user-guide>`_)


@@ -101,10 +101,10 @@ converts boolean values, and loads JSON cells properly.
Once we have the data in Python, we can easily import the data into
pyDataverse.

-For this, loop over each Dataset :class:`dict`, to:
+For this, loop over each Dataset :class:`dict`, to:

#. Instantiate an empty :class:`Dataset <pyDataverse.models.Dataset>`
-#. add the data with :meth:`set() <pyDataverse.models.Dataset.set>` and
+#. add the data with :meth:`set() <pyDataverse.models.Dataset.set>` and
#. append the instance to a :class:`list`.

::
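The literal code block that follows ``::`` is not shown in this diff. A minimal sketch of the loop described above, assuming the Dataset dicts from ``datasets.csv`` have already been read in; the names ``ds_data`` and ``ds_lst`` and the example titles are illustrative, not taken from the collapsed block::

    from pyDataverse.models import Dataset

    # illustrative stand-in for the Dataset dicts read from datasets.csv
    ds_data = [{"title": "Example Dataset A"}, {"title": "Example Dataset B"}]

    ds_lst = []
    for ds_dict in ds_data:
        ds = Dataset()      # 1. instantiate an empty Dataset
        ds.set(ds_dict)     # 2. add the data with set()
        ds_lst.append(ds)   # 3. append the instance to a list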
@@ -71,12 +71,12 @@ There is also a more detailed tutorial on how to use the CSV templates
for mass imports in the
:ref:`User Guide - Advanced <advanced-usage_data-migration>`.

-The CSV templates can be found in ``src/pyDataverse/templates/``
-(`GitHub repo <https://github.com/gdcc/pyDataverse/tree/master/src/pyDataverse/templates>`_):
+The CSV templates can be found in ``pyDataverse/templates/``
+(`GitHub repo <https://github.com/gdcc/pyDataverse/tree/master/pyDataverse/templates>`_):

-- `dataverses.csv <https://raw.githubusercontent.com/gdcc/pyDataverse/master/src/pyDataverse/templates/dataverses.csv>`_
-- `datasets.csv <https://raw.githubusercontent.com/gdcc/pyDataverse/master/src/pyDataverse/templates/datasets.csv>`_
-- `datafiles.csv <https://raw.githubusercontent.com/gdcc/pyDataverse/master/src/pyDataverse/templates/datafiles.csv>`_
+- `dataverses.csv <https://raw.githubusercontent.com/gdcc/pyDataverse/master/pyDataverse/templates/dataverses.csv>`_
+- `datasets.csv <https://raw.githubusercontent.com/gdcc/pyDataverse/master/pyDataverse/templates/datasets.csv>`_
+- `datafiles.csv <https://raw.githubusercontent.com/gdcc/pyDataverse/master/pyDataverse/templates/datafiles.csv>`_


.. _user_csv-templates_usage_create-csv:
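A minimal sketch of loading a filled-in template with the standard library, assuming a comma-separated ``datasets.csv``; ``csv.DictReader`` is used here purely for illustration, while the pyDataverse-specific workflow is described in the tutorial referenced above::

    import csv

    with open("datasets.csv", newline="", encoding="utf-8") as f:
        reader = csv.DictReader(f)            # header row becomes the dict keys
        print(reader.fieldnames)              # column names defined by the template
        rows = [dict(row) for row in reader]  # one dict per filled-in row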
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
109 changes: 59 additions & 50 deletions src/pyDataverse/models.py → pyDataverse/models.py
@@ -1,4 +1,5 @@
"""Dataverse data-types data model."""
+
from __future__ import absolute_import

import json
@@ -103,7 +104,9 @@ def validate_json(self, filename_schema=None):
assert isinstance(filename_schema, str)

return validate_data(
-json.loads(self.json(validate=False)), filename_schema, file_format="json",
+json.loads(self.json(validate=False)),
+filename_schema,
+file_format="json",
)

def from_json(
@@ -867,7 +870,6 @@ def from_json(
)

if "metadataBlocks" in json_dict["datasetVersion"]:
-
# citation
if "citation" in json_dict["datasetVersion"]["metadataBlocks"]:
citation = json_dict["datasetVersion"]["metadataBlocks"]["citation"]
@@ -954,7 +956,8 @@ def from_json(

if "displayName" in socialscience:
self.__setattr__(
-"socialscience_displayName", socialscience["displayName"],
+"socialscience_displayName",
+socialscience["displayName"],
)

for field in socialscience["fields"]:
@@ -966,27 +969,27 @@
elif field["typeName"] == "targetSampleSize":
data["targetSampleSize"] = {}
if "targetSampleActualSize" in field["value"]:
-data["targetSampleSize"][
-"targetSampleActualSize"
-] = field["value"]["targetSampleActualSize"]["value"]
+data["targetSampleSize"]["targetSampleActualSize"] = (
+field["value"]["targetSampleActualSize"]["value"]
+)
if "targetSampleSizeFormula" in field["value"]:
-data["targetSampleSize"][
-"targetSampleSizeFormula"
-] = field["value"]["targetSampleSizeFormula"]["value"]
+data["targetSampleSize"]["targetSampleSizeFormula"] = (
+field["value"]["targetSampleSizeFormula"]["value"]
+)
elif field["typeName"] == "socialScienceNotes":
data["socialScienceNotes"] = {}
if "socialScienceNotesType" in field["value"]:
-data["socialScienceNotes"][
-"socialScienceNotesType"
-] = field["value"]["socialScienceNotesType"]["value"]
+data["socialScienceNotes"]["socialScienceNotesType"] = (
+field["value"]["socialScienceNotesType"]["value"]
+)
if "socialScienceNotesSubject" in field["value"]:
data["socialScienceNotes"][
"socialScienceNotesSubject"
] = field["value"]["socialScienceNotesSubject"]["value"]
if "socialScienceNotesText" in field["value"]:
-data["socialScienceNotes"][
-"socialScienceNotesText"
-] = field["value"]["socialScienceNotesText"]["value"]
+data["socialScienceNotes"]["socialScienceNotesText"] = (
+field["value"]["socialScienceNotesText"]["value"]
+)
else:
print(
"Attribute {0} not valid for import (dv_up).".format(
@@ -1200,7 +1203,10 @@ def json(self, data_format=None, validate=True, filename_schema=None):
)

# Generate fields attributes
-for (key, val,) in self.__attr_import_dv_up_citation_fields_arrays.items():
+for (
+key,
+val,
+) in self.__attr_import_dv_up_citation_fields_arrays.items():
if key in data_dict:
v = data_dict[key]
citation["fields"].append(
@@ -1341,25 +1347,25 @@ def json(self, data_format=None, validate=True, filename_schema=None):
if "targetSampleActualSize" in target_sample_size:
if target_sample_size["targetSampleActualSize"] is not None:
tmp_dict["targetSampleActualSize"] = {}
-tmp_dict["targetSampleActualSize"][
-"typeName"
-] = "targetSampleActualSize"
+tmp_dict["targetSampleActualSize"]["typeName"] = (
+"targetSampleActualSize"
+)
tmp_dict["targetSampleActualSize"]["multiple"] = False
tmp_dict["targetSampleActualSize"]["typeClass"] = "primitive"
-tmp_dict["targetSampleActualSize"][
-"value"
-] = target_sample_size["targetSampleActualSize"]
+tmp_dict["targetSampleActualSize"]["value"] = (
+target_sample_size["targetSampleActualSize"]
+)
if "targetSampleSizeFormula" in target_sample_size:
if target_sample_size["targetSampleSizeFormula"] is not None:
tmp_dict["targetSampleSizeFormula"] = {}
-tmp_dict["targetSampleSizeFormula"][
-"typeName"
-] = "targetSampleSizeFormula"
+tmp_dict["targetSampleSizeFormula"]["typeName"] = (
+"targetSampleSizeFormula"
+)
tmp_dict["targetSampleSizeFormula"]["multiple"] = False
tmp_dict["targetSampleSizeFormula"]["typeClass"] = "primitive"
-tmp_dict["targetSampleSizeFormula"][
-"value"
-] = target_sample_size["targetSampleSizeFormula"]
+tmp_dict["targetSampleSizeFormula"]["value"] = (
+target_sample_size["targetSampleSizeFormula"]
+)
socialscience["fields"].append(
{
"typeName": "targetSampleSize",
@@ -1376,36 +1382,36 @@ def json(self, data_format=None, validate=True, filename_schema=None):
if "socialScienceNotesType" in social_science_notes:
if social_science_notes["socialScienceNotesType"] is not None:
tmp_dict["socialScienceNotesType"] = {}
-tmp_dict["socialScienceNotesType"][
-"typeName"
-] = "socialScienceNotesType"
+tmp_dict["socialScienceNotesType"]["typeName"] = (
+"socialScienceNotesType"
+)
tmp_dict["socialScienceNotesType"]["multiple"] = False
tmp_dict["socialScienceNotesType"]["typeClass"] = "primitive"
-tmp_dict["socialScienceNotesType"][
-"value"
-] = social_science_notes["socialScienceNotesType"]
+tmp_dict["socialScienceNotesType"]["value"] = (
+social_science_notes["socialScienceNotesType"]
+)
if "socialScienceNotesSubject" in social_science_notes:
if social_science_notes["socialScienceNotesSubject"] is not None:
tmp_dict["socialScienceNotesSubject"] = {}
-tmp_dict["socialScienceNotesSubject"][
-"typeName"
-] = "socialScienceNotesSubject"
+tmp_dict["socialScienceNotesSubject"]["typeName"] = (
+"socialScienceNotesSubject"
+)
tmp_dict["socialScienceNotesSubject"]["multiple"] = False
tmp_dict["socialScienceNotesSubject"]["typeClass"] = "primitive"
-tmp_dict["socialScienceNotesSubject"][
-"value"
-] = social_science_notes["socialScienceNotesSubject"]
+tmp_dict["socialScienceNotesSubject"]["value"] = (
+social_science_notes["socialScienceNotesSubject"]
+)
if "socialScienceNotesText" in social_science_notes:
if social_science_notes["socialScienceNotesText"] is not None:
tmp_dict["socialScienceNotesText"] = {}
-tmp_dict["socialScienceNotesText"][
-"typeName"
-] = "socialScienceNotesText"
+tmp_dict["socialScienceNotesText"]["typeName"] = (
+"socialScienceNotesText"
+)
tmp_dict["socialScienceNotesText"]["multiple"] = False
tmp_dict["socialScienceNotesText"]["typeClass"] = "primitive"
-tmp_dict["socialScienceNotesText"][
-"value"
-] = social_science_notes["socialScienceNotesText"]
+tmp_dict["socialScienceNotesText"]["value"] = (
+social_science_notes["socialScienceNotesText"]
+)
socialscience["fields"].append(
{
"typeName": "socialScienceNotes",
@@ -1456,7 +1462,10 @@ def json(self, data_format=None, validate=True, filename_schema=None):
)

# Generate fields attributes
-for (key, val,) in self.__attr_import_dv_up_journal_fields_arrays.items():
+for (
+key,
+val,
+) in self.__attr_import_dv_up_journal_fields_arrays.items():
if key in data_dict:
journal["fields"].append(
{
@@ -1469,9 +1478,9 @@ def json(self, data_format=None, validate=True, filename_schema=None):

data["datasetVersion"]["metadataBlocks"]["citation"] = citation
if "socialscience" in locals():
-data["datasetVersion"]["metadataBlocks"][
-"socialscience"
-] = socialscience
+data["datasetVersion"]["metadataBlocks"]["socialscience"] = (
+socialscience
+)
if "geospatial" in locals():
data["datasetVersion"]["metadataBlocks"]["geospatial"] = geospatial
if "journal" in locals():
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
5 changes: 3 additions & 2 deletions src/pyDataverse/utils.py → pyDataverse/utils.py
@@ -1,4 +1,5 @@
"""Helper functions."""
+
import csv
import json
import os
@@ -585,13 +586,13 @@ def dataverse_tree_walker(
datasets = []
datafiles = []

-if type(data) == list:
+if isinstance(data, list):
for elem in data:
dv, ds, df = dataverse_tree_walker(elem)
dataverses += dv
datasets += ds
datafiles += df
-elif type(data) == dict:
+elif isinstance(data, dict):
if data["type"] == "dataverse":
dv_tmp = {}
for key in dv_keys:
