diff --git a/docs/conf.py b/docs/conf.py
index ea40908..be4a3c6 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -90,7 +90,7 @@
 #
 # This is also used if you do content translation via gettext catalogs.
 # Usually you set "language" from the command line for these cases.
-language = 'en'
+language = "en"
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
diff --git a/giddy/ergodic.py b/giddy/ergodic.py
index 74a2b08..73d91ff 100644
--- a/giddy/ergodic.py
+++ b/giddy/ergodic.py
@@ -148,8 +148,11 @@ def steady_state(P, fill_empty_classes=False):
 
 
 def _fmpt_ergodic(P):
-    warn('_fmpt_ergodic is deprecated. It will be replaced in giddy 2.5 with _mfpt_',
-         DeprecationWarning, stacklevel=2)
+    warn(
+        "_fmpt_ergodic is deprecated. It will be replaced in giddy 2.5 with _mfpt_",
+        DeprecationWarning,
+        stacklevel=2,
+    )
     return _mfpt_ergodic(P)
 
 
@@ -213,8 +216,11 @@ def _mfpt_ergodic(P):
 
 
 def fmpt(P, fill_empty_classes=False):
-    warn('fmpt is deprecated. It will be replaced in giddy 2.5 with mfpt',
-         DeprecationWarning, stacklevel=2)
+    warn(
+        "fmpt is deprecated. It will be replaced in giddy 2.5 with mfpt",
+        DeprecationWarning,
+        stacklevel=2,
+    )
     return mfpt(P, fill_empty_classes)
 
 
@@ -346,8 +352,11 @@ def mfpt(P, fill_empty_classes=False):
 
 
 def var_fmpt_ergodic(p):
-    warn('var_fmpt_ergodic is deprecated. It will be replaced in giddy 2.5 with var_fmpt_ergodic',
-         DeprecationWarning, stacklevel=2)
+    warn(
+        "var_fmpt_ergodic is deprecated. It will be replaced in giddy 2.5 with var_fmpt_ergodic",
+        DeprecationWarning,
+        stacklevel=2,
+    )
     return var_mfpt_ergodic(p)
 
 
diff --git a/giddy/markov.py b/giddy/markov.py
index 431afac..6876613 100644
--- a/giddy/markov.py
+++ b/giddy/markov.py
@@ -284,7 +284,7 @@ def __init__(self, class_ids, classes=None, fill_empty_classes=False, summary=Tr
 
     @property
     def mfpt(self):
-        warn('self._mfpt is deprecated. Please use self._mfpt')
+        warn("self._mfpt is deprecated. Please use self._mfpt")
         if not hasattr(self, "_mfpt"):
             self._mfpt = mfpt(self.p, fill_empty_classes=True)
         return self._mfpt
@@ -800,7 +800,6 @@ def __init__(
         variable_name=None,
         fill_empty_classes=False,
     ):
-
         y = np.asarray(y)
         self.fixed = fixed
         self.discrete = discrete
@@ -2038,7 +2037,6 @@ class FullRank_Markov(Markov):
     """
 
     def __init__(self, y, fill_empty_classes=False, summary=True):
-
         y = np.asarray(y)
         # resolve ties: All values are given a distinct rank, corresponding
         # to the order that the values occur in each cross section.
diff --git a/giddy/rank.py b/giddy/rank.py
index 076f2a7..d89fff9 100644
--- a/giddy/rank.py
+++ b/giddy/rank.py
@@ -351,7 +351,6 @@ class SpatialTau(object):
     """
 
     def __init__(self, x, y, w, permutations=0):
-
         w.transform = "b"
         self.n = len(x)
         res = Tau(x, y)
@@ -478,7 +477,6 @@ class Tau_Local:
     """
 
     def __init__(self, x, y):
-
         self.n = len(x)
         x = np.asarray(x)
         y = np.asarray(y)
@@ -596,7 +594,6 @@ class Tau_Local_Neighbor:
     """
 
     def __init__(self, x, y, w, permutations=0):
-
         x = np.asarray(x)
         y = np.asarray(y)
         self.n = len(x)
@@ -751,7 +748,6 @@ class Tau_Local_Neighborhood:
     """
 
     def __init__(self, x, y, w, permutations=0):
-
         x = np.asarray(x)
         y = np.asarray(y)
         res = Tau_Local(x, y)
@@ -885,7 +881,6 @@ class Tau_Regional:
     """
 
     def __init__(self, x, y, regime, permutations=0):
-
         x = np.asarray(x)
         y = np.asarray(y)
         res = Tau_Local(x, y)
@@ -926,7 +921,6 @@ def __init__(self, x, y, regime, permutations=0):
         self.tau_reg_pvalues = pvalues
 
     def _calc(self, W, WH, P, S):
-
         nomi = np.dot(P, np.dot(S, P.T))
         denomi = np.dot(P, np.dot(W, P.T)) + np.dot(P, np.dot(WH, P.T))
         T = nomi / denomi
diff --git a/giddy/sequence.py b/giddy/sequence.py
index 768fd81..4eb3ccd 100644
--- a/giddy/sequence.py
+++ b/giddy/sequence.py
@@ -11,6 +11,7 @@ import scipy.spatial.distance as d
 
 from .markov import Markov
 
+
 class Sequence(object):
     """
     Pairwise sequence analysis.
@@ -153,7 +154,6 @@ class Sequence(object):
     """
 
     def __init__(self, y, subs_mat=None, dist_type=None, indel=None, cluster_type=None):
-
         y = np.asarray(y)
         merged = list(itertools.chain.from_iterable(y))
         self.classes = np.unique(merged)
@@ -192,7 +192,9 @@ def __init__(self, y, subs_mat=None, dist_type=None, indel=None, cluster_type=No
                     "sequences of unequal lengths!"
                 )
 
-            hamming_dist = d.pdist(y_int.astype(int), metric="hamming") * y_int.shape[1]
+            hamming_dist = (
+                d.pdist(y_int.astype(int), metric="hamming") * y_int.shape[1]
+            )
             self.seq_dis_mat = d.squareform(hamming_dist)
 
         elif dist_type.lower() == "arbitrary":
diff --git a/notebooks/RankBasedMethods.ipynb b/notebooks/RankBasedMethods.ipynb
index da5781c..9b43b7e 100644
--- a/notebooks/RankBasedMethods.ipynb
+++ b/notebooks/RankBasedMethods.ipynb
@@ -968,11 +968,15 @@
     "from libpysal.weights import block_weights\n",
     "\n",
     "with warnings.catch_warnings():\n",
-    "    warnings.filterwarnings(\"ignore\", message=\"The weights matrix is not fully connected\")\n",
+    "    warnings.filterwarnings(\n",
+    "        \"ignore\", message=\"The weights matrix is not fully connected\"\n",
+    "    )\n",
     "\n",
     "    w = block_weights(complete_table[\"BEA region\"])\n",
     "    np.random.seed(12345)\n",
-    "    tau_w = giddy.rank.SpatialTau(complete_table[\"1929\"], complete_table[\"2009\"], w, 999)"
+    "    tau_w = giddy.rank.SpatialTau(\n",
+    "        complete_table[\"1929\"], complete_table[\"2009\"], w, 999\n",
+    "    )"
    ]
   },
   {
@@ -1244,11 +1248,16 @@
    "outputs": [],
    "source": [
     "with warnings.catch_warnings():\n",
-    "    warnings.filterwarnings(\"ignore\", message=\"The weights matrix is not fully connected\")\n",
+    "    warnings.filterwarnings(\n",
+    "        \"ignore\", message=\"The weights matrix is not fully connected\"\n",
+    "    )\n",
     "    np.random.seed(12345)\n",
-    "    \n",
+    "\n",
     "    tau_w = giddy.rank.Tau_Regional(\n",
-    "        complete_table[\"1929\"], complete_table[\"2009\"], complete_table[\"BEA region\"], 999\n",
+    "        complete_table[\"1929\"],\n",
+    "        complete_table[\"2009\"],\n",
+    "        complete_table[\"BEA region\"],\n",
+    "        999,\n",
     "    )"
    ]
   },
diff --git a/requirements_docs.txt b/requirements_docs.txt
index 7134d48..e2e48c6 100644
--- a/requirements_docs.txt
+++ b/requirements_docs.txt
@@ -1,5 +1,4 @@
 nbsphinx
-nbsphinx-link
 numpydoc
 sphinx>=1.4.3
 sphinxcontrib-bibtex
diff --git a/tools/gitcount.ipynb b/tools/gitcount.ipynb
index ffc28d3..ed19906 100644
--- a/tools/gitcount.ipynb
+++ b/tools/gitcount.ipynb
@@ -27,8 +27,15 @@
    "source": [
     "# get date of last tag\n",
     "from subprocess import Popen, PIPE\n",
-    "x, err = Popen('git log -1 --tags --simplify-by-decoration --pretty=\"%ai\"| cat', stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True).communicate()\n",
-    "start_date = x.split()[0].decode('utf-8')\n",
+    "\n",
+    "x, err = Popen(\n",
+    "    'git log -1 --tags --simplify-by-decoration --pretty=\"%ai\"| cat',\n",
+    "    stdin=PIPE,\n",
+    "    stdout=PIPE,\n",
+    "    stderr=PIPE,\n",
+    "    shell=True,\n",
+    ").communicate()\n",
+    "start_date = x.split()[0].decode(\"utf-8\")\n",
     "start_date"
    ]
   },
@@ -40,6 +47,7 @@
    "source": [
     "# today's date\n",
     "import datetime\n",
+    "\n",
     "release_date = str(datetime.datetime.today()).split()[0]\n",
     "release_date"
    ]
@@ -50,7 +58,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "package_name = 'giddy'\n",
+    "package_name = \"giddy\"\n",
     "# release_date = '2019-12-20'\n",
     "# start_date = '2019-12-20'"
    ]
@@ -78,6 +86,7 @@
     "from datetime import datetime, timedelta\n",
     "from time import sleep\n",
     "from subprocess import check_output\n",
+    "\n",
     "try:\n",
     "    from urllib import urlopen\n",
     "except:\n",
     "import ssl\n",
     "import yaml\n",
     "\n",
-    "context = ssl._create_unverified_context()\n"
+    "context = ssl._create_unverified_context()"
    ]
   },
@@ -115,7 +124,7 @@
    "source": [
     "since_date = '--since=\"{start}\"'.format(start=start_date)\n",
     "since_date\n",
-    "since = datetime.strptime(start_date+\" 0:0:0\", \"%Y-%m-%d %H:%M:%S\")\n",
+    "since = datetime.strptime(start_date + \" 0:0:0\", \"%Y-%m-%d %H:%M:%S\")\n",
     "since"
    ]
   },
@@ -128,9 +137,8 @@
    "source": [
     "# get __version__\n",
     "f = \"../{package}/__init__.py\".format(package=package_name)\n",
     "\n",
-    "with open(f, 'r') as initfile:\n",
-    "    exec(initfile.readline())\n",
-    "    "
+    "with open(f, \"r\") as initfile:\n",
+    "    exec(initfile.readline())"
    ]
   },
@@ -146,7 +154,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "cmd = ['git', 'log', '--oneline', since_date]\n",
+    "cmd = [\"git\", \"log\", \"--oneline\", since_date]\n",
     "ncommits = len(check_output(cmd).splitlines())"
    ]
   },
@@ -179,21 +187,23 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "identities = {'Levi John Wolf': ('ljwolf', 'Levi John Wolf'),\n",
-    "              'Serge Rey': ('Serge Rey', 'Sergio Rey', 'sjsrey', 'serge'),\n",
-    "              'Wei Kang': ('Wei Kang', 'weikang9009'),\n",
-    "              'Dani Arribas-Bel': ('Dani Arribas-Bel', 'darribas')\n",
+    "identities = {\n",
+    "    \"Levi John Wolf\": (\"ljwolf\", \"Levi John Wolf\"),\n",
+    "    \"Serge Rey\": (\"Serge Rey\", \"Sergio Rey\", \"sjsrey\", \"serge\"),\n",
+    "    \"Wei Kang\": (\"Wei Kang\", \"weikang9009\"),\n",
+    "    \"Dani Arribas-Bel\": (\"Dani Arribas-Bel\", \"darribas\"),\n",
     "}\n",
     "\n",
+    "\n",
     "def regularize_identity(string):\n",
     "    string = string.decode()\n",
     "    for name, aliases in identities.items():\n",
     "        for alias in aliases:\n",
     "            if alias in string:\n",
     "                string = string.replace(alias, name)\n",
-    "    if len(string.split(' '))>1:\n",
+    "    if len(string.split(\" \")) > 1:\n",
     "        string = string.title()\n",
-    "    return string.lstrip('* ')"
+    "    return string.lstrip(\"* \")"
    ]
   },
   {
@@ -202,7 +212,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "author_cmd = ['git', 'log', '--format=* %aN', since_date]"
+    "author_cmd = [\"git\", \"log\", \"--format=* %aN\", since_date]"
    ]
   },
   {
@@ -220,13 +230,12 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "\n",
     "ncommits = len(check_output(cmd).splitlines())\n",
     "all_authors = check_output(author_cmd).splitlines()\n",
     "counter = Counter([regularize_identity(author) for author in all_authors])\n",
     "# global_counter += counter\n",
     "# counters.update({'.'.join((package,subpackage)): counter})\n",
-    "unique_authors = sorted(set(all_authors))\n"
+    "unique_authors = sorted(set(all_authors))"
    ]
   },
   {
@@ -261,10 +270,11 @@
    "outputs": [],
    "source": [
     "from datetime import datetime, timedelta\n",
+    "\n",
     "ISO8601 = \"%Y-%m-%dT%H:%M:%SZ\"\n",
     "PER_PAGE = 100\n",
-    "element_pat = re.compile(r'<(.+?)>')\n",
-    "rel_pat = re.compile(r'rel=[\\'\"](\\w+)[\\'\"]')\n"
+    "element_pat = re.compile(r\"<(.+?)>\")\n",
+    "rel_pat = re.compile(r'rel=[\\'\"](\\w+)[\\'\"]')"
    ]
   },
   {
@@ -273,31 +283,37 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "\n",
     "def parse_link_header(headers):\n",
-    "    link_s = headers.get('link', '')\n",
+    "    link_s = headers.get(\"link\", \"\")\n",
     "    urls = element_pat.findall(link_s)\n",
     "    rels = rel_pat.findall(link_s)\n",
     "    d = {}\n",
-    "    for rel,url in zip(rels, urls):\n",
+    "    for rel, url in zip(rels, urls):\n",
     "        d[rel] = url\n",
     "    return d\n",
     "\n",
+    "\n",
     "def get_paged_request(url):\n",
     "    \"\"\"get a full list, handling APIv3's paging\"\"\"\n",
     "    results = []\n",
     "    while url:\n",
-    "        #print(\"fetching %s\" % url, file=sys.stderr)\n",
+    "        # print(\"fetching %s\" % url, file=sys.stderr)\n",
     "        f = urlopen(url)\n",
     "        results.extend(json.load(f))\n",
     "        links = parse_link_header(f.headers)\n",
-    "        url = links.get('next')\n",
+    "        url = links.get(\"next\")\n",
     "    return results\n",
     "\n",
+    "\n",
     "def get_issues(project=\"pysal/giddy\", state=\"closed\", pulls=False):\n",
     "    \"\"\"Get a list of the issues from the Github API.\"\"\"\n",
-    "    which = 'pulls' if pulls else 'issues'\n",
-    "    url = \"https://api.github.com/repos/%s/%s?state=%s&per_page=%i\" % (project, which, state, PER_PAGE)\n",
+    "    which = \"pulls\" if pulls else \"issues\"\n",
+    "    url = \"https://api.github.com/repos/%s/%s?state=%s&per_page=%i\" % (\n",
+    "        project,\n",
+    "        which,\n",
+    "        state,\n",
+    "        PER_PAGE,\n",
+    "    )\n",
     "    return get_paged_request(url)\n",
     "\n",
     "\n",
@@ -313,54 +329,54 @@
     "    \"\"\"Convert a list of issues to a dict, keyed by issue number.\"\"\"\n",
     "    idict = {}\n",
     "    for i in issues:\n",
-    "        idict[i['number']] = i\n",
+    "        idict[i[\"number\"]] = i\n",
     "    return idict\n",
     "\n",
     "\n",
     "def is_pull_request(issue):\n",
     "    \"\"\"Return True if the given issue is a pull request.\"\"\"\n",
-    "    return 'pull_request_url' in issue\n",
+    "    return \"pull_request_url\" in issue\n",
     "\n",
     "\n",
     "def issues_closed_since(period=timedelta(days=365), project=\"pysal/pysal\", pulls=False):\n",
     "    \"\"\"Get all issues closed since a particular point in time. period\n",
-    "can either be a datetime object, or a timedelta object. In the\n",
-    "latter case, it is used as a time before the present.\"\"\"\n",
+    "    can either be a datetime object, or a timedelta object. In the\n",
+    "    latter case, it is used as a time before the present.\"\"\"\n",
     "\n",
-    "    which = 'pulls' if pulls else 'issues'\n",
+    "    which = \"pulls\" if pulls else \"issues\"\n",
     "\n",
     "    if isinstance(period, timedelta):\n",
     "        period = datetime.now() - period\n",
-    "    url = \"https://api.github.com/repos/%s/%s?state=closed&sort=updated&since=%s&per_page=%i\" % (project, which, period.strftime(ISO8601), PER_PAGE)\n",
+    "    url = (\n",
+    "        \"https://api.github.com/repos/%s/%s?state=closed&sort=updated&since=%s&per_page=%i\"\n",
+    "        % (project, which, period.strftime(ISO8601), PER_PAGE)\n",
+    "    )\n",
     "    allclosed = get_paged_request(url)\n",
     "    # allclosed = get_issues(project=project, state='closed', pulls=pulls, since=period)\n",
-    "    filtered = [i for i in allclosed if _parse_datetime(i['closed_at']) > period]\n",
+    "    filtered = [i for i in allclosed if _parse_datetime(i[\"closed_at\"]) > period]\n",
     "\n",
     "    # exclude rejected PRs\n",
     "    if pulls:\n",
-    "        filtered = [ pr for pr in filtered if pr['merged_at'] ]\n",
+    "        filtered = [pr for pr in filtered if pr[\"merged_at\"]]\n",
     "\n",
     "    return filtered\n",
     "\n",
     "\n",
-    "def sorted_by_field(issues, field='closed_at', reverse=False):\n",
+    "def sorted_by_field(issues, field=\"closed_at\", reverse=False):\n",
     "    \"\"\"Return a list of issues sorted by closing date date.\"\"\"\n",
-    "    return sorted(issues, key = lambda i:i[field], reverse=reverse)\n",
+    "    return sorted(issues, key=lambda i: i[field], reverse=reverse)\n",
     "\n",
     "\n",
     "def report(issues, show_urls=False):\n",
-    "    \"\"\"Summary report about a list of issues, printing number and title.\n",
-    "    \"\"\"\n",
+    "    \"\"\"Summary report about a list of issues, printing number and title.\"\"\"\n",
     "    # titles may have unicode in them, so we must encode everything below\n",
     "    if show_urls:\n",
     "        for i in issues:\n",
-    "            role = 'ghpull' if 'merged_at' in i else 'ghissue'\n",
-    "            print('* :%s:`%d`: %s' % (role, i['number'],\n",
-    "                                      i['title'].encode('utf-8')))\n",
+    "            role = \"ghpull\" if \"merged_at\" in i else \"ghissue\"\n",
+    "            print(\"* :%s:`%d`: %s\" % (role, i[\"number\"], i[\"title\"].encode(\"utf-8\")))\n",
     "    else:\n",
     "        for i in issues:\n",
-    "            print('* %d: %s' % (i['number'], i['title'].encode('utf-8')))\n",
-    "\n"
+    "            print(\"* %d: %s\" % (i[\"number\"], i[\"title\"].encode(\"utf-8\")))"
    ]
   },
@@ -369,18 +385,17 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "\n",
     "all_issues = {}\n",
     "all_pulls = {}\n",
     "total_commits = 0\n",
-    "#prj='pysal/libpysal'\n",
-    "prj = 'pysal/{package}'.format(package=package_name)\n",
-    "issues = issues_closed_since(since, project=prj,pulls=False)\n",
-    "pulls = issues_closed_since(since, project=prj,pulls=True)\n",
+    "# prj='pysal/libpysal'\n",
+    "prj = \"pysal/{package}\".format(package=package_name)\n",
+    "issues = issues_closed_since(since, project=prj, pulls=False)\n",
+    "pulls = issues_closed_since(since, project=prj, pulls=True)\n",
     "issues = sorted_by_field(issues, reverse=True)\n",
     "pulls = sorted_by_field(pulls, reverse=True)\n",
     "n_issues, n_pulls = map(len, (issues, pulls))\n",
-    "n_total = n_issues + n_pulls\n"
+    "n_total = n_issues + n_pulls"
    ]
   },
   {
@@ -391,7 +406,7 @@
    "source": [
     "issue_listing = []\n",
     "for issue in issues:\n",
-    "    entry = \"{title} (#{number})\".format(title=issue['title'],number=issue['number'])\n",
+    "    entry = \"{title} (#{number})\".format(title=issue[\"title\"], number=issue[\"number\"])\n",
     "    issue_listing.append(entry)"
    ]
   },
@@ -403,7 +418,7 @@
    "source": [
     "pull_listing = []\n",
     "for pull in pulls:\n",
-    "    entry = \"{title} (#{number})\".format(title=pull['title'],number=pull['number'])\n",
\"{title} (#{number})\".format(title=pull['title'],number=pull['number'])\n", + " entry = \"{title} (#{number})\".format(title=pull[\"title\"], number=pull[\"number\"])\n", " pull_listing.append(entry)" ] }, @@ -422,7 +437,9 @@ "metadata": {}, "outputs": [], "source": [ - "message = \"We closed a total of {total} issues (enhancements and bug fixes) through {pr} pull requests\".format(total=n_total, pr=n_pulls)" + "message = \"We closed a total of {total} issues (enhancements and bug fixes) through {pr} pull requests\".format(\n", + " total=n_total, pr=n_pulls\n", + ")" ] }, { @@ -431,7 +448,9 @@ "metadata": {}, "outputs": [], "source": [ - "message = \"{msg}, since our last release on {previous}.\".format(msg=message, previous=str(start_date))\n" + "message = \"{msg}, since our last release on {previous}.\".format(\n", + " msg=message, previous=str(start_date)\n", + ")" ] }, { @@ -467,10 +486,10 @@ "metadata": {}, "outputs": [], "source": [ - "issues = \"\\n\".join([\" - \"+issue for issue in issue_listing])\n", + "issues = \"\\n\".join([\" - \" + issue for issue in issue_listing])\n", "message += issues\n", "message += \"\\n\\n## Pull Requests\\n\"\n", - "pulls = \"\\n\".join([\" - \"+pull for pull in pull_listing])\n", + "pulls = \"\\n\".join([\" - \" + pull for pull in pull_listing])\n", "message += pulls" ] }, @@ -489,7 +508,7 @@ "metadata": {}, "outputs": [], "source": [ - "people = \"\\n\".join([\" - \"+person for person in unique_authors])" + "people = \"\\n\".join([\" - \" + person for person in unique_authors])" ] }, { @@ -507,7 +526,11 @@ "metadata": {}, "outputs": [], "source": [ - "message +=\"\\n\\nThe following individuals contributed to this release:\\n\\n{people}\".format(people=people)" + "message += (\n", + " \"\\n\\nThe following individuals contributed to this release:\\n\\n{people}\".format(\n", + " people=people\n", + " )\n", + ")" ] }, { @@ -525,7 +548,9 @@ "metadata": {}, "outputs": [], "source": [ - "head = \"# Version {version} ({release_date})\\n\\n\".format(version=__version__, release_date=release_date)" + "head = \"# Version {version} ({release_date})\\n\\n\".format(\n", + " version=__version__, release_date=release_date\n", + ")" ] }, { @@ -544,7 +569,7 @@ "metadata": {}, "outputs": [], "source": [ - "# #insert the new changes in the begining of CHANGELOG.md \n", + "# #insert the new changes in the begining of CHANGELOG.md\n", "# with open(\"../CHANGELOG.md\", 'r+') as file:\n", "# content = file.read()\n", "# file.seek(0, 0)\n", @@ -558,9 +583,9 @@ "outputs": [], "source": [ "# outfile = 'changelog_{version}.md'.format(version=__version__)\n", - "outfile = 'changelog.md'\n", - "with open(outfile, 'w') as of:\n", - " of.write(head+message)" + "outfile = \"changelog.md\"\n", + "with open(outfile, \"w\") as of:\n", + " of.write(head + message)" ] }, {