1
1
#!/usr/bin/env python
2
- """Fetch wheels from wheels.scipy.org for a pandas version."""
2
+ """Fetch wheels from anaconda.org for a pandas version."""
3
3
import argparse
4
- import pathlib
4
+ import io
5
+ import os
5
6
import sys
6
- import urllib.parse
7
7
import urllib .request
8
8
9
- from lxml import html
9
+ import requests
10
+ import lxml .html
10
11
11
12
12
13
def parse_args (args = None ):
@@ -16,26 +17,26 @@ def parse_args(args=None):
16
17
17
18
18
19
def fetch(version):
    """Download all pandas wheels for *version* into ``pandas/dist``.

    Scrapes the anaconda.org staging channel's file listing for the given
    pandas version, builds direct download URLs via the anaconda.org API,
    and retrieves each wheel, printing progress as it goes.

    Parameters
    ----------
    version : str
        The pandas version whose wheels should be fetched (e.g. "1.0.0").

    Raises
    ------
    requests.HTTPError
        If the file-listing page cannot be retrieved.
    """
    url = f"https://anaconda.org/multibuild-wheels-staging/pandas/files?version={version}"
    r = requests.get(url)
    # Fail fast on a 4xx/5xx response instead of silently parsing an error
    # page and "finding" zero wheels.
    r.raise_for_status()
    t = io.StringIO(r.text)

    root = lxml.html.parse(t).getroot()
    # NOTE(review): this absolute XPath is brittle — it is tied to the exact
    # current layout of the anaconda.org files page and will break silently
    # if the page structure changes.
    refs = root.xpath("/html/body/div[2]/div[2]/div/div[9]/div/form/table/tbody/tr/td[4]/a[2]")
    base = ("http://api.anaconda.org/download/multibuild-wheels-staging/"
            "pandas/{version}/{whl}")

    urls = [
        base.format(version=version, whl=a.text)
        for a in refs
        # a.text is None for empty anchors; guard before .endswith to avoid
        # AttributeError, and skip layout artifacts ending in a newline.
        if a.text and not a.text.endswith('\n')
    ]
    N = len(urls)

    # Ensure the destination directory exists so urlretrieve does not raise
    # FileNotFoundError on a fresh checkout.
    dest = os.path.join("pandas", "dist")
    os.makedirs(dest, exist_ok=True)

    for i, url in enumerate(urls, 1):
        filename = os.path.join(dest, url.split("/")[-1])
        urllib.request.urlretrieve(url, filename)
        print(f"Downloaded {url} to {filename} [{i}/{N}]")
39
40
40
41
41
42
def main (args = None ):
0 commit comments