From 50ac4981e0b2dca5fc0c353ddcf3b900925bce9b Mon Sep 17 00:00:00 2001 From: Joris Van den Bossche Date: Thu, 13 Jan 2022 01:38:43 +0100 Subject: [PATCH] CI/DOC: replace master -> main branch (#45336) --- .github/ISSUE_TEMPLATE/bug_report.yaml | 2 +- .../documentation_improvement.yaml | 2 +- .github/ISSUE_TEMPLATE/performance_issue.yaml | 2 +- .github/workflows/asv-bot.yml | 4 +-- .github/workflows/code-checks.yml | 4 +-- .github/workflows/comment_bot.yml | 2 +- .github/workflows/datamanger.yml | 4 +-- .github/workflows/docbuild-and-upload.yml | 10 +++--- .github/workflows/posix.yml | 4 +-- .github/workflows/python-dev.yml | 4 +-- .github/workflows/sdist.yml | 4 +-- Dockerfile | 2 +- Makefile | 2 +- README.md | 8 ++--- azure-pipelines.yml | 4 +-- codecov.yml | 2 +- doc/source/conf.py | 2 +- doc/source/development/contributing.rst | 34 +++++++++---------- .../development/contributing_codebase.rst | 26 +++++++------- .../development/contributing_docstring.rst | 2 +- .../contributing_documentation.rst | 4 +-- .../development/contributing_environment.rst | 2 +- doc/source/development/extending.rst | 12 +++---- .../comparison/comparison_with_sas.rst | 4 +-- .../comparison_with_spreadsheets.rst | 4 +-- .../comparison/comparison_with_stata.rst | 4 +-- .../07_reshape_table_layout.rst | 2 +- .../intro_tutorials/08_combine_dataframes.rst | 4 +-- .../intro_tutorials/09_timeseries.rst | 2 +- .../includes/air_quality_no2.rst | 2 +- .../intro_tutorials/includes/titanic.rst | 2 +- doc/source/user_guide/options.rst | 2 +- doc/source/user_guide/style.ipynb | 2 +- doc/source/user_guide/visualization.rst | 4 +-- doc/source/user_guide/window.rst | 2 +- doc/source/whatsnew/v0.19.2.rst | 2 +- doc/source/whatsnew/v0.20.0.rst | 2 +- pandas/plotting/_misc.py | 4 +-- pandas/tests/io/excel/test_readers.py | 2 +- .../tests/io/generate_legacy_storage_files.py | 2 +- .../io/parser/common/test_file_buffer_url.py | 2 +- pandas/tests/io/parser/test_network.py | 4 +-- pandas/tests/io/test_feather.py | 2 +- pandas/tests/io/test_parquet.py | 2 +- 44 files changed, 99 insertions(+), 99 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml index 805413d79aae2..36bc8dcf02bae 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yaml +++ b/.github/ISSUE_TEMPLATE/bug_report.yaml @@ -17,7 +17,7 @@ body: [latest version](https://pandas.pydata.org/docs/whatsnew/index.html) of pandas. required: true - label: > - I have confirmed this bug exists on the master branch of pandas. + I have confirmed this bug exists on the main branch of pandas. - type: textarea id: example attributes: diff --git a/.github/ISSUE_TEMPLATE/documentation_improvement.yaml b/.github/ISSUE_TEMPLATE/documentation_improvement.yaml index 8486d6e3eebdc..b89600f8598e7 100644 --- a/.github/ISSUE_TEMPLATE/documentation_improvement.yaml +++ b/.github/ISSUE_TEMPLATE/documentation_improvement.yaml @@ -10,7 +10,7 @@ body: options: - label: > I have checked that the issue still exists on the latest versions of the docs - on `master` [here](https://pandas.pydata.org/docs/dev/) + on `main` [here](https://pandas.pydata.org/docs/dev/) required: true - type: textarea id: location diff --git a/.github/ISSUE_TEMPLATE/performance_issue.yaml b/.github/ISSUE_TEMPLATE/performance_issue.yaml index 9cde5b6dca385..096e012f4ee0f 100644 --- a/.github/ISSUE_TEMPLATE/performance_issue.yaml +++ b/.github/ISSUE_TEMPLATE/performance_issue.yaml @@ -17,7 +17,7 @@ body: [latest version](https://pandas.pydata.org/docs/whatsnew/index.html) of pandas. 
required: true - label: > - I have confirmed this issue exists on the master branch of pandas. + I have confirmed this issue exists on the main branch of pandas. - type: textarea id: example attributes: diff --git a/.github/workflows/asv-bot.yml b/.github/workflows/asv-bot.yml index c2a49dd96c1c1..ad2e2b4278dd2 100644 --- a/.github/workflows/asv-bot.yml +++ b/.github/workflows/asv-bot.yml @@ -59,9 +59,9 @@ jobs: git remote add upstream https://github.com/pandas-dev/pandas.git git fetch upstream asv machine --yes - asv continuous -f 1.1 -b $REGEX upstream/master HEAD + asv continuous -f 1.1 -b $REGEX upstream/main HEAD echo 'BENCH_OUTPUT<> $GITHUB_ENV - asv compare -f 1.1 upstream/master HEAD >> $GITHUB_ENV + asv compare -f 1.1 upstream/main HEAD >> $GITHUB_ENV echo 'EOF' >> $GITHUB_ENV echo "REGEX=$REGEX" >> $GITHUB_ENV diff --git a/.github/workflows/code-checks.yml b/.github/workflows/code-checks.yml index 6bf3577f2bd3d..838143deafb03 100644 --- a/.github/workflows/code-checks.yml +++ b/.github/workflows/code-checks.yml @@ -3,11 +3,11 @@ name: Code Checks on: push: branches: - - master + - main - 1.4.x pull_request: branches: - - master + - main - 1.4.x env: diff --git a/.github/workflows/comment_bot.yml b/.github/workflows/comment_bot.yml index dc396be753269..24f8d5d0116c5 100644 --- a/.github/workflows/comment_bot.yml +++ b/.github/workflows/comment_bot.yml @@ -29,7 +29,7 @@ jobs: - name: Install-pre-commit run: python -m pip install --upgrade pre-commit - name: Run pre-commit - run: pre-commit run --from-ref=origin/master --to-ref=HEAD --all-files || (exit 0) + run: pre-commit run --from-ref=origin/main --to-ref=HEAD --all-files || (exit 0) - name: Commit results run: | git config user.name "$(git log -1 --pretty=format:%an)" diff --git a/.github/workflows/datamanger.yml b/.github/workflows/datamanger.yml index 896064e1cccea..3fc515883a225 100644 --- a/.github/workflows/datamanger.yml +++ b/.github/workflows/datamanger.yml @@ -3,11 +3,11 @@ name: Data Manager on: push: branches: - - master + - main - 1.4.x pull_request: branches: - - master + - main - 1.4.x env: diff --git a/.github/workflows/docbuild-and-upload.yml b/.github/workflows/docbuild-and-upload.yml index e9c813a51ff78..e8ed6d4545194 100644 --- a/.github/workflows/docbuild-and-upload.yml +++ b/.github/workflows/docbuild-and-upload.yml @@ -3,11 +3,11 @@ name: Doc Build and Upload on: push: branches: - - master + - main - 1.4.x pull_request: branches: - - master + - main - 1.4.x env: @@ -53,18 +53,18 @@ jobs: echo "${{ secrets.server_ssh_key }}" > ~/.ssh/id_rsa chmod 600 ~/.ssh/id_rsa echo "${{ secrets.server_ip }} ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBE1Kkopomm7FHG5enATf7SgnpICZ4W2bw+Ho+afqin+w7sMcrsa0je7sbztFAV8YchDkiBKnWTG4cRT+KZgZCaY=" > ~/.ssh/known_hosts - if: ${{github.event_name == 'push' && github.ref == 'refs/heads/master'}} + if: ${{github.event_name == 'push' && github.ref == 'refs/heads/main'}} - name: Copy cheatsheets into site directory run: cp doc/cheatsheet/Pandas_Cheat_Sheet* web/build/ - name: Upload web run: rsync -az --delete --exclude='pandas-docs' --exclude='docs' web/build/ docs@${{ secrets.server_ip }}:/usr/share/nginx/pandas - if: ${{github.event_name == 'push' && github.ref == 'refs/heads/master'}} + if: ${{github.event_name == 'push' && github.ref == 'refs/heads/main'}} - name: Upload dev docs run: rsync -az --delete doc/build/html/ docs@${{ secrets.server_ip }}:/usr/share/nginx/pandas/pandas-docs/dev - if: ${{github.event_name == 'push' && github.ref == 
'refs/heads/master'}} + if: ${{github.event_name == 'push' && github.ref == 'refs/heads/main'}} - name: Move docs into site directory run: mv doc/build/html web/build/docs diff --git a/.github/workflows/posix.yml b/.github/workflows/posix.yml index 7f4d3e4534278..3e034ec9460a0 100644 --- a/.github/workflows/posix.yml +++ b/.github/workflows/posix.yml @@ -3,11 +3,11 @@ name: Posix on: push: branches: - - master + - main - 1.4.x pull_request: branches: - - master + - main - 1.4.x paths-ignore: - "doc/**" diff --git a/.github/workflows/python-dev.yml b/.github/workflows/python-dev.yml index 389607f1b356a..3f9ea2aac22c7 100644 --- a/.github/workflows/python-dev.yml +++ b/.github/workflows/python-dev.yml @@ -3,11 +3,11 @@ name: Python Dev on: push: branches: - - master + - main - 1.4.x pull_request: branches: - - master + - main - 1.4.x paths-ignore: - "doc/**" diff --git a/.github/workflows/sdist.yml b/.github/workflows/sdist.yml index ace22d716e065..dd030f1aacc44 100644 --- a/.github/workflows/sdist.yml +++ b/.github/workflows/sdist.yml @@ -3,11 +3,11 @@ name: sdist on: push: branches: - - master + - main - 1.4.x pull_request: branches: - - master + - main - 1.4.x paths-ignore: - "doc/**" diff --git a/Dockerfile b/Dockerfile index de1c564921de9..8887e80566772 100644 --- a/Dockerfile +++ b/Dockerfile @@ -28,7 +28,7 @@ RUN mkdir "$pandas_home" \ && git clone "https://github.com/$gh_username/pandas.git" "$pandas_home" \ && cd "$pandas_home" \ && git remote add upstream "https://github.com/pandas-dev/pandas.git" \ - && git pull upstream master + && git pull upstream main # Because it is surprisingly difficult to activate a conda environment inside a DockerFile # (from personal experience and per https://github.com/ContinuumIO/docker-images/issues/89), diff --git a/Makefile b/Makefile index 1fdd3cfdcf027..c0aa685ed47ac 100644 --- a/Makefile +++ b/Makefile @@ -12,7 +12,7 @@ build: clean_pyc python setup.py build_ext lint-diff: - git diff upstream/master --name-only -- "*.py" | xargs flake8 + git diff upstream/main --name-only -- "*.py" | xargs flake8 black: black . 
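Illustrative aside, not part of the patch: the ``lint-diff`` target above runs flake8 only over the files that differ from ``upstream/main``. A minimal Python sketch of the same idea, assuming ``git`` and ``flake8`` are available on the ``PATH`` (the helper name is hypothetical)::

    # Sketch of the Makefile's lint-diff target: flake8 only the *.py files
    # that differ from upstream/main. Hypothetical helper, not pandas tooling.
    import subprocess

    def lint_changed_files(base: str = "upstream/main") -> int:
        """Run flake8 on the *.py files changed relative to *base*."""
        diff = subprocess.run(
            ["git", "diff", base, "--name-only", "--", "*.py"],
            capture_output=True, text=True, check=True,
        )
        changed = diff.stdout.split()
        if not changed:
            return 0  # nothing to lint
        # equivalent of piping the file list through ``xargs flake8``
        return subprocess.run(["flake8", *changed]).returncode

    if __name__ == "__main__":
        raise SystemExit(lint_changed_files())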
diff --git a/README.md b/README.md index bde815939239d..26aed081de4af 100644 --- a/README.md +++ b/README.md @@ -9,9 +9,9 @@ [![Conda Latest Release](https://anaconda.org/conda-forge/pandas/badges/version.svg)](https://anaconda.org/anaconda/pandas/) [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.3509134.svg)](https://doi.org/10.5281/zenodo.3509134) [![Package Status](https://img.shields.io/pypi/status/pandas.svg)](https://pypi.org/project/pandas/) -[![License](https://img.shields.io/pypi/l/pandas.svg)](https://github.com/pandas-dev/pandas/blob/master/LICENSE) -[![Azure Build Status](https://dev.azure.com/pandas-dev/pandas/_apis/build/status/pandas-dev.pandas?branch=master)](https://dev.azure.com/pandas-dev/pandas/_build/latest?definitionId=1&branch=master) -[![Coverage](https://codecov.io/github/pandas-dev/pandas/coverage.svg?branch=master)](https://codecov.io/gh/pandas-dev/pandas) +[![License](https://img.shields.io/pypi/l/pandas.svg)](https://github.com/pandas-dev/pandas/blob/main/LICENSE) +[![Azure Build Status](https://dev.azure.com/pandas-dev/pandas/_apis/build/status/pandas-dev.pandas?branch=main)](https://dev.azure.com/pandas-dev/pandas/_build/latest?definitionId=1&branch=main) +[![Coverage](https://codecov.io/github/pandas-dev/pandas/coverage.svg?branch=main)](https://codecov.io/gh/pandas-dev/pandas) [![Downloads](https://static.pepy.tech/personalized-badge/pandas?period=month&units=international_system&left_color=black&right_color=orange&left_text=PyPI%20downloads%20per%20month)](https://pepy.tech/project/pandas) [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/pydata/pandas) [![Powered by NumFOCUS](https://img.shields.io/badge/powered%20by-NumFOCUS-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)](https://numfocus.org) @@ -170,4 +170,4 @@ Or maybe through using pandas you have an idea of your own or are looking for so Feel free to ask questions on the [mailing list](https://groups.google.com/forum/?fromgroups#!forum/pydata) or on [Gitter](https://gitter.im/pydata/pandas). -As contributors and maintainers to this project, you are expected to abide by pandas' code of conduct. More information can be found at: [Contributor Code of Conduct](https://github.com/pandas-dev/pandas/blob/master/.github/CODE_OF_CONDUCT.md) +As contributors and maintainers to this project, you are expected to abide by pandas' code of conduct. 
More information can be found at: [Contributor Code of Conduct](https://github.com/pandas-dev/pandas/blob/main/.github/CODE_OF_CONDUCT.md) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 6f221cd62797d..9c04d10707a64 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -2,7 +2,7 @@ trigger: branches: include: - - master + - main - 1.4.x paths: exclude: @@ -12,7 +12,7 @@ pr: autoCancel: true branches: include: - - master + - main - 1.4.x variables: diff --git a/codecov.yml b/codecov.yml index 883f9fbb20729..d893bdbdc9298 100644 --- a/codecov.yml +++ b/codecov.yml @@ -1,5 +1,5 @@ codecov: - branch: master + branch: main notify: after_n_builds: 10 comment: false diff --git a/doc/source/conf.py b/doc/source/conf.py index 0096b3337e19a..ea878f052f1f8 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -651,7 +651,7 @@ def linkcode_resolve(domain, info): fn = os.path.relpath(fn, start=os.path.dirname(pandas.__file__)) if "+" in pandas.__version__: - return f"https://github.com/pandas-dev/pandas/blob/master/pandas/{fn}{linespec}" + return f"https://github.com/pandas-dev/pandas/blob/main/pandas/{fn}{linespec}" else: return ( f"https://github.com/pandas-dev/pandas/blob/" diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst index 9b3d50069b077..1d745d21dacae 100644 --- a/doc/source/development/contributing.rst +++ b/doc/source/development/contributing.rst @@ -59,7 +59,7 @@ will allow others to reproduce the bug and provide insight into fixing. See `this blogpost `_ for tips on writing a good bug report. -Trying the bug-producing code out on the *master* branch is often a worthwhile exercise +Trying the bug-producing code out on the *main* branch is often a worthwhile exercise to confirm the bug still exists. It is also worth searching existing bug reports and pull requests to see if the issue has already been reported and/or fixed. @@ -143,7 +143,7 @@ as the version number cannot be computed anymore. Creating a branch ----------------- -You want your master branch to reflect only production-ready code, so create a +You want your main branch to reflect only production-ready code, so create a feature branch for making your changes. For example:: git branch shiny-new-feature @@ -158,14 +158,14 @@ changes in this branch specific to one bug or feature so it is clear what the branch brings to pandas. You can have many shiny-new-features and switch in between them using the git checkout command. -When creating this branch, make sure your master branch is up to date with -the latest upstream master version. To update your local master branch, you +When creating this branch, make sure your main branch is up to date with +the latest upstream main version. To update your local main branch, you can do:: - git checkout master - git pull upstream master --ff-only + git checkout main + git pull upstream main --ff-only -When you want to update the feature branch with changes in master after +When you want to update the feature branch with changes in main after you created the branch, check the section on :ref:`updating a PR `. @@ -256,7 +256,7 @@ double check your branch changes against the branch it was based on: #. Navigate to your repository on GitHub -- https://github.com/your-user-name/pandas #. Click on ``Branches`` #. Click on the ``Compare`` button for your feature branch -#. Select the ``base`` and ``compare`` branches, if necessary. This will be ``master`` and +#. Select the ``base`` and ``compare`` branches, if necessary. 
This will be ``main`` and ``shiny-new-feature``, respectively. Finally, make the pull request @@ -264,8 +264,8 @@ Finally, make the pull request If everything looks good, you are ready to make a pull request. A pull request is how code from a local repository becomes available to the GitHub community and can be looked -at and eventually merged into the master version. This pull request and its associated -changes will eventually be committed to the master branch and available in the next +at and eventually merged into the main version. This pull request and its associated +changes will eventually be committed to the main branch and available in the next release. To submit a pull request: #. Navigate to your repository on GitHub @@ -294,14 +294,14 @@ This will automatically update your pull request with the latest code and restar :any:`Continuous Integration ` tests. Another reason you might need to update your pull request is to solve conflicts -with changes that have been merged into the master branch since you opened your +with changes that have been merged into the main branch since you opened your pull request. -To do this, you need to "merge upstream master" in your branch:: +To do this, you need to "merge upstream main" in your branch:: git checkout shiny-new-feature git fetch upstream - git merge upstream/master + git merge upstream/main If there are no conflicts (or they could be fixed automatically), a file with a default commit message will open, and you can simply save and quit this file. @@ -313,7 +313,7 @@ Once the conflicts are merged and the files where the conflicts were solved are added, you can run ``git commit`` to save those fixes. If you have uncommitted changes at the moment you want to update the branch with -master, you will need to ``stash`` them prior to updating (see the +main, you will need to ``stash`` them prior to updating (see the `stash docs `__). This will effectively store your changes and they can be reapplied after updating. @@ -342,12 +342,12 @@ Delete your merged branch (optional) ------------------------------------ Once your feature branch is accepted into upstream, you'll probably want to get rid of -the branch. First, merge upstream master into your branch so git knows it is safe to +the branch. First, merge upstream main into your branch so git knows it is safe to delete your branch:: git fetch upstream - git checkout master - git merge upstream/master + git checkout main + git merge upstream/main Then you can do:: diff --git a/doc/source/development/contributing_codebase.rst b/doc/source/development/contributing_codebase.rst index 41fe88e02318a..b7b9cb9ac3aed 100644 --- a/doc/source/development/contributing_codebase.rst +++ b/doc/source/development/contributing_codebase.rst @@ -69,9 +69,9 @@ to run its checks with:: without needing to have done ``pre-commit install`` beforehand. -If you want to run checks on all recently committed files on upstream/master you can use:: +If you want to run checks on all recently committed files on upstream/main you can use:: - pre-commit run --from-ref=upstream/master --to-ref=HEAD --all-files + pre-commit run --from-ref=upstream/main --to-ref=HEAD --all-files without needing to have done ``pre-commit install`` beforehand. @@ -163,7 +163,7 @@ report any stylistic errors in your code. Therefore, it is helpful before submitting code to run the check yourself:: black pandas - git diff upstream/master -u -- "*.py" | flake8 --diff + git diff upstream/main -u -- "*.py" | flake8 --diff to auto-format your code. 
Additionally, many editors have plugins that will apply ``black`` as you edit files. @@ -171,7 +171,7 @@ apply ``black`` as you edit files. You should use a ``black`` version 21.5b2 as previous versions are not compatible with the pandas codebase. -One caveat about ``git diff upstream/master -u -- "*.py" | flake8 --diff``: this +One caveat about ``git diff upstream/main -u -- "*.py" | flake8 --diff``: this command will catch any stylistic errors in your changes specifically, but be beware it may not catch all of them. For example, if you delete the only usage of an imported function, it is stylistically incorrect to import an @@ -179,18 +179,18 @@ unused function. However, style-checking the diff will not catch this because the actual import is not part of the diff. Thus, for completeness, you should run this command, though it may take longer:: - git diff upstream/master --name-only -- "*.py" | xargs -r flake8 + git diff upstream/main --name-only -- "*.py" | xargs -r flake8 Note that on macOS, the ``-r`` flag is not available, so you have to omit it and run this slightly modified command:: - git diff upstream/master --name-only -- "*.py" | xargs flake8 + git diff upstream/main --name-only -- "*.py" | xargs flake8 Windows does not support the ``xargs`` command (unless installed for example via the `MinGW `__ toolchain), but one can imitate the behaviour as follows:: - for /f %i in ('git diff upstream/master --name-only -- "*.py"') do flake8 %i + for /f %i in ('git diff upstream/main --name-only -- "*.py"') do flake8 %i This will get all the files being changed by the PR (and ending with ``.py``), and run ``flake8`` on them, one after the other. @@ -242,7 +242,7 @@ to automatically format imports correctly. This will modify your local copy of t Alternatively, you can run a command similar to what was suggested for ``black`` and ``flake8`` :ref:`right above `:: - git diff upstream/master --name-only -- "*.py" | xargs -r isort + git diff upstream/main --name-only -- "*.py" | xargs -r isort Where similar caveats apply if you are on macOS or Windows. @@ -389,7 +389,7 @@ With custom types and inference this is not always possible so exceptions are ma pandas-specific types ~~~~~~~~~~~~~~~~~~~~~ -Commonly used types specific to pandas will appear in `pandas._typing `_ and you should use these where applicable. This module is private for now but ultimately this should be exposed to third party libraries who want to implement type checking against pandas. +Commonly used types specific to pandas will appear in `pandas._typing `_ and you should use these where applicable. This module is private for now but ultimately this should be exposed to third party libraries who want to implement type checking against pandas. For example, quite a few functions in pandas accept a ``dtype`` argument. This can be expressed as a string like ``"object"``, a ``numpy.dtype`` like ``np.int64`` or even a pandas ``ExtensionDtype`` like ``pd.CategoricalDtype``. Rather than burden the user with having to constantly annotate all of those options, this can simply be imported and reused from the pandas._typing module @@ -792,14 +792,14 @@ To install asv:: If you need to run a benchmark, change your directory to ``asv_bench/`` and run:: - asv continuous -f 1.1 upstream/master HEAD + asv continuous -f 1.1 upstream/main HEAD You can replace ``HEAD`` with the name of the branch you are working on, and report benchmarks that changed by more than 10%. The command uses ``conda`` by default for creating the benchmark environments. 
If you want to use virtualenv instead, write:: - asv continuous -f 1.1 -E virtualenv upstream/master HEAD + asv continuous -f 1.1 -E virtualenv upstream/main HEAD The ``-E virtualenv`` option should be added to all ``asv`` commands that run benchmarks. The default value is defined in ``asv.conf.json``. @@ -811,12 +811,12 @@ do not cause unexpected performance regressions. You can run specific benchmark using the ``-b`` flag, which takes a regular expression. For example, this will only run benchmarks from a ``pandas/asv_bench/benchmarks/groupby.py`` file:: - asv continuous -f 1.1 upstream/master HEAD -b ^groupby + asv continuous -f 1.1 upstream/main HEAD -b ^groupby If you want to only run a specific group of benchmarks from a file, you can do it using ``.`` as a separator. For example:: - asv continuous -f 1.1 upstream/master HEAD -b groupby.GroupByMethods + asv continuous -f 1.1 upstream/main HEAD -b groupby.GroupByMethods will only run the ``GroupByMethods`` benchmark defined in ``groupby.py``. diff --git a/doc/source/development/contributing_docstring.rst b/doc/source/development/contributing_docstring.rst index 623d1e8d45565..a87d8d5ad44bf 100644 --- a/doc/source/development/contributing_docstring.rst +++ b/doc/source/development/contributing_docstring.rst @@ -68,7 +68,7 @@ explained in this document: * `numpydoc docstring guide `_ (which is based in the original `Guide to NumPy/SciPy documentation - `_) + `_) numpydoc is a Sphinx extension to support the NumPy docstring convention. diff --git a/doc/source/development/contributing_documentation.rst b/doc/source/development/contributing_documentation.rst index a4a4f781d9dad..39bc582511148 100644 --- a/doc/source/development/contributing_documentation.rst +++ b/doc/source/development/contributing_documentation.rst @@ -202,10 +202,10 @@ And you'll have the satisfaction of seeing your new and improved documentation! .. _contributing.dev_docs: -Building master branch documentation +Building main branch documentation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -When pull requests are merged into the pandas ``master`` branch, the main parts of +When pull requests are merged into the pandas ``main`` branch, the main parts of the documentation are also built by Travis-CI. These docs are then hosted `here `__, see also the :any:`Continuous Integration ` section. diff --git a/doc/source/development/contributing_environment.rst b/doc/source/development/contributing_environment.rst index 4ea3701dec029..fda4f3ecf6dbf 100644 --- a/doc/source/development/contributing_environment.rst +++ b/doc/source/development/contributing_environment.rst @@ -47,7 +47,7 @@ Enable Docker support and use the Services tool window to build and manage image run and interact with containers. See https://www.jetbrains.com/help/pycharm/docker.html for details. -Note that you might need to rebuild the C extensions if/when you merge with upstream/master using:: +Note that you might need to rebuild the C extensions if/when you merge with upstream/main using:: python setup.py build_ext -j 4 diff --git a/doc/source/development/extending.rst b/doc/source/development/extending.rst index c10fcf6eacfc7..a7a10e192a9a7 100644 --- a/doc/source/development/extending.rst +++ b/doc/source/development/extending.rst @@ -231,7 +231,7 @@ Testing extension arrays We provide a test suite for ensuring that your extension arrays satisfy the expected behavior. To use the test suite, you must provide several pytest fixtures and inherit from the base test class. 
The required fixtures are found in -https://github.com/pandas-dev/pandas/blob/master/pandas/tests/extension/conftest.py. +https://github.com/pandas-dev/pandas/blob/main/pandas/tests/extension/conftest.py. To use a test, subclass it: @@ -244,7 +244,7 @@ To use a test, subclass it: pass -See https://github.com/pandas-dev/pandas/blob/master/pandas/tests/extension/base/__init__.py +See https://github.com/pandas-dev/pandas/blob/main/pandas/tests/extension/base/__init__.py for a list of all the tests available. .. _extending.extension.arrow: @@ -290,9 +290,9 @@ See more in the `Arrow documentation `_) +tests (`csv `_) will be used in many of the following examples. SAS provides ``PROC IMPORT`` to read csv data into a data set. @@ -113,7 +113,7 @@ The pandas method is :func:`read_csv`, which works similarly. url = ( "https://raw.github.com/pandas-dev/" - "pandas/master/pandas/tests/io/data/csv/tips.csv" + "pandas/main/pandas/tests/io/data/csv/tips.csv" ) tips = pd.read_csv(url) tips diff --git a/doc/source/getting_started/comparison/comparison_with_spreadsheets.rst b/doc/source/getting_started/comparison/comparison_with_spreadsheets.rst index 19999be9b461f..e3380db7c821e 100644 --- a/doc/source/getting_started/comparison/comparison_with_spreadsheets.rst +++ b/doc/source/getting_started/comparison/comparison_with_spreadsheets.rst @@ -92,7 +92,7 @@ formats. CSV ''' -Let's load and display the `tips `_ +Let's load and display the `tips `_ dataset from the pandas tests, which is a CSV file. In Excel, you would download and then `open the CSV `_. In pandas, you pass the URL or local path of the CSV file to :func:`~pandas.read_csv`: @@ -101,7 +101,7 @@ In pandas, you pass the URL or local path of the CSV file to :func:`~pandas.read url = ( "https://raw.github.com/pandas-dev" - "/pandas/master/pandas/tests/io/data/csv/tips.csv" + "/pandas/main/pandas/tests/io/data/csv/tips.csv" ) tips = pd.read_csv(url) tips diff --git a/doc/source/getting_started/comparison/comparison_with_stata.rst b/doc/source/getting_started/comparison/comparison_with_stata.rst index 94c45adcccc82..9831f8e29b338 100644 --- a/doc/source/getting_started/comparison/comparison_with_stata.rst +++ b/doc/source/getting_started/comparison/comparison_with_stata.rst @@ -92,7 +92,7 @@ Reading external data Like Stata, pandas provides utilities for reading in data from many formats. The ``tips`` data set, found within the pandas -tests (`csv `_) +tests (`csv `_) will be used in many of the following examples. Stata provides ``import delimited`` to read csv data into a data set in memory. @@ -109,7 +109,7 @@ the data set if presented with a url. url = ( "https://raw.github.com/pandas-dev" - "/pandas/master/pandas/tests/io/data/csv/tips.csv" + "/pandas/main/pandas/tests/io/data/csv/tips.csv" ) tips = pd.read_csv(url) tips diff --git a/doc/source/getting_started/intro_tutorials/07_reshape_table_layout.rst b/doc/source/getting_started/intro_tutorials/07_reshape_table_layout.rst index bd4a617fe753b..d09511143787a 100644 --- a/doc/source/getting_started/intro_tutorials/07_reshape_table_layout.rst +++ b/doc/source/getting_started/intro_tutorials/07_reshape_table_layout.rst @@ -67,7 +67,7 @@ measurement. .. raw:: html
- To raw data + To raw data diff --git a/doc/source/getting_started/intro_tutorials/08_combine_dataframes.rst b/doc/source/getting_started/intro_tutorials/08_combine_dataframes.rst index be4c284912db4..0b165c4aaa94e 100644 --- a/doc/source/getting_started/intro_tutorials/08_combine_dataframes.rst +++ b/doc/source/getting_started/intro_tutorials/08_combine_dataframes.rst @@ -34,7 +34,7 @@ Westminster* in respectively Paris, Antwerp and London. .. raw:: html
- To raw data + To raw data @@ -69,7 +69,7 @@ Westminster* in respectively Paris, Antwerp and London. .. raw:: html
- To raw data + To raw data diff --git a/doc/source/getting_started/intro_tutorials/09_timeseries.rst b/doc/source/getting_started/intro_tutorials/09_timeseries.rst index b9cab0747196e..1b3c3f2a601e8 100644 --- a/doc/source/getting_started/intro_tutorials/09_timeseries.rst +++ b/doc/source/getting_started/intro_tutorials/09_timeseries.rst @@ -35,7 +35,7 @@ Westminster* in respectively Paris, Antwerp and London. .. raw:: html
- To raw data + To raw data diff --git a/doc/source/getting_started/intro_tutorials/includes/air_quality_no2.rst b/doc/source/getting_started/intro_tutorials/includes/air_quality_no2.rst index a5a5442330e43..410062cf46344 100644 --- a/doc/source/getting_started/intro_tutorials/includes/air_quality_no2.rst +++ b/doc/source/getting_started/intro_tutorials/includes/air_quality_no2.rst @@ -17,6 +17,6 @@ in respectively Paris, Antwerp and London. .. raw:: html
- To raw data + To raw data diff --git a/doc/source/getting_started/intro_tutorials/includes/titanic.rst b/doc/source/getting_started/intro_tutorials/includes/titanic.rst index 7032b70b3f1cf..1267a33d605ed 100644 --- a/doc/source/getting_started/intro_tutorials/includes/titanic.rst +++ b/doc/source/getting_started/intro_tutorials/includes/titanic.rst @@ -27,6 +27,6 @@ consists of the following data columns: .. raw:: html
- To raw data + To raw data diff --git a/doc/source/user_guide/options.rst b/doc/source/user_guide/options.rst index 93448dae578c9..f6e98b68afdc9 100644 --- a/doc/source/user_guide/options.rst +++ b/doc/source/user_guide/options.rst @@ -31,7 +31,7 @@ namespace: * :func:`~pandas.option_context` - execute a codeblock with a set of options that revert to prior settings after execution. -**Note:** Developers can check out `pandas/core/config_init.py `_ for more information. +**Note:** Developers can check out `pandas/core/config_init.py `_ for more information. All of the functions above accept a regexp pattern (``re.search`` style) as an argument, and so passing in a substring will work - as long as it is unambiguous: diff --git a/doc/source/user_guide/style.ipynb b/doc/source/user_guide/style.ipynb index d64b36f1dbffe..38b4b83493c45 100644 --- a/doc/source/user_guide/style.ipynb +++ b/doc/source/user_guide/style.ipynb @@ -11,7 +11,7 @@ "\n", "[styler]: ../reference/api/pandas.io.formats.style.Styler.rst\n", "[viz]: visualization.rst\n", - "[download]: https://nbviewer.ipython.org/github/pandas-dev/pandas/blob/master/doc/source/user_guide/style.ipynb" + "[download]: https://nbviewer.ipython.org/github/pandas-dev/pandas/blob/main/doc/source/user_guide/style.ipynb" ] }, { diff --git a/doc/source/user_guide/visualization.rst b/doc/source/user_guide/visualization.rst index fd0af7583f5dc..de5058466693e 100644 --- a/doc/source/user_guide/visualization.rst +++ b/doc/source/user_guide/visualization.rst @@ -956,7 +956,7 @@ for more information. By coloring these curves differently for each class it is possible to visualize data clustering. Curves belonging to samples of the same class will usually be closer together and form larger structures. -**Note**: The "Iris" dataset is available `here `__. +**Note**: The "Iris" dataset is available `here `__. .. ipython:: python @@ -1116,7 +1116,7 @@ be colored differently. See the R package `Radviz `__ for more information. -**Note**: The "Iris" dataset is available `here `__. +**Note**: The "Iris" dataset is available `here `__. .. ipython:: python diff --git a/doc/source/user_guide/window.rst b/doc/source/user_guide/window.rst index 3e533cbadc5f7..dea3e8f3089e2 100644 --- a/doc/source/user_guide/window.rst +++ b/doc/source/user_guide/window.rst @@ -287,7 +287,7 @@ and we want to use an expanding window where ``use_expanding`` is ``True`` other 3 3.0 4 10.0 -You can view other examples of ``BaseIndexer`` subclasses `here `__ +You can view other examples of ``BaseIndexer`` subclasses `here `__ .. versionadded:: 1.1 diff --git a/doc/source/whatsnew/v0.19.2.rst b/doc/source/whatsnew/v0.19.2.rst index bba89d78be869..db9d9e65c923d 100644 --- a/doc/source/whatsnew/v0.19.2.rst +++ b/doc/source/whatsnew/v0.19.2.rst @@ -18,7 +18,7 @@ We recommend that all users upgrade to this version. Highlights include: - Compatibility with Python 3.6 -- Added a `Pandas Cheat Sheet `__. (:issue:`13202`). +- Added a `Pandas Cheat Sheet `__. (:issue:`13202`). .. contents:: What's new in v0.19.2 diff --git a/doc/source/whatsnew/v0.20.0.rst b/doc/source/whatsnew/v0.20.0.rst index 239431b7621c6..cdd10014e71f0 100644 --- a/doc/source/whatsnew/v0.20.0.rst +++ b/doc/source/whatsnew/v0.20.0.rst @@ -188,7 +188,7 @@ support for bz2 compression in the python 2 C-engine improved (:issue:`14874`). 
url = ('https://github.com/{repo}/raw/{branch}/{path}' .format(repo='pandas-dev/pandas', - branch='master', + branch='main', path='pandas/tests/io/parser/data/salaries.csv.bz2')) # default, infer compression df = pd.read_csv(url, sep='\t', compression='infer') diff --git a/pandas/plotting/_misc.py b/pandas/plotting/_misc.py index ac7e467aa3d9f..49cd7ded3f74e 100644 --- a/pandas/plotting/_misc.py +++ b/pandas/plotting/_misc.py @@ -280,7 +280,7 @@ def andrews_curves( >>> df = pd.read_csv( ... 'https://raw.github.com/pandas-dev/' - ... 'pandas/master/pandas/tests/io/data/csv/iris.csv' + ... 'pandas/main/pandas/tests/io/data/csv/iris.csv' ... ) >>> pd.plotting.andrews_curves(df, 'Name') @@ -406,7 +406,7 @@ def parallel_coordinates( >>> df = pd.read_csv( ... 'https://raw.github.com/pandas-dev/' - ... 'pandas/master/pandas/tests/io/data/csv/iris.csv' + ... 'pandas/main/pandas/tests/io/data/csv/iris.csv' ... ) >>> pd.plotting.parallel_coordinates( ... df, 'Name', color=('#556270', '#4ECDC4', '#C7F464') diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py index 7d18c2e63e74f..1c5557216fd6a 100644 --- a/pandas/tests/io/excel/test_readers.py +++ b/pandas/tests/io/excel/test_readers.py @@ -769,7 +769,7 @@ def test_corrupt_bytes_raises(self, read_ext, engine): @tm.network def test_read_from_http_url(self, read_ext): url = ( - "https://raw.githubusercontent.com/pandas-dev/pandas/master/" + "https://raw.githubusercontent.com/pandas-dev/pandas/main/" "pandas/tests/io/data/excel/test1" + read_ext ) url_table = pd.read_excel(url) diff --git a/pandas/tests/io/generate_legacy_storage_files.py b/pandas/tests/io/generate_legacy_storage_files.py index dede9127821fd..8f03655ec27cc 100644 --- a/pandas/tests/io/generate_legacy_storage_files.py +++ b/pandas/tests/io/generate_legacy_storage_files.py @@ -23,7 +23,7 @@ generate a pickle file. We will then check this file into a current branch, and test using test_pickle.py. This will load the *older* pickles and test versus the current data that is generated -(with master). These are then compared. +(with main). These are then compared. If we have cases where we changed the signature (e.g. we renamed offset -> freq in Timestamp). 
Then we have to conditionally execute diff --git a/pandas/tests/io/parser/common/test_file_buffer_url.py b/pandas/tests/io/parser/common/test_file_buffer_url.py index 4a8f734a34abf..37b0239516b72 100644 --- a/pandas/tests/io/parser/common/test_file_buffer_url.py +++ b/pandas/tests/io/parser/common/test_file_buffer_url.py @@ -32,7 +32,7 @@ def test_url(all_parsers, csv_dir_path): kwargs = {"sep": "\t"} url = ( - "https://raw.github.com/pandas-dev/pandas/master/" + "https://raw.github.com/pandas-dev/pandas/main/" "pandas/tests/io/parser/data/salaries.csv" ) url_result = parser.read_csv(url, **kwargs) diff --git a/pandas/tests/io/parser/test_network.py b/pandas/tests/io/parser/test_network.py index 21cdf6f3274df..4e89fdcd700f4 100644 --- a/pandas/tests/io/parser/test_network.py +++ b/pandas/tests/io/parser/test_network.py @@ -34,7 +34,7 @@ def check_compressed_urls(salaries_table, compression, extension, mode, engine): # test reading compressed urls with various engines and # extension inference base_url = ( - "https://github.com/pandas-dev/pandas/raw/master/" + "https://github.com/pandas-dev/pandas/raw/main/" "pandas/tests/io/parser/data/salaries.csv" ) @@ -55,7 +55,7 @@ def test_url_encoding_csv(): GH 10424 """ path = ( - "https://raw.githubusercontent.com/pandas-dev/pandas/master/" + "https://raw.githubusercontent.com/pandas-dev/pandas/main/" + "pandas/tests/io/parser/data/unicode_series.csv" ) df = read_csv(path, encoding="latin-1", header=None) diff --git a/pandas/tests/io/test_feather.py b/pandas/tests/io/test_feather.py index 15d41c56c13c1..19b5ef51e738c 100644 --- a/pandas/tests/io/test_feather.py +++ b/pandas/tests/io/test_feather.py @@ -191,7 +191,7 @@ def test_passthrough_keywords(self): def test_http_path(self, feather_file): # GH 29055 url = ( - "https://raw.githubusercontent.com/pandas-dev/pandas/master/" + "https://raw.githubusercontent.com/pandas-dev/pandas/main/" "pandas/tests/io/data/feather/feather-0_3_1.feather" ) expected = read_feather(feather_file) diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index ea88454ce7963..e6cf2d437731f 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -385,7 +385,7 @@ def test_parquet_read_from_url(self, df_compat, engine): pytest.importorskip(engine) url = ( "https://raw.githubusercontent.com/pandas-dev/pandas/" - "master/pandas/tests/io/data/parquet/simple.parquet" + "main/pandas/tests/io/data/parquet/simple.parquet" ) df = read_parquet(url) tm.assert_frame_equal(df, df_compat)