1. Automatically back up a folder
import shutil
import datetime

def backup_files(source, destination):
    # Name the backup folder with a timestamp so each run creates a new copy
    timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
    backup_folder = f"{destination}/{timestamp}"
    shutil.copytree(source, backup_folder)
    print(f"Backed up to: {backup_folder}")

# Example usage
source_folder = "/path/to/source"
destination_folder = "/path/to/destination"
backup_files(source_folder, destination_folder)
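If the script runs on a schedule, old backups pile up. A minimal retention sketch, assuming the destination folder contains only these timestamped backup folders, that keeps just the most recent copies:

import os
import shutil

def prune_old_backups(destination, keep=5):
    # Timestamped folder names sort chronologically, so the oldest come first
    backups = sorted(
        d for d in os.listdir(destination)
        if os.path.isdir(os.path.join(destination, d))
    )
    for old in backups[:-keep]:
        shutil.rmtree(os.path.join(destination, old))
        print(f"Removed old backup: {old}")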
2. Send an email reminder
import smtplib
from email.mime.text import MIMEText

def send_email(subject, body, to):
    msg = MIMEText(body)
    msg['Subject'] = subject
    msg['From'] = "your_email@example.com"
    msg['To'] = to
    # Connect to the SMTP server, upgrade to TLS, then authenticate and send
    with smtplib.SMTP('smtp.example.com', 587) as server:
        server.starttls()
        server.login("your_email@example.com", "your_password")
        server.send_message(msg)

# Example usage
send_email("Task reminder", "Don't forget today's meeting!", "receiver@example.com")
3. Automatically download web page content
import requests

def download_web_content(url, filename):
    response = requests.get(url)
    if response.status_code == 200:
        with open(filename, 'w', encoding='utf-8') as file:
            file.write(response.text)
        print(f"Content saved to {filename}")
    else:
        print("Request failed, status code:", response.status_code)

# Example usage
url = "https://news.example.com/article"
download_web_content(url, "article.txt")
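In practice an unreachable server can make requests.get hang indefinitely, and some sites reject clients without a browser-like User-Agent. The same call with those two additions (the header value is just an example string):

response = requests.get(
    url,
    timeout=10,  # give up after 10 seconds instead of hanging
    headers={"User-Agent": "Mozilla/5.0 (compatible; my-downloader/1.0)"},
)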
4. Simple password manager
import json
import os

def save_credentials(site, username, password, filename="passwords.json"):
    # Load any existing entries first, then merge in the new one and rewrite the file
    if os.path.exists(filename):
        with open(filename, 'r') as file:
            try:
                data = json.load(file)
            except json.JSONDecodeError:
                data = {}
    else:
        data = {}
    data[site] = {"username": username, "password": password}
    with open(filename, 'w') as file:
        json.dump(data, file, indent=4)

def retrieve_credentials(site, filename="passwords.json"):
    with open(filename, 'r') as file:
        data = json.load(file)
    return data.get(site, None)

# Example usage
save_credentials("example.com", "user1", "pass123")
print(retrieve_credentials("example.com"))
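Storing passwords as plain text is risky. If the third-party cryptography package is available, a minimal sketch of symmetric encryption with Fernet (key handling is simplified here; in practice the key must be stored securely, not next to the data):

from cryptography.fernet import Fernet

key = Fernet.generate_key()              # keep this key somewhere safe
fernet = Fernet(key)

token = fernet.encrypt(b"pass123")       # encrypted bytes, safe to store
plain = fernet.decrypt(token).decode()   # "pass123"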
5. Image downloader
import os
import requests

def download_images(urls, folder="images"):
    if not os.path.exists(folder):
        os.makedirs(folder)
    for url in urls:
        response = requests.get(url)
        if response.status_code == 200:
            # Use the last path segment of the URL as the file name
            filename = os.path.join(folder, url.split("/")[-1])
            with open(filename, 'wb') as f:
                f.write(response.content)
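Example usage (the URLs below are placeholders):

image_urls = [
    "https://example.com/images/cat.jpg",
    "https://example.com/images/dog.png",
]
download_images(image_urls, folder="images")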
6. Automated Excel data processing
import pandas as pd

def process_excel(file_path):
    # Read the Excel file
    df = pd.read_excel(file_path)
    # Example cleaning step: drop rows that contain empty values
    df.dropna(inplace=True)
    # Save the processed data back to Excel
    output_file_path = file_path.replace('.xlsx', '_processed.xlsx')
    df.to_excel(output_file_path, index=False)
    print(f"Processed and saved to: {output_file_path}")

# Example usage
process_excel("/path/to/excel/file.xlsx")
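Dropping empty rows is only one example of cleaning; simple aggregation with pandas is just as short. A sketch that assumes hypothetical "Region" and "Sales" columns exist in the sheet:

import pandas as pd

df = pd.read_excel("/path/to/excel/file.xlsx")
# Total sales per region (column names are assumptions for illustration)
summary = df.groupby("Region")["Sales"].sum().reset_index()
summary.to_excel("sales_summary.xlsx", index=False)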
7. Network speed test
First install the speedtest-cli library:
pip install speedtest-cli
Then write the script as follows:
import speedtest

def test_network_speed():
    st = speedtest.Speedtest()
    download_speed = st.download() / 10**6  # Mbps
    upload_speed = st.upload() / 10**6      # Mbps
    print(f"Download speed: {download_speed:.2f} Mbps")
    print(f"Upload speed: {upload_speed:.2f} Mbps")

# Example usage
test_network_speed()
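Latency can be reported as well; in the speedtest-cli API, selecting the best server first populates a ping value (a rough sketch, API details may vary by version):

st = speedtest.Speedtest()
st.get_best_server()  # pick the lowest-latency server and record its ping
print(f"Ping: {st.results.ping:.2f} ms")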
8. Automated social media posting
This usually requires API keys and the corresponding API documentation. Taking Twitter as an example, you need the Tweepy library and API access set up.
First make sure tweepy is installed:
pip install tweepy
Then create a Python script to post a tweet:
import tweepy

# Fill in the API keys from your Twitter developer account
API_KEY = 'your_api_key'
API_SECRET_KEY = 'your_api_secret_key'
ACCESS_TOKEN = 'your_access_token'
ACCESS_TOKEN_SECRET = 'your_access_token_secret'

def post_tweet(tweet_text):
    # Set up authentication
    auth = tweepy.OAuthHandler(API_KEY, API_SECRET_KEY)
    auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
    # Create the API object
    api = tweepy.API(auth)
    try:
        # Post the tweet
        api.update_status(tweet_text)
        print("Tweet posted successfully!")
    except Exception as e:
        print(f"Error posting tweet: {e}")

# Example usage
post_tweet("This is a test tweet posted automatically with Python!")
Remember to replace your_api_key, your_api_secret_key, your_access_token, and your_access_token_secret with the actual values from your Twitter developer account.
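Note that api.update_status goes through the v1.1 endpoint, which newer developer accounts may not be able to use; with the v2 API, Tweepy's equivalent is tweepy.Client. A minimal sketch with the same placeholder credentials:

import tweepy

client = tweepy.Client(
    consumer_key=API_KEY,
    consumer_secret=API_SECRET_KEY,
    access_token=ACCESS_TOKEN,
    access_token_secret=ACCESS_TOKEN_SECRET,
)
client.create_tweet(text="This is a test tweet posted automatically with Python!")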
9. Scheduled computer shutdown
On Windows it can be done like this:
import os
import time

def schedule_shutdown(minutes):
    seconds = minutes * 60
    time.sleep(seconds)            # wait, then trigger an immediate shutdown
    os.system("shutdown -s -t 0")  # Windows command

# Example usage
schedule_shutdown(30)  # shut down in 30 minutes
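The shutdown command differs by operating system. A cross-platform sketch (on Linux/macOS the command typically requires administrator privileges):

import os
import platform
import time

def schedule_shutdown(minutes):
    time.sleep(minutes * 60)
    if platform.system() == "Windows":
        os.system("shutdown -s -t 0")
    else:
        os.system("shutdown -h now")  # Linux/macOS; usually needs sudo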
10. Monitor CPU usage
Use the psutil library to monitor CPU usage:
import psutil

def monitor_cpu_usage(interval=1):
    # cpu_percent(interval=...) blocks for the interval, so this prints once per interval
    while True:
        cpu_percent = psutil.cpu_percent(interval=interval)
        print(f"Current CPU usage: {cpu_percent}%")

# Example usage
monitor_cpu_usage()
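A monitoring loop usually exists to trigger an action; a small variant that warns when usage exceeds a threshold (the 90% figure is an arbitrary example):

import psutil

def alert_on_high_cpu(threshold=90, interval=1):
    while True:
        cpu_percent = psutil.cpu_percent(interval=interval)
        if cpu_percent > threshold:
            print(f"Warning: CPU usage at {cpu_percent}% (above {threshold}%)")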
11. Parse and extract HTML
from bs4 import BeautifulSoup
import requests

def scrape_data(url):
    response = requests.get(url)
    soup = BeautifulSoup(response.text, 'html.parser')
    # Extract the data you need...
    return soup
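What to extract depends on the page; as one illustration, a sketch that pulls every link's text and URL from the parsed document:

def extract_links(url):
    response = requests.get(url)
    soup = BeautifulSoup(response.text, 'html.parser')
    # Collect (text, href) pairs for all anchor tags that have an href
    return [(a.get_text(strip=True), a["href"])
            for a in soup.find_all("a", href=True)]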
12. Batch rename files
import os

def batch_rename_files(directory, prefix):
    for idx, filename in enumerate(os.listdir(directory)):
        ext = os.path.splitext(filename)[1]
        new_name = f"{prefix}_{idx+1}{ext}"
        os.rename(os.path.join(directory, filename), os.path.join(directory, new_name))
        print(f"Renamed to: {new_name}")

# Example usage
batch_rename_files("/path/to/directory", "new_filename")
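os.listdir returns entries in arbitrary order, so the numbering above can differ between runs; wrapping the listing in sorted() makes the result deterministic:

    for idx, filename in enumerate(sorted(os.listdir(directory))):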
13. Clean up duplicate files
Cleaning up duplicate files usually means computing a hash of each file (such as MD5 or SHA-256) and then comparing the hashes to find duplicates. The example below walks every file under a given directory and deletes any duplicates it finds.
import os
import hashlib

def calculate_md5(file_path):
    """Compute the MD5 hash of the given file"""
    hash_md5 = hashlib.md5()
    with open(file_path, "rb") as f:
        # Read in 4 KB chunks so large files never have to fit in memory
        for chunk in iter(lambda: f.read(4096), b""):
            hash_md5.update(chunk)
    return hash_md5.hexdigest()

def remove_duplicate_files(directory):
    """Find and delete duplicate files in the given directory"""
    file_hashes = {}
    duplicates = []
    for root, _, files in os.walk(directory):
        for filename in files:
            file_path = os.path.join(root, filename)
            file_hash = calculate_md5(file_path)
            if file_hash not in file_hashes:
                file_hashes[file_hash] = file_path
            else:
                duplicates.append(file_path)
    # Delete the duplicates
    for duplicate in duplicates:
        os.remove(duplicate)
        print(f"Deleted duplicate file: {duplicate}")

# Example usage
remove_duplicate_files("/path/to/your/directory")
This code first defines calculate_md5 to compute a file's MD5 hash, then remove_duplicate_files to walk every file in the directory and use the hashes to identify and delete duplicates.
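MD5 is fast but has known collisions; if that matters for your data, hashlib.sha256 is a drop-in replacement used exactly the same way:

def calculate_sha256(file_path):
    h = hashlib.sha256()
    with open(file_path, "rb") as f:
        for chunk in iter(lambda: f.read(4096), b""):
            h.update(chunk)
    return h.hexdigest()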
14. Create an audiobook
The pyttsx3 library can convert text to speech:
import pyttsx3

def text_to_speech(text):
    engine = pyttsx3.init()
    engine.say(text)
    engine.runAndWait()

# Example usage
text_to_speech("This is a test sentence.")
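engine.say only plays the audio through the speakers; for an actual audiobook file, pyttsx3 also offers save_to_file (the supported output formats depend on the platform's speech engine, and the file name here is just an example):

def text_to_audio_file(text, output_path="audiobook.mp3"):
    engine = pyttsx3.init()
    engine.save_to_file(text, output_path)  # queue the text for rendering to a file
    engine.runAndWait()                     # block until the file is written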
15. PDF editor
Use the PyPDF4 library to merge PDF files:
from PyPDF4 import PdfFileMerger

def merge_pdfs(pdf_list, output_path):
    merger = PdfFileMerger()
    for pdf in pdf_list:
        merger.append(pdf)
    merger.write(output_path)
    merger.close()
    print(f"Merged and saved to: {output_path}")

# Example usage
merge_pdfs(["file1.pdf", "file2.pdf"], "merged_output.pdf")
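Splitting works with the same library's reader and writer classes; a minimal sketch that writes each page of a PDF to its own file (the output file names are illustrative):

from PyPDF4 import PdfFileReader, PdfFileWriter

def split_pdf(input_path, output_prefix="page"):
    reader = PdfFileReader(input_path)
    for i in range(reader.getNumPages()):
        writer = PdfFileWriter()
        writer.addPage(reader.getPage(i))
        with open(f"{output_prefix}_{i + 1}.pdf", "wb") as f:
            writer.write(f)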