Add files using upload-large-folder tool
- CSD_non_disordered.zip +3 -0
- CSD_non_disordered/.DS_Store +0 -0
- CSD_non_disordered/cleaned_ase.zip +3 -0
- CSD_non_disordered/cleaned_pymatgen.zip +3 -0
- clean.py +40 -0
- process_cif.py +50 -0
- slurm-252872.out +4 -0
- sub_cpu.sh +39 -0
CSD_non_disordered.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:64a57930f5c9a2a575f3091c4399cab97f28bfc251911a00fd5d511c386e5e9d
+size 292253426
CSD_non_disordered/.DS_Store
ADDED
Binary file (6.15 kB)
CSD_non_disordered/cleaned_ase.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1300733f40ef6bf1facf10f2277e1a9ddb51f84e62e2a2cdc07edd5d92b39c38
+size 361965261
CSD_non_disordered/cleaned_pymatgen.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b1c44a69f5825787919fa0a1466008b47ca39a5b120028211a6bfc0495f32e60
+size 258687792
clean.py
ADDED
@@ -0,0 +1,40 @@
+import os
+from tqdm import tqdm
+from ase.io import read, write
+from pymatgen.io.cif import CifParser
+import warnings
+
+warnings.filterwarnings("ignore")
+
+
+# Define your input and output folders
+input_folder = "/users/PAS2490/marcusshen/cmame/Data/CSD_non_disordered/CSD_non_disordered/"
+output_folder_ase = "/users/PAS2490/marcusshen/cmame/Data/CSD_non_disordered/CSD_non_disordered/cleaned_ase/"
+output_folder_pymatgen = "/users/PAS2490/marcusshen/cmame/Data/CSD_non_disordered/CSD_non_disordered/cleaned_pymatgen/"
+
+
+# Create output directories if they do not exist
+os.makedirs(output_folder_ase, exist_ok=True)
+os.makedirs(output_folder_pymatgen, exist_ok=True)
+
+# Get the list of CIF files in the input folder
+cif_files = [f for f in os.listdir(input_folder) if f.endswith(".cif")]
+
+# Iterate through all files in the input folder
+for file_name in tqdm(cif_files, desc="Processing CIF files"):
+    if file_name.endswith(".cif"):  # Process only CIF files
+        input_path = os.path.join(input_folder, file_name)
+        ase_output_path = os.path.join(output_folder_ase, file_name)
+        pymatgen_output_path = os.path.join(output_folder_pymatgen, file_name)
+
+        try:
+            # Step 1: Use ASE to clean the structure and save to output folder
+            structure = read(input_path)
+            write(ase_output_path, structure)
+
+            # Step 2: Use Pymatgen to further clean and save the structure
+            parser = CifParser(ase_output_path)
+            structure_pymatgen = parser.get_structures()[0]  # Extract the first structure
+            structure_pymatgen.to(filename=pymatgen_output_path)
+        except Exception as e:
+            print(f"Error processing file {file_name}: {e}")
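Note: parser.get_structures() still works but is deprecated in recent pymatgen releases in favor of parser.parse_structures(), which is one reason clean.py silences warnings globally. Below is a minimal single-file sketch of the same ASE-then-pymatgen round-trip that prefers the newer call when it exists; the file names are hypothetical placeholders, and the version check is an assumption about which pymatgen release is installed.

import warnings
from ase.io import read, write
from pymatgen.io.cif import CifParser

warnings.filterwarnings("ignore")

# Hypothetical file names, for illustration only.
raw_cif = "example_raw.cif"
ase_cif = "example_ase.cif"
clean_cif = "example_clean.cif"

# Step 1: round-trip through ASE to normalize the raw CIF.
write(ase_cif, read(raw_cif))

# Step 2: re-parse with pymatgen and write the cleaned structure.
parser = CifParser(ase_cif)
if hasattr(parser, "parse_structures"):
    # Newer pymatgen; primitive=True mirrors get_structures()' historical default.
    structure = parser.parse_structures(primitive=True)[0]
else:
    structure = parser.get_structures()[0]  # Older pymatgen releases.
structure.to(filename=clean_cif)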
process_cif.py
ADDED
@@ -0,0 +1,50 @@
+import os
+from tqdm import tqdm
+from ase.io import read, write
+from pymatgen.io.cif import CifParser
+from multiprocessing import Pool, cpu_count
+import warnings
+
+warnings.filterwarnings("ignore")
+
+# Define your input and output folders
+input_folder = "/users/PAS2490/marcusshen/cmame/Data/CSD_non_disordered/CSD_non_disordered/"
+output_folder_ase = "/users/PAS2490/marcusshen/cmame/Data/CSD_non_disordered/CSD_non_disordered/cleaned_ase/"
+output_folder_pymatgen = "/users/PAS2490/marcusshen/cmame/Data/CSD_non_disordered/CSD_non_disordered/cleaned_pymatgen/"
+
+# Create output directories if they do not exist
+os.makedirs(output_folder_ase, exist_ok=True)
+os.makedirs(output_folder_pymatgen, exist_ok=True)
+
+# Get the list of CIF files in the input folder
+cif_files = [f for f in os.listdir(input_folder) if f.endswith(".cif")]
+
+def process_file(file_name):
+    input_path = os.path.join(input_folder, file_name)
+    ase_output_path = os.path.join(output_folder_ase, file_name)
+    pymatgen_output_path = os.path.join(output_folder_pymatgen, file_name)
+
+    try:
+        # Step 1: Use ASE to clean the structure and save to output folder
+        structure = read(input_path)
+        write(ase_output_path, structure)
+
+        # Step 2: Use Pymatgen to further clean and save the structure
+        parser = CifParser(ase_output_path)
+        structure_pymatgen = parser.get_structures()[0]  # Extract the first structure
+        structure_pymatgen.to(filename=pymatgen_output_path)
+    except Exception as e:
+        return f"Error processing file {file_name}: {e}"
+    return f"Successfully processed {file_name}"
+
+if __name__ == "__main__":
+    # Use a Pool to process files in parallel
+    num_workers = cpu_count()  # Use all available CPUs
+    with Pool(processes=num_workers) as pool:
+        # Use tqdm for a progress bar
+        results = list(tqdm(pool.imap(process_file, cif_files), total=len(cif_files), desc="Processing CIF files"))
+
+    # Print results
+    for result in results:
+        if "Error" in result:
+            print(result)
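One caveat when process_cif.py runs inside the SLURM job defined in sub_cpu.sh below: multiprocessing.cpu_count() reports every logical CPU on the node, not just the 32 cores requested with --cpus-per-task, so the pool can be oversubscribed on a shared node. A small sketch of tying the worker count to the allocation instead; SLURM exports SLURM_CPUS_PER_TASK when --cpus-per-task is given, and the cpu_count() fallback for non-SLURM runs is an assumption, not part of the original script.

import os
from multiprocessing import Pool, cpu_count

# Use the SLURM allocation when available, otherwise fall back to all visible CPUs.
num_workers = int(os.environ.get("SLURM_CPUS_PER_TASK", cpu_count()))

with Pool(processes=num_workers) as pool:
    pass  # imap over cif_files here, as in process_cif.py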
slurm-252872.out
ADDED
@@ -0,0 +1,4 @@
+
+Error processing file CUGMPP.cif: data_CSD_CIF_CUGMPP
+Processing complete!
+Elapsed time: 01:18:01
sub_cpu.sh
ADDED
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+#SBATCH --job-name=clean              # Job name
+#SBATCH --account=PAS2490             # Project ID
+#SBATCH --nodes=1                     # Number of nodes
+#SBATCH --ntasks-per-node=1           # Tasks per node
+#SBATCH --cpus-per-task=32            # CPU cores per task
+#SBATCH --mem=100G                    # Memory limit
+#SBATCH --time=24:00:00               # Wall-time limit
+#SBATCH --mail-type=BEGIN,END,FAIL
+#SBATCH --mail-user=uceckz0@ucl.ac.uk
+# Run command or script: wget https://repo.anaconda.com/archive/Anaconda3-2023.07-2-Linux-x86_64.sh
+start=$(date +%s)
+
+source $HOME/miniconda3/etc/profile.d/conda.sh
+conda activate matbench_env
+
+PYTHON_SCRIPT="process_cif.py"
+
+# Check if the Python script exists
+if [ ! -f "$PYTHON_SCRIPT" ]; then
+    echo "Python script not found: $PYTHON_SCRIPT"
+    exit 1
+fi
+
+# Run the Python script using Python interpreter
+python "$PYTHON_SCRIPT"
+
+echo "Processing complete!"
+
+conda deactivate
+
+
+end=$(date +%s)
+elapsed=$(( end - start ))
+hours=$(( elapsed / 3600 ))
+minutes=$(( (elapsed % 3600) / 60 ))
+seconds=$(( elapsed % 60 ))
+printf "Elapsed time: %02d:%02d:%02d\n" $hours $minutes $seconds
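For reference, under a standard SLURM setup a script like this is submitted with sbatch sub_cpu.sh; sbatch writes the job's stdout to slurm-<jobid>.out by default, which is where the slurm-252872.out file above, including the final Elapsed time line printed by the closing printf, comes from.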