Skip to content

Ladygin kpoint convergence #64

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 13 commits into
base: dev
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
35 changes: 35 additions & 0 deletions Ladygin/Changes.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
Day 1; 04/17/25

- Made base and qe classes for driver and tracker
- Tested on base case of Si


Day 2; 04/18/25

- Polished the code
- Finished the parser
- Extracted the Scheduler to separate file
- Organized as a lib
- Created main script

Day 3; 04/19/25

- Finished debugging lib and binary
- Added stats save
- Separated job_runner into separate class
- Reworked the class initializers

Day 4: 04/20/25

- Added parallelization option to the driver and job scheduler
- Organized the lib in a modular fashion
- Separated the metrics into a separate class
- Started computing for other systems
- Made summary notebook

Day 5: 04/21/25

- Added pytest
- Finished calculation for 10 materials
- Removed unnecessary tests inside the folder
- Added error for vectors
72 changes: 72 additions & 0 deletions Ladygin/ConvTrack
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
#!/pscratch/sd/v/vladygin/doped-Si_project/MLFF_TDEP/testbench/bin/python

import argparse

# Custom dependences
from ConvergenceTracker import *

def parser(argv=None):
    """Parse the ConvTrack command-line arguments.

    argv - optional list of argument strings; defaults to sys.argv[1:]
           (passing an explicit list makes the function testable without
           touching the real command line).

    Returns the populated argparse.Namespace.
    """
    # Local is named arg_parser so it no longer shadows this function's name.
    arg_parser = argparse.ArgumentParser(description='ConvTrack - tool for tracking kpoint convergence of point calcs')
    arg_parser.add_argument('-mode', type=str, default='qe', help='What software is used for simulation', choices=['qe', 'vasp'])

    # Arguments for the conv tracker
    arg_parser.add_argument('-workdir', type=str, default='./', help='Path to the working folder')
    arg_parser.add_argument('-target', type=str, default='total_energy', help='Target for error minimization')
    arg_parser.add_argument('-eps', type=float, default=1e-2, help='Convergence criteria')

    # Arguments for the calculator
    arg_parser.add_argument('-calc', type=str, default='shell', help='Which calculator to use (script or batch format will be supported)', choices=['shell', 'par', 'job'])
    arg_parser.add_argument('-path_to_exec', type=str, default='/pscratch/sd/v/vladygin/tools/q-e_new/q-e/bin', help='Path to executables of the calculator')
    arg_parser.add_argument('-ncores', type=int, default=1, help='Number of cores for parallel run')
    arg_parser.add_argument('-nk', type=int, default=1, help='Kpoint parallelization settings for quantum espresso')

    # Arguments for the driver
    arg_parser.add_argument('-input', type=str, default='pw.in', help='Input file with point calc settings')
    arg_parser.add_argument('-encut', type=float, default=40, help='Energy cutoff in Ry')

    # Arguments for the scheduler
    arg_parser.add_argument('-k_sch', type=str, default='uniform', help='Which scheduler to use to obtain new kpoint')
    arg_parser.add_argument('-k_range', type=int, default=[2, 30, 2], nargs=3, help='k_start, k_end, k_step for uniform scheduler')

    # Metric mode (help text fixed: it was a copy-paste of the scheduler help)
    arg_parser.add_argument('-metrix', type=str, default='mae', help='Which metric to use to compute the convergence error')

    args = arg_parser.parse_args(argv)
    return args

if __name__ == "__main__":

    args = parser()

    # Build the kpoint scheduler; fail fast on an unsupported choice instead
    # of hitting a NameError when k_sch is used further down.
    if args.k_sch == 'uniform':
        k_sch = kpoint_scheduler_uniform(*args.k_range)
    else:
        raise ValueError(f"Unknown kpoint scheduler: {args.k_sch}")

    if args.metrix == 'mae':
        metrix = mae()
    else:
        raise ValueError(f"Unknown metric: {args.metrix}")

    if args.mode == 'qe':
        if args.calc == 'shell':
            calculator = calculator_qe(args.path_to_exec)
        elif args.calc == 'par':
            calculator = calculator_qe_par(args.path_to_exec, args.ncores, args.nk)
        elif args.calc == 'job':
            calculator = calculator_qe_par(args.path_to_exec, args.ncores, args.nk,
                                           job=True, workdir=args.workdir)

        driver = DriverQE(args.workdir, calculator, args.input, args.encut)
    else:
        # argparse restricts -mode to {'qe', 'vasp'}, but vasp is not wired up yet;
        # without this branch 'driver' would be undefined below.
        raise NotImplementedError(f"Mode '{args.mode}' is not supported yet")

    tracker = ConvergenceTrackerQE(workdir=args.workdir,
                                   target=args.target,
                                   eps=args.eps,
                                   driver=driver,
                                   k_sch=k_sch,
                                   metrix=metrix)

    k_list, target_list, errors = tracker.find_opt()

    print(f"Optimal value of kpoint is {k_list[-1]} with error of {errors[-1]*1e3} meV")
6 changes: 6 additions & 0 deletions Ladygin/ConvergenceTracker/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
from .driver import DriverQE
from .driver import calculator_qe, calculator_qe_par
from .search import kpoint_scheduler_uniform, mae
from .convergence_tracker_qe import ConvergenceTrackerQE

#__all__ = [DriverQE, kpoint_scheduler_uniform, ConvergenceTrackerQE]
79 changes: 79 additions & 0 deletions Ladygin/ConvergenceTracker/convergence_tracker_base.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,79 @@
from abc import ABC, abstractmethod
import pickle
import os


from .driver import Driver
from .search import kpoint_scheduler, metrix



class ConvergenceTracker(ABC):
    """Base class for convergence trackers.

    Declares the interface every concrete tracker must implement and
    provides shared pickle-based save/load helpers.
    """

    @abstractmethod
    def __init__(self, workdir: str,
                 target: str,
                 eps: float,
                 driver: Driver,
                 k_sch: kpoint_scheduler,
                 metrix: metrix,
                 **kwargs) -> None:
        """workdir - directory for input files and calculations
        target - target property to optimize
        eps - convergence criteria (in meV)

        driver - driver for calculations
        k_sch - kpoint scheduler for search kpoint generation
        metrix - metric used to compute the error
        """

        self.workdir = workdir
        self.target = target
        self.eps = eps
        self.kpoint_opt = None  # optimal kpoint value, set once convergence is found

        self.driver = driver
        self.k_sch = k_sch
        self.metrix = metrix

    @abstractmethod
    def save_stat(self, k_list: list, target_list: list, errors: list) -> None:
        """Save stats of kpoint convergence iterations"""
        raise NotImplementedError

    @abstractmethod
    def _step(self, k_curr: int) -> float:
        """Run one calculation at the current kpoint and return the target value.

        k_curr - current k point
        """
        raise NotImplementedError

    @abstractmethod
    def find_opt(self) -> None:
        """Finds optimal kpoint by running simulations and refining the parameter
        until convergence is reached."""
        raise NotImplementedError

    def save(self, name: str) -> None:
        """Pickle the tracker state to <name>.dat.

        name - file stem; also stored on self.name so load() can find the file.
        """
        self.name = name
        # 'with' guarantees the handle is closed even if pickling raises
        # (the original open/close pair leaked the handle on error).
        with open(self.name + '.dat', 'wb') as file:
            pickle.dump(self.__dict__, file)

    def load(self) -> None:
        """Restore state previously written by save(); requires self.name to be set."""
        with open(self.name + '.dat', 'rb') as file:
            self.__dict__ = pickle.load(file)

    def __repr__(self) -> str:
        # fixed: a stray ')' used to follow the target name
        return f"Convergence Tracker of {self.target} with eps equal to {self.eps} with results saved at {self.workdir}"
94 changes: 94 additions & 0 deletions Ladygin/ConvergenceTracker/convergence_tracker_qe.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,94 @@
from .convergence_tracker_base import ConvergenceTracker

import os
import warnings

from .driver import DriverQE
from .search import kpoint_scheduler, kpoint_scheduler_uniform
from .search import metrix, mae

from .utils.exceptions import NotConvergedWarning
from .utils.consts import consts

class ConvergenceTrackerQE(ConvergenceTracker):
    """Implementation of the convergence tracker for Quantum Espresso simulations."""

    def __init__(self, workdir: str = './', target: str = 'total_energy', eps: float = 1e-2,
                 driver: DriverQE = None,
                 k_sch: kpoint_scheduler = None,
                 metrix: metrix = None,
                 **kwargs) -> None:
        """workdir - directory for input files and calculations
        target - target property to optimize
        eps - convergence criteria (in meV)

        driver - driver for calculations (default: a fresh DriverQE())
        k_sch - kpoint scheduler for search kpoint generation
                (default: kpoint_scheduler_uniform(2, 30))
        metrix - metric used to compute the error (default: mae())
        """
        # Defaults are built lazily: a default argument is evaluated once at
        # import time, so `driver=DriverQE()` would share one driver /
        # scheduler / metric instance between every tracker and leak state
        # (e.g. the scheduler's position) across runs.
        if driver is None:
            driver = DriverQE()
        if k_sch is None:
            k_sch = kpoint_scheduler_uniform(2, 30)
        if metrix is None:
            metrix = mae()
        super().__init__(workdir, target, eps, driver, k_sch, metrix, **kwargs)

    def save_stat(self, k_list: list, target_list: list, errors: list) -> None:
        """Save stats of kpoint convergence iterations to <workdir>/stats.dat."""

        out_path = os.path.join(self.workdir, 'stats.dat')
        with open(out_path, 'w') as f:
            f.write(f"kpoint {self.target} error\n")
            for k, target, err in zip(k_list, target_list, errors):
                f.write('%d %.8f %.8f\n' % (k, target, err))

    def _step(self, k_curr: int) -> float:
        """Run one calculation at kpoint k_curr and return the target property.

        k_curr - current k point
        """
        self.driver.gen_input(k_curr)
        self.driver.calc()
        return self.driver.extract_target(self.target)

    def find_opt(self) -> tuple:
        """Finds optimal kpoint by running simulations and refining the parameter
        until convergence is reached.

        Returns:
            k_list - list of kpoints
            target_list - list of target properties
            errors - list of error in target properties
        """

        # Ensure the driver works in the tracker's directory (plain check
        # instead of assert, which is stripped under `python -O`).
        if self.driver.workdir != self.workdir:
            self.driver.workdir = self.workdir

        # inf sentinels guarantee the first iteration always runs; they are
        # sliced off before saving/returning.
        target_list = [float('inf')]
        errors = [float('inf')]
        k_list = []

        it = 1
        print(f"iter, k_curr, {self.target}, error")
        # eps is given in meV while the target is tracked in eV, hence 1e-3
        while errors[-1] > self.eps * 1e-3:
            k_curr = self.k_sch.get_next()
            if k_curr < 0:  # scheduler exhausted its kpoint range
                break
            k_list.append(k_curr)

            target_curr = self._step(k_curr)

            target_list.append(target_curr)
            errors.append(self.metrix(target_list[-1], target_list[-2]))

            print("%d %d %.6f %.6f" % (it, k_curr, target_curr, errors[-1]))
            it += 1

        if errors[-1] > self.eps * 1e-3:
            warnings.warn("The search procedure not converged. Result will return last kpoint and error", NotConvergedWarning)

        # Saving statistics (sentinel entries dropped)
        self.save_stat(k_list[1:], target_list[2:], errors[2:])

        return k_list[1:], target_list[2:], errors[2:]
4 changes: 4 additions & 0 deletions Ladygin/ConvergenceTracker/driver/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
from .driver_base import Driver
from .driver_qe import DriverQE

from .calculator import calculator, calculator_qe, calculator_qe_par
2 changes: 2 additions & 0 deletions Ladygin/ConvergenceTracker/driver/calculator/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
from .calculator_base import calculator
from .calculator_qe import calculator_qe, calculator_qe_par
14 changes: 14 additions & 0 deletions Ladygin/ConvergenceTracker/driver/calculator/calculator_base.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
from abc import ABC, abstractmethod

class calculator(ABC):
    """Class for point calculations.

    Abstract base: concrete backends implement run_scf to execute a
    single scf calculation with their simulation software.
    """

    @abstractmethod
    def __init__(self, path_to_exec: str, **kwargs) -> None:
        """path_to_exec - path to executable"""
        self.path_to_exec = path_to_exec

    @abstractmethod
    def run_scf(self, input_file:str, output_file:str) -> None:
        """Runs scf calculation

        input_file - file with the calculation settings
        output_file - file the results are written to
        """
        raise NotImplementedError
72 changes: 72 additions & 0 deletions Ladygin/ConvergenceTracker/driver/calculator/calculator_qe.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
import subprocess as sp
import os

from .calculator_base import calculator
from .utils import wait_qe

class calculator_qe(calculator):
    """Class for point calculations using quantum espresso"""


    def __init__(self, path_to_exec: str = "/pscratch/sd/v/vladygin/tools/q-e_new/q-e/bin", **kwargs) -> None:
        """path_to_exec - path to the directory containing the QE binaries (pw.x)"""
        super().__init__(path_to_exec, **kwargs)


    def run_scf(self, input_file: str, output_file: str) -> None:
        """Runs scf calculations

        input_file - input settings
        output_file - output file to save results
        """
        binary = os.path.join(self.path_to_exec, "pw.x")
        # Argument list + stdout file handle instead of an interpolated
        # shell string: avoids shell quoting / injection problems when the
        # paths contain spaces or special characters.
        with open(output_file, "w") as out:
            sp.run([binary, "-input", input_file], stdout=out)

class calculator_qe_par(calculator_qe):
    """Point calculations with quantum espresso run in parallel via srun,
    either directly in the shell or submitted as a Slurm batch job."""

    # Designed to run on a single node; cores per node on the target machine.
    MAX_CORES_PER_NODE = 128

    def __init__(self, path_to_exec: str = "/pscratch/sd/v/vladygin/tools/q-e_new/q-e/bin",
                 ncores: int = 1, nk: int = 1,
                 job: bool = False,
                 workdir: str = None,
                 **kwargs) -> None:
        """path_to_exec - path to executable (kept for testing purposes, better remove)
        ncores - number of cores for parallelization
        nk - quantum espresso parallelization setting (-nk)
        job - submit through sbatch instead of running srun directly
        workdir - directory for the batch script; required when job is True
        """
        super().__init__(path_to_exec, **kwargs)

        self.ncores = ncores
        self.nk = nk

        self.job = job
        if job:
            # 'is None' replaces the original type(workdir) == type(None)
            if workdir is None:
                raise ValueError("For job calculation, one should provide a directory")
            self.workdir = workdir

    def run_scf(self, input_file: str, output_file: str) -> None:
        """Runs scf calculations

        input_file - input settings
        output_file - output file to save results
        """
        binary = os.path.join(self.path_to_exec, "pw.x")

        if not self.job:
            # Argument list + stdout handle instead of an interpolated shell
            # string: no quoting/injection issues with unusual paths.
            with open(output_file, "w") as out:
                sp.run(["srun", "-n", str(self.ncores), binary,
                        "-nk", str(self.nk), "-input", input_file],
                       stdout=out)
        else:
            job_file = os.path.join(self.workdir, "calc.sh")
            # Designed to run on one node
            if self.ncores > self.MAX_CORES_PER_NODE:
                raise ValueError("Number of cores is greater than number of cores per node")

            with open(job_file, "w") as file:
                file.write("#!/bin/bash\n")
                file.write(f"srun -n {self.ncores} {binary} -nk {self.nk} -input {input_file} > {output_file}\n")

            # Drop any stale output so wait_qe() cannot mistake a previous
            # run's file for this job's result.  Guarded: on the very first
            # run the file does not exist and os.remove would raise.
            if os.path.exists(output_file):
                os.remove(output_file)
            sp.run(["sbatch", "-J", "qe_scf", "-q", "premium", "-C", "cpu",
                    "-t", "1:00:00", "-N", "1", "-n", str(self.ncores), job_file])
            wait_qe(output_file)
Loading